1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index b89a739..79768fb 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -69,9 +75,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -80,6 +88,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -92,19 +101,24 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89@@ -115,9 +129,11 @@ devlist.h*
90 dnotify_test
91 docproc
92 dslm
93+dtc-lexer.lex.c
94 elf2ecoff
95 elfconfig.h*
96 evergreen_reg_safe.h
97+exception_policy.conf
98 fixdep
99 flask.h
100 fore200e_mkfirm
101@@ -125,12 +141,15 @@ fore200e_pca_fw.c*
102 gconf
103 gconf.glade.h
104 gen-devlist
105+gen-kdb_cmds.c
106 gen_crc32table
107 gen_init_cpio
108 generated
109 genheaders
110 genksyms
111 *_gray256.c
112+hash
113+hid-example
114 hpet_example
115 hugepage-mmap
116 hugepage-shm
117@@ -145,14 +164,14 @@ int32.c
118 int4.c
119 int8.c
120 kallsyms
121-kconfig
122+kern_constants.h
123 keywords.c
124 ksym.c*
125 ksym.h*
126 kxgettext
127 lex.c
128 lex.*.c
129-linux
130+lib1funcs.S
131 logo_*.c
132 logo_*_clut224.c
133 logo_*_mono.c
134@@ -162,14 +181,15 @@ mach-types.h
135 machtypes.h
136 map
137 map_hugetlb
138-media
139 mconf
140+mdp
141 miboot*
142 mk_elfconfig
143 mkboot
144 mkbugboot
145 mkcpustr
146 mkdep
147+mkpiggy
148 mkprep
149 mkregtable
150 mktables
151@@ -185,6 +205,8 @@ oui.c*
152 page-types
153 parse.c
154 parse.h
155+parse-events*
156+pasyms.h
157 patches*
158 pca200e.bin
159 pca200e_ecd.bin2
160@@ -194,6 +216,7 @@ perf-archive
161 piggyback
162 piggy.gzip
163 piggy.S
164+pmu-*
165 pnmtologo
166 ppc_defs.h*
167 pss_boot.h
168@@ -203,7 +226,10 @@ r200_reg_safe.h
169 r300_reg_safe.h
170 r420_reg_safe.h
171 r600_reg_safe.h
172+realmode.lds
173+realmode.relocs
174 recordmcount
175+regdb.c
176 relocs
177 rlim_names.h
178 rn50_reg_safe.h
179@@ -213,8 +239,12 @@ series
180 setup
181 setup.bin
182 setup.elf
183+signing_key*
184+size_overflow_hash.h
185 sImage
186+slabinfo
187 sm_tbl*
188+sortextable
189 split-include
190 syscalltab.h
191 tables.c
192@@ -224,6 +254,7 @@ tftpboot.img
193 timeconst.h
194 times.h*
195 trix_boot.h
196+user_constants.h
197 utsrelease.h*
198 vdso-syms.lds
199 vdso.lds
200@@ -235,13 +266,17 @@ vdso32.lds
201 vdso32.so.dbg
202 vdso64.lds
203 vdso64.so.dbg
204+vdsox32.lds
205+vdsox32-syms.lds
206 version.h*
207 vmImage
208 vmlinux
209 vmlinux-*
210 vmlinux.aout
211 vmlinux.bin.all
212+vmlinux.bin.bz2
213 vmlinux.lds
214+vmlinux.relocs
215 vmlinuz
216 voffset.h
217 vsyscall.lds
218@@ -249,9 +284,12 @@ vsyscall_32.lds
219 wanxlfw.inc
220 uImage
221 unifdef
222+utsrelease.h
223 wakeup.bin
224 wakeup.elf
225 wakeup.lds
226+x509*
227 zImage*
228 zconf.hash.c
229+zconf.lex.c
230 zoffset.h
231diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
232index 2fe6e76..889ee23 100644
233--- a/Documentation/kernel-parameters.txt
234+++ b/Documentation/kernel-parameters.txt
235@@ -976,6 +976,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
236 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
237 Default: 1024
238
239+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
240+ ignore grsecurity's /proc restrictions
241+
242+
243 hashdist= [KNL,NUMA] Large hashes allocated during boot
244 are distributed across NUMA nodes. Defaults on
245 for 64-bit NUMA, off otherwise.
246@@ -1928,6 +1932,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
247 noexec=on: enable non-executable mappings (default)
248 noexec=off: disable non-executable mappings
249
250+ nopcid [X86-64]
251+ Disable PCID (Process-Context IDentifier) even if it
252+ is supported by the processor.
253+
254 nosmap [X86]
255 Disable SMAP (Supervisor Mode Access Prevention)
256 even if it is supported by processor.
257@@ -2195,6 +2203,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
258 the specified number of seconds. This is to be used if
259 your oopses keep scrolling off the screen.
260
261+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
262+ virtualization environments that don't cope well with the
263+ expand down segment used by UDEREF on X86-32 or the frequent
264+ page table updates on X86-64.
265+
266+ pax_sanitize_slab=
267+ 0/1 to disable/enable slab object sanitization (enabled by
268+ default).
269+
270+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
271+
272+ pax_extra_latent_entropy
273+ Enable a very simple form of latent entropy extraction
274+ from the first 4GB of memory as the bootmem allocator
275+ passes the memory pages to the buddy allocator.
276+
277+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
278+ when the processor supports PCID.
279+
280 pcbit= [HW,ISDN]
281
282 pcd. [PARIDE]
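
The grsec_proc_gid= parameter documented above is a plain boot-time integer. As a rough sketch of how such a parameter is typically consumed (illustrative only: the patch's actual parser lives in the grsecurity/ directory it adds, and the variable name here is assumed):

	#include <linux/cache.h>
	#include <linux/init.h>
	#include <linux/kernel.h>

	/* GID exempted from the /proc restrictions; illustrative name. */
	static unsigned int grsec_proc_gid __read_mostly;

	static int __init setup_grsec_proc_gid(char *str)
	{
		grsec_proc_gid = simple_strtoul(str, NULL, 0);
		return 1;	/* parameter consumed */
	}
	__setup("grsec_proc_gid=", setup_grsec_proc_gid);
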
283diff --git a/Makefile b/Makefile
284index 33e36ab..31f1dc8 100644
285--- a/Makefile
286+++ b/Makefile
287@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
288
289 HOSTCC = gcc
290 HOSTCXX = g++
291-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
292-HOSTCXXFLAGS = -O2
293+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
294+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
295+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
296
297 # Decide whether to build built-in, modular, or both.
298 # Normally, just do built-in.
299@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
300 # Rules shared between *config targets and build targets
301
302 # Basic helpers built in scripts/
303-PHONY += scripts_basic
304-scripts_basic:
305+PHONY += scripts_basic gcc-plugins
306+scripts_basic: gcc-plugins
307 $(Q)$(MAKE) $(build)=scripts/basic
308 $(Q)rm -f .tmp_quiet_recordmcount
309
310@@ -576,6 +577,65 @@ else
311 KBUILD_CFLAGS += -O2
312 endif
313
314+ifndef DISABLE_PAX_PLUGINS
315+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
316+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
317+else
318+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
319+endif
320+ifneq ($(PLUGINCC),)
321+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
322+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
323+endif
324+ifdef CONFIG_PAX_MEMORY_STACKLEAK
325+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
326+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
327+endif
328+ifdef CONFIG_KALLOCSTAT_PLUGIN
329+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
330+endif
331+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
332+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
333+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
334+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
335+endif
336+ifdef CONFIG_CHECKER_PLUGIN
337+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
338+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
339+endif
340+endif
341+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
342+ifdef CONFIG_PAX_SIZE_OVERFLOW
343+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
344+endif
345+ifdef CONFIG_PAX_LATENT_ENTROPY
346+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
347+endif
348+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
349+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
350+endif
351+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
352+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
353+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
354+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
355+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
356+ifeq ($(KBUILD_EXTMOD),)
357+gcc-plugins:
358+ $(Q)$(MAKE) $(build)=tools/gcc
359+else
360+gcc-plugins: ;
361+endif
362+else
363+gcc-plugins:
364+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
365+	$(error Your gcc installation does not support plugins.  If the necessary headers for plugin support are missing, they should be installed.  On Debian, apt-get install gcc-<ver>-plugin-dev.  If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
366+else
367+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
368+endif
369+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
370+endif
371+endif
372+
373 include $(srctree)/arch/$(SRCARCH)/Makefile
374
375 ifdef CONFIG_READABLE_ASM
376@@ -733,7 +793,7 @@ export mod_sign_cmd
377
378
379 ifeq ($(KBUILD_EXTMOD),)
380-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
381+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
382
383 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
384 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
385@@ -782,6 +842,8 @@ endif
386
387 # The actual objects are generated when descending,
388 # make sure no implicit rule kicks in
389+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
390+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
391 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
392
393 # Handle descending into subdirectories listed in $(vmlinux-dirs)
394@@ -791,7 +853,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
395 # Error messages still appears in the original language
396
397 PHONY += $(vmlinux-dirs)
398-$(vmlinux-dirs): prepare scripts
399+$(vmlinux-dirs): gcc-plugins prepare scripts
400 $(Q)$(MAKE) $(build)=$@
401
402 # Store (new) KERNELRELASE string in include/config/kernel.release
403@@ -835,6 +897,7 @@ prepare0: archprepare FORCE
404 $(Q)$(MAKE) $(build)=.
405
406 # All the preparing..
407+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
408 prepare: prepare0
409
410 # Generate some files
411@@ -942,6 +1005,8 @@ all: modules
412 # using awk while concatenating to the final file.
413
414 PHONY += modules
415+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
416+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
417 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
418 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
419 @$(kecho) ' Building modules, stage 2.';
420@@ -957,7 +1022,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
421
422 # Target to prepare building external modules
423 PHONY += modules_prepare
424-modules_prepare: prepare scripts
425+modules_prepare: gcc-plugins prepare scripts
426
427 # Target to install modules
428 PHONY += modules_install
429@@ -1023,7 +1088,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
430 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
431 signing_key.priv signing_key.x509 x509.genkey \
432 extra_certificates signing_key.x509.keyid \
433- signing_key.x509.signer
434+ signing_key.x509.signer tools/gcc/size_overflow_hash.h
435
436 # clean - Delete most, but leave enough to build external modules
437 #
438@@ -1063,6 +1128,7 @@ distclean: mrproper
439 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
440 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
441 -o -name '.*.rej' \
442+ -o -name '.*.rej' -o -name '*.so' \
443 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
444 -type f -print | xargs rm -f
445
446@@ -1223,6 +1289,8 @@ PHONY += $(module-dirs) modules
447 $(module-dirs): crmodverdir $(objtree)/Module.symvers
448 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
449
450+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
451+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
452 modules: $(module-dirs)
453 @$(kecho) ' Building modules, stage 2.';
454 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
455@@ -1359,17 +1427,21 @@ else
456 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
457 endif
458
459-%.s: %.c prepare scripts FORCE
460+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
461+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
462+%.s: %.c gcc-plugins prepare scripts FORCE
463 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
464 %.i: %.c prepare scripts FORCE
465 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
466-%.o: %.c prepare scripts FORCE
467+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
468+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
469+%.o: %.c gcc-plugins prepare scripts FORCE
470 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
471 %.lst: %.c prepare scripts FORCE
472 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
473-%.s: %.S prepare scripts FORCE
474+%.s: %.S gcc-plugins prepare scripts FORCE
475 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
476-%.o: %.S prepare scripts FORCE
477+%.o: %.S gcc-plugins prepare scripts FORCE
478 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
479 %.symtypes: %.c prepare scripts FORCE
480 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
481@@ -1379,11 +1451,15 @@ endif
482 $(cmd_crmodverdir)
483 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
484 $(build)=$(build-dir)
485-%/: prepare scripts FORCE
486+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
487+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
488+%/: gcc-plugins prepare scripts FORCE
489 $(cmd_crmodverdir)
490 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
491 $(build)=$(build-dir)
492-%.ko: prepare scripts FORCE
493+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
494+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
495+%.ko: gcc-plugins prepare scripts FORCE
496 $(cmd_crmodverdir)
497 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
498 $(build)=$(build-dir) $(@:.ko=.o)
499diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
500index c2cbe4f..f7264b4 100644
501--- a/arch/alpha/include/asm/atomic.h
502+++ b/arch/alpha/include/asm/atomic.h
503@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
504 #define atomic_dec(v) atomic_sub(1,(v))
505 #define atomic64_dec(v) atomic64_sub(1,(v))
506
507+#define atomic64_read_unchecked(v) atomic64_read(v)
508+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
509+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
510+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
511+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
512+#define atomic64_inc_unchecked(v) atomic64_inc(v)
513+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
514+#define atomic64_dec_unchecked(v) atomic64_dec(v)
515+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
516+
517 #define smp_mb__before_atomic_dec() smp_mb()
518 #define smp_mb__after_atomic_dec() smp_mb()
519 #define smp_mb__before_atomic_inc() smp_mb()
520diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
521index ad368a9..fbe0f25 100644
522--- a/arch/alpha/include/asm/cache.h
523+++ b/arch/alpha/include/asm/cache.h
524@@ -4,19 +4,19 @@
525 #ifndef __ARCH_ALPHA_CACHE_H
526 #define __ARCH_ALPHA_CACHE_H
527
528+#include <linux/const.h>
529
530 /* Bytes per L1 (data) cache line. */
531 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
532-# define L1_CACHE_BYTES 64
533 # define L1_CACHE_SHIFT 6
534 #else
535 /* Both EV4 and EV5 are write-through, read-allocate,
536 direct-mapped, physical.
537 */
538-# define L1_CACHE_BYTES 32
539 # define L1_CACHE_SHIFT 5
540 #endif
541
542+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
543 #define SMP_CACHE_BYTES L1_CACHE_BYTES
544
545 #endif
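
The _AC() macro that replaces the hard-coded byte counts above comes from <linux/const.h>; it lets L1_CACHE_BYTES be derived from L1_CACHE_SHIFT in both C and assembly. For reference, this is essentially what that header provides:

	#ifdef __ASSEMBLY__
	#define _AC(X, Y)	X		/* assembler: bare constant */
	#else
	#define __AC(X, Y)	(X##Y)
	#define _AC(X, Y)	__AC(X, Y)	/* C: 1UL, an unsigned long */
	#endif

	#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)	/* 64UL in C, 64 in asm */

Making the constant unsigned long on the C side avoids surprises when it is used in address arithmetic.
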
546diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
547index 968d999..d36b2df 100644
548--- a/arch/alpha/include/asm/elf.h
549+++ b/arch/alpha/include/asm/elf.h
550@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
551
552 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
553
554+#ifdef CONFIG_PAX_ASLR
555+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
556+
557+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
558+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
559+#endif
560+
561 /* $0 is set by ld.so to a pointer to a function which might be
562 registered using atexit. This provides a mean for the dynamic
563 linker to call DT_FINI functions for shared libraries that have
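
The PAX_DELTA_* values above are bit counts rather than addresses: each says how many random bits PaX mixes into the corresponding base. A condensed sketch of how the ELF loader consumes them (following PaX's fs/binfmt_elf.c changes; pax_get_random_long() is PaX's entropy helper):

	if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
		/* e.g. 28 random page-aligned bits for mmap on 64-bit alpha */
		current->mm->delta_mmap =
			(pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
		current->mm->delta_stack =
			(pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;
	}
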
564diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
565index bc2a0da..8ad11ee 100644
566--- a/arch/alpha/include/asm/pgalloc.h
567+++ b/arch/alpha/include/asm/pgalloc.h
568@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
569 pgd_set(pgd, pmd);
570 }
571
572+static inline void
573+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
574+{
575+ pgd_populate(mm, pgd, pmd);
576+}
577+
578 extern pgd_t *pgd_alloc(struct mm_struct *mm);
579
580 static inline void
581diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
582index 81a4342..348b927 100644
583--- a/arch/alpha/include/asm/pgtable.h
584+++ b/arch/alpha/include/asm/pgtable.h
585@@ -102,6 +102,17 @@ struct vm_area_struct;
586 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
587 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
588 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
589+
590+#ifdef CONFIG_PAX_PAGEEXEC
591+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
592+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
593+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
594+#else
595+# define PAGE_SHARED_NOEXEC PAGE_SHARED
596+# define PAGE_COPY_NOEXEC PAGE_COPY
597+# define PAGE_READONLY_NOEXEC PAGE_READONLY
598+#endif
599+
600 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
601
602 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
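
The *_NOEXEC variants above set _PAGE_FOE (fault-on-execute), so an instruction fetch from such a page traps instead of executing. A minimal, illustrative sketch of the selection performed elsewhere in the patch when PAGEEXEC is active for a task:

	pgprot_t prot;

	if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vm_flags & VM_EXEC))
		prot = PAGE_COPY_NOEXEC;	/* fetches fault into pax_handle_fetch_fault() */
	else
		prot = PAGE_COPY;
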
603diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
604index 2fd00b7..cfd5069 100644
605--- a/arch/alpha/kernel/module.c
606+++ b/arch/alpha/kernel/module.c
607@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
608
609 /* The small sections were sorted to the end of the segment.
610 The following should definitely cover them. */
611- gp = (u64)me->module_core + me->core_size - 0x8000;
612+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
613 got = sechdrs[me->arch.gotsecindex].sh_addr;
614
615 for (i = 0; i < n; i++) {
616diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
617index b9e37ad..44c24e7 100644
618--- a/arch/alpha/kernel/osf_sys.c
619+++ b/arch/alpha/kernel/osf_sys.c
620@@ -1297,10 +1297,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
621 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
622
623 static unsigned long
624-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
625- unsigned long limit)
626+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
627+ unsigned long limit, unsigned long flags)
628 {
629 struct vm_unmapped_area_info info;
630+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
631
632 info.flags = 0;
633 info.length = len;
634@@ -1308,6 +1309,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
635 info.high_limit = limit;
636 info.align_mask = 0;
637 info.align_offset = 0;
638+ info.threadstack_offset = offset;
639 return vm_unmapped_area(&info);
640 }
641
642@@ -1340,20 +1342,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
643 merely specific addresses, but regions of memory -- perhaps
644 this feature should be incorporated into all ports? */
645
646+#ifdef CONFIG_PAX_RANDMMAP
647+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
648+#endif
649+
650 if (addr) {
651- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
652+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
653 if (addr != (unsigned long) -ENOMEM)
654 return addr;
655 }
656
657 /* Next, try allocating at TASK_UNMAPPED_BASE. */
658- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
659- len, limit);
660+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
661+
662 if (addr != (unsigned long) -ENOMEM)
663 return addr;
664
665 /* Finally, try allocating in low memory. */
666- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
667+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
668
669 return addr;
670 }
671diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
672index 0c4132d..88f0d53 100644
673--- a/arch/alpha/mm/fault.c
674+++ b/arch/alpha/mm/fault.c
675@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
676 __reload_thread(pcb);
677 }
678
679+#ifdef CONFIG_PAX_PAGEEXEC
680+/*
681+ * PaX: decide what to do with offenders (regs->pc = fault address)
682+ *
683+ * returns 1 when task should be killed
684+ * 2 when patched PLT trampoline was detected
685+ * 3 when unpatched PLT trampoline was detected
686+ */
687+static int pax_handle_fetch_fault(struct pt_regs *regs)
688+{
689+
690+#ifdef CONFIG_PAX_EMUPLT
691+ int err;
692+
693+ do { /* PaX: patched PLT emulation #1 */
694+ unsigned int ldah, ldq, jmp;
695+
696+ err = get_user(ldah, (unsigned int *)regs->pc);
697+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
698+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
699+
700+ if (err)
701+ break;
702+
703+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
704+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
705+ jmp == 0x6BFB0000U)
706+ {
707+ unsigned long r27, addr;
708+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
709+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
710+
711+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
712+ err = get_user(r27, (unsigned long *)addr);
713+ if (err)
714+ break;
715+
716+ regs->r27 = r27;
717+ regs->pc = r27;
718+ return 2;
719+ }
720+ } while (0);
721+
722+ do { /* PaX: patched PLT emulation #2 */
723+ unsigned int ldah, lda, br;
724+
725+ err = get_user(ldah, (unsigned int *)regs->pc);
726+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
727+ err |= get_user(br, (unsigned int *)(regs->pc+8));
728+
729+ if (err)
730+ break;
731+
732+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
733+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
734+ (br & 0xFFE00000U) == 0xC3E00000U)
735+ {
736+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
737+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
738+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
739+
740+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
741+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
742+ return 2;
743+ }
744+ } while (0);
745+
746+ do { /* PaX: unpatched PLT emulation */
747+ unsigned int br;
748+
749+ err = get_user(br, (unsigned int *)regs->pc);
750+
751+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
752+ unsigned int br2, ldq, nop, jmp;
753+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
754+
755+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
756+ err = get_user(br2, (unsigned int *)addr);
757+ err |= get_user(ldq, (unsigned int *)(addr+4));
758+ err |= get_user(nop, (unsigned int *)(addr+8));
759+ err |= get_user(jmp, (unsigned int *)(addr+12));
760+ err |= get_user(resolver, (unsigned long *)(addr+16));
761+
762+ if (err)
763+ break;
764+
765+ if (br2 == 0xC3600000U &&
766+ ldq == 0xA77B000CU &&
767+ nop == 0x47FF041FU &&
768+ jmp == 0x6B7B0000U)
769+ {
770+ regs->r28 = regs->pc+4;
771+ regs->r27 = addr+16;
772+ regs->pc = resolver;
773+ return 3;
774+ }
775+ }
776+ } while (0);
777+#endif
778+
779+ return 1;
780+}
781+
782+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
783+{
784+ unsigned long i;
785+
786+ printk(KERN_ERR "PAX: bytes at PC: ");
787+ for (i = 0; i < 5; i++) {
788+ unsigned int c;
789+ if (get_user(c, (unsigned int *)pc+i))
790+ printk(KERN_CONT "???????? ");
791+ else
792+ printk(KERN_CONT "%08x ", c);
793+ }
794+ printk("\n");
795+}
796+#endif
797
798 /*
799 * This routine handles page faults. It determines the address,
800@@ -133,8 +251,29 @@ retry:
801 good_area:
802 si_code = SEGV_ACCERR;
803 if (cause < 0) {
804- if (!(vma->vm_flags & VM_EXEC))
805+ if (!(vma->vm_flags & VM_EXEC)) {
806+
807+#ifdef CONFIG_PAX_PAGEEXEC
808+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
809+ goto bad_area;
810+
811+ up_read(&mm->mmap_sem);
812+ switch (pax_handle_fetch_fault(regs)) {
813+
814+#ifdef CONFIG_PAX_EMUPLT
815+ case 2:
816+ case 3:
817+ return;
818+#endif
819+
820+ }
821+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
822+ do_group_exit(SIGKILL);
823+#else
824 goto bad_area;
825+#endif
826+
827+ }
828 } else if (!cause) {
829 /* Allow reads even for write-only mappings */
830 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
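
The ((x ^ 0x8000UL) + 0x8000UL) arithmetic in pax_handle_fetch_fault() above sign-extends a 16-bit displacement whose upper bits were pre-filled with ones. A standalone C demonstration of the idiom:

	#include <stdio.h>

	/* Flip bit 15, then add 0x8000 back: for a negative displacement
	 * the pre-set upper ones survive; for a positive one the carry
	 * ripples through them and clears them to zero. */
	static unsigned long sext16(unsigned long insn)
	{
		unsigned long x = insn | 0xFFFFFFFFFFFF0000UL;
		return (x ^ 0x8000UL) + 0x8000UL;
	}

	int main(void)
	{
		printf("%lx\n", sext16(0x0010));	/* 10: +16 */
		printf("%lx\n", sext16(0xFFF0));	/* fffffffffffffff0: -16 */
		return 0;
	}
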
831diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
832index 18a9f5e..ca910b7 100644
833--- a/arch/arm/Kconfig
834+++ b/arch/arm/Kconfig
835@@ -1766,7 +1766,7 @@ config ALIGNMENT_TRAP
836
837 config UACCESS_WITH_MEMCPY
838 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
839- depends on MMU
840+ depends on MMU && !PAX_MEMORY_UDEREF
841 default y if CPU_FEROCEON
842 help
843 Implement faster copy_to_user and clear_user methods for CPU
844diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
845index da1c77d..2ee6056 100644
846--- a/arch/arm/include/asm/atomic.h
847+++ b/arch/arm/include/asm/atomic.h
848@@ -17,17 +17,35 @@
849 #include <asm/barrier.h>
850 #include <asm/cmpxchg.h>
851
852+#ifdef CONFIG_GENERIC_ATOMIC64
853+#include <asm-generic/atomic64.h>
854+#endif
855+
856 #define ATOMIC_INIT(i) { (i) }
857
858 #ifdef __KERNEL__
859
860+#define _ASM_EXTABLE(from, to) \
861+" .pushsection __ex_table,\"a\"\n"\
862+" .align 3\n" \
863+" .long " #from ", " #to"\n" \
864+" .popsection"
865+
866 /*
867 * On ARM, ordinary assignment (str instruction) doesn't clear the local
868 * strex/ldrex monitor on some implementations. The reason we can use it for
869 * atomic_set() is the clrex or dummy strex done on every exception return.
870 */
871 #define atomic_read(v) (*(volatile int *)&(v)->counter)
872+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
873+{
874+ return v->counter;
875+}
876 #define atomic_set(v,i) (((v)->counter) = (i))
877+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
878+{
879+ v->counter = i;
880+}
881
882 #if __LINUX_ARM_ARCH__ >= 6
883
884@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
885 int result;
886
887 __asm__ __volatile__("@ atomic_add\n"
888+"1: ldrex %1, [%3]\n"
889+" adds %0, %1, %4\n"
890+
891+#ifdef CONFIG_PAX_REFCOUNT
892+" bvc 3f\n"
893+"2: bkpt 0xf103\n"
894+"3:\n"
895+#endif
896+
897+" strex %1, %0, [%3]\n"
898+" teq %1, #0\n"
899+" bne 1b"
900+
901+#ifdef CONFIG_PAX_REFCOUNT
902+"\n4:\n"
903+ _ASM_EXTABLE(2b, 4b)
904+#endif
905+
906+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
907+ : "r" (&v->counter), "Ir" (i)
908+ : "cc");
909+}
910+
911+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
912+{
913+ unsigned long tmp;
914+ int result;
915+
916+ __asm__ __volatile__("@ atomic_add_unchecked\n"
917 "1: ldrex %0, [%3]\n"
918 " add %0, %0, %4\n"
919 " strex %1, %0, [%3]\n"
920@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
921 smp_mb();
922
923 __asm__ __volatile__("@ atomic_add_return\n"
924+"1: ldrex %1, [%3]\n"
925+" adds %0, %1, %4\n"
926+
927+#ifdef CONFIG_PAX_REFCOUNT
928+" bvc 3f\n"
929+" mov %0, %1\n"
930+"2: bkpt 0xf103\n"
931+"3:\n"
932+#endif
933+
934+" strex %1, %0, [%3]\n"
935+" teq %1, #0\n"
936+" bne 1b"
937+
938+#ifdef CONFIG_PAX_REFCOUNT
939+"\n4:\n"
940+ _ASM_EXTABLE(2b, 4b)
941+#endif
942+
943+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
944+ : "r" (&v->counter), "Ir" (i)
945+ : "cc");
946+
947+ smp_mb();
948+
949+ return result;
950+}
951+
952+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
953+{
954+ unsigned long tmp;
955+ int result;
956+
957+ smp_mb();
958+
959+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
960 "1: ldrex %0, [%3]\n"
961 " add %0, %0, %4\n"
962 " strex %1, %0, [%3]\n"
963@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
964 int result;
965
966 __asm__ __volatile__("@ atomic_sub\n"
967+"1: ldrex %1, [%3]\n"
968+" subs %0, %1, %4\n"
969+
970+#ifdef CONFIG_PAX_REFCOUNT
971+" bvc 3f\n"
972+"2: bkpt 0xf103\n"
973+"3:\n"
974+#endif
975+
976+" strex %1, %0, [%3]\n"
977+" teq %1, #0\n"
978+" bne 1b"
979+
980+#ifdef CONFIG_PAX_REFCOUNT
981+"\n4:\n"
982+ _ASM_EXTABLE(2b, 4b)
983+#endif
984+
985+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
986+ : "r" (&v->counter), "Ir" (i)
987+ : "cc");
988+}
989+
990+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
991+{
992+ unsigned long tmp;
993+ int result;
994+
995+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
996 "1: ldrex %0, [%3]\n"
997 " sub %0, %0, %4\n"
998 " strex %1, %0, [%3]\n"
999@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1000 smp_mb();
1001
1002 __asm__ __volatile__("@ atomic_sub_return\n"
1003-"1: ldrex %0, [%3]\n"
1004-" sub %0, %0, %4\n"
1005+"1: ldrex %1, [%3]\n"
1006+" subs %0, %1, %4\n"
1007+
1008+#ifdef CONFIG_PAX_REFCOUNT
1009+" bvc 3f\n"
1010+" mov %0, %1\n"
1011+"2: bkpt 0xf103\n"
1012+"3:\n"
1013+#endif
1014+
1015 " strex %1, %0, [%3]\n"
1016 " teq %1, #0\n"
1017 " bne 1b"
1018+
1019+#ifdef CONFIG_PAX_REFCOUNT
1020+"\n4:\n"
1021+ _ASM_EXTABLE(2b, 4b)
1022+#endif
1023+
1024 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1025 : "r" (&v->counter), "Ir" (i)
1026 : "cc");
1027@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1028 return oldval;
1029 }
1030
1031+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1032+{
1033+ unsigned long oldval, res;
1034+
1035+ smp_mb();
1036+
1037+ do {
1038+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1039+ "ldrex %1, [%3]\n"
1040+ "mov %0, #0\n"
1041+ "teq %1, %4\n"
1042+ "strexeq %0, %5, [%3]\n"
1043+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1044+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1045+ : "cc");
1046+ } while (res);
1047+
1048+ smp_mb();
1049+
1050+ return oldval;
1051+}
1052+
1053 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1054 {
1055 unsigned long tmp, tmp2;
1056@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1057
1058 return val;
1059 }
1060+
1061+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1062+{
1063+ return atomic_add_return(i, v);
1064+}
1065+
1066 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1067+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1068+{
1069+ (void) atomic_add_return(i, v);
1070+}
1071
1072 static inline int atomic_sub_return(int i, atomic_t *v)
1073 {
1074@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1075 return val;
1076 }
1077 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1078+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1079+{
1080+ (void) atomic_sub_return(i, v);
1081+}
1082
1083 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1084 {
1085@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1086 return ret;
1087 }
1088
1089+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1090+{
1091+ return atomic_cmpxchg(v, old, new);
1092+}
1093+
1094 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1095 {
1096 unsigned long flags;
1097@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1098 #endif /* __LINUX_ARM_ARCH__ */
1099
1100 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1101+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1102+{
1103+ return xchg(&v->counter, new);
1104+}
1105
1106 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1107 {
1108@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1109 }
1110
1111 #define atomic_inc(v) atomic_add(1, v)
1112+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1113+{
1114+ atomic_add_unchecked(1, v);
1115+}
1116 #define atomic_dec(v) atomic_sub(1, v)
1117+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1118+{
1119+ atomic_sub_unchecked(1, v);
1120+}
1121
1122 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1123+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1124+{
1125+ return atomic_add_return_unchecked(1, v) == 0;
1126+}
1127 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1128 #define atomic_inc_return(v) (atomic_add_return(1, v))
1129+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1130+{
1131+ return atomic_add_return_unchecked(1, v);
1132+}
1133 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1134 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1135
1136@@ -241,6 +428,14 @@ typedef struct {
1137 u64 __aligned(8) counter;
1138 } atomic64_t;
1139
1140+#ifdef CONFIG_PAX_REFCOUNT
1141+typedef struct {
1142+ u64 __aligned(8) counter;
1143+} atomic64_unchecked_t;
1144+#else
1145+typedef atomic64_t atomic64_unchecked_t;
1146+#endif
1147+
1148 #define ATOMIC64_INIT(i) { (i) }
1149
1150 #ifdef CONFIG_ARM_LPAE
1151@@ -257,6 +452,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1152 return result;
1153 }
1154
1155+static inline u64 atomic64_read_unchecked(const atomic64_unchecked_t *v)
1156+{
1157+ u64 result;
1158+
1159+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1160+" ldrd %0, %H0, [%1]"
1161+ : "=&r" (result)
1162+ : "r" (&v->counter), "Qo" (v->counter)
1163+ );
1164+
1165+ return result;
1166+}
1167+
1168 static inline void atomic64_set(atomic64_t *v, u64 i)
1169 {
1170 __asm__ __volatile__("@ atomic64_set\n"
1171@@ -265,6 +473,15 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1172 : "r" (&v->counter), "r" (i)
1173 );
1174 }
1175+
1176+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1177+{
1178+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1179+" strd %2, %H2, [%1]"
1180+ : "=Qo" (v->counter)
1181+ : "r" (&v->counter), "r" (i)
1182+ );
1183+}
1184 #else
1185 static inline u64 atomic64_read(const atomic64_t *v)
1186 {
1187@@ -279,6 +496,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1188 return result;
1189 }
1190
1191+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1192+{
1193+ u64 result;
1194+
1195+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1196+" ldrexd %0, %H0, [%1]"
1197+ : "=&r" (result)
1198+ : "r" (&v->counter), "Qo" (v->counter)
1199+ );
1200+
1201+ return result;
1202+}
1203+
1204 static inline void atomic64_set(atomic64_t *v, u64 i)
1205 {
1206 u64 tmp;
1207@@ -292,6 +522,21 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1208 : "r" (&v->counter), "r" (i)
1209 : "cc");
1210 }
1211+
1212+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1213+{
1214+ u64 tmp;
1215+
1216+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1217+"1: ldrexd %0, %H0, [%2]\n"
1218+" strexd %0, %3, %H3, [%2]\n"
1219+" teq %0, #0\n"
1220+" bne 1b"
1221+ : "=&r" (tmp), "=Qo" (v->counter)
1222+ : "r" (&v->counter), "r" (i)
1223+ : "cc");
1224+}
1225+
1226 #endif
1227
1228 static inline void atomic64_add(u64 i, atomic64_t *v)
1229@@ -302,6 +547,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1230 __asm__ __volatile__("@ atomic64_add\n"
1231 "1: ldrexd %0, %H0, [%3]\n"
1232 " adds %0, %0, %4\n"
1233+" adcs %H0, %H0, %H4\n"
1234+
1235+#ifdef CONFIG_PAX_REFCOUNT
1236+" bvc 3f\n"
1237+"2: bkpt 0xf103\n"
1238+"3:\n"
1239+#endif
1240+
1241+" strexd %1, %0, %H0, [%3]\n"
1242+" teq %1, #0\n"
1243+" bne 1b"
1244+
1245+#ifdef CONFIG_PAX_REFCOUNT
1246+"\n4:\n"
1247+ _ASM_EXTABLE(2b, 4b)
1248+#endif
1249+
1250+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1251+ : "r" (&v->counter), "r" (i)
1252+ : "cc");
1253+}
1254+
1255+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1256+{
1257+ u64 result;
1258+ unsigned long tmp;
1259+
1260+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1261+"1: ldrexd %0, %H0, [%3]\n"
1262+" adds %0, %0, %4\n"
1263 " adc %H0, %H0, %H4\n"
1264 " strexd %1, %0, %H0, [%3]\n"
1265 " teq %1, #0\n"
1266@@ -313,12 +588,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1267
1268 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1269 {
1270- u64 result;
1271- unsigned long tmp;
1272+ u64 result, tmp;
1273
1274 smp_mb();
1275
1276 __asm__ __volatile__("@ atomic64_add_return\n"
1277+"1: ldrexd %1, %H1, [%3]\n"
1278+" adds %0, %1, %4\n"
1279+" adcs %H0, %H1, %H4\n"
1280+
1281+#ifdef CONFIG_PAX_REFCOUNT
1282+" bvc 3f\n"
1283+" mov %0, %1\n"
1284+" mov %H0, %H1\n"
1285+"2: bkpt 0xf103\n"
1286+"3:\n"
1287+#endif
1288+
1289+" strexd %1, %0, %H0, [%3]\n"
1290+" teq %1, #0\n"
1291+" bne 1b"
1292+
1293+#ifdef CONFIG_PAX_REFCOUNT
1294+"\n4:\n"
1295+ _ASM_EXTABLE(2b, 4b)
1296+#endif
1297+
1298+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1299+ : "r" (&v->counter), "r" (i)
1300+ : "cc");
1301+
1302+ smp_mb();
1303+
1304+ return result;
1305+}
1306+
1307+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1308+{
1309+ u64 result;
1310+ unsigned long tmp;
1311+
1312+ smp_mb();
1313+
1314+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1315 "1: ldrexd %0, %H0, [%3]\n"
1316 " adds %0, %0, %4\n"
1317 " adc %H0, %H0, %H4\n"
1318@@ -342,6 +654,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1319 __asm__ __volatile__("@ atomic64_sub\n"
1320 "1: ldrexd %0, %H0, [%3]\n"
1321 " subs %0, %0, %4\n"
1322+" sbcs %H0, %H0, %H4\n"
1323+
1324+#ifdef CONFIG_PAX_REFCOUNT
1325+" bvc 3f\n"
1326+"2: bkpt 0xf103\n"
1327+"3:\n"
1328+#endif
1329+
1330+" strexd %1, %0, %H0, [%3]\n"
1331+" teq %1, #0\n"
1332+" bne 1b"
1333+
1334+#ifdef CONFIG_PAX_REFCOUNT
1335+"\n4:\n"
1336+ _ASM_EXTABLE(2b, 4b)
1337+#endif
1338+
1339+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1340+ : "r" (&v->counter), "r" (i)
1341+ : "cc");
1342+}
1343+
1344+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1345+{
1346+ u64 result;
1347+ unsigned long tmp;
1348+
1349+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1350+"1: ldrexd %0, %H0, [%3]\n"
1351+" subs %0, %0, %4\n"
1352 " sbc %H0, %H0, %H4\n"
1353 " strexd %1, %0, %H0, [%3]\n"
1354 " teq %1, #0\n"
1355@@ -353,18 +695,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1356
1357 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1358 {
1359- u64 result;
1360- unsigned long tmp;
1361+ u64 result, tmp;
1362
1363 smp_mb();
1364
1365 __asm__ __volatile__("@ atomic64_sub_return\n"
1366-"1: ldrexd %0, %H0, [%3]\n"
1367-" subs %0, %0, %4\n"
1368-" sbc %H0, %H0, %H4\n"
1369+"1: ldrexd %1, %H1, [%3]\n"
1370+" subs %0, %1, %4\n"
1371+" sbcs %H0, %H1, %H4\n"
1372+
1373+#ifdef CONFIG_PAX_REFCOUNT
1374+" bvc 3f\n"
1375+" mov %0, %1\n"
1376+" mov %H0, %H1\n"
1377+"2: bkpt 0xf103\n"
1378+"3:\n"
1379+#endif
1380+
1381 " strexd %1, %0, %H0, [%3]\n"
1382 " teq %1, #0\n"
1383 " bne 1b"
1384+
1385+#ifdef CONFIG_PAX_REFCOUNT
1386+"\n4:\n"
1387+ _ASM_EXTABLE(2b, 4b)
1388+#endif
1389+
1390 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1391 : "r" (&v->counter), "r" (i)
1392 : "cc");
1393@@ -398,6 +754,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1394 return oldval;
1395 }
1396
1397+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1398+{
1399+ u64 oldval;
1400+ unsigned long res;
1401+
1402+ smp_mb();
1403+
1404+ do {
1405+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1406+ "ldrexd %1, %H1, [%3]\n"
1407+ "mov %0, #0\n"
1408+ "teq %1, %4\n"
1409+ "teqeq %H1, %H4\n"
1410+ "strexdeq %0, %5, %H5, [%3]"
1411+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1412+ : "r" (&ptr->counter), "r" (old), "r" (new)
1413+ : "cc");
1414+ } while (res);
1415+
1416+ smp_mb();
1417+
1418+ return oldval;
1419+}
1420+
1421 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1422 {
1423 u64 result;
1424@@ -421,21 +801,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1425
1426 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1427 {
1428- u64 result;
1429- unsigned long tmp;
1430+ u64 result, tmp;
1431
1432 smp_mb();
1433
1434 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1435-"1: ldrexd %0, %H0, [%3]\n"
1436-" subs %0, %0, #1\n"
1437-" sbc %H0, %H0, #0\n"
1438+"1: ldrexd %1, %H1, [%3]\n"
1439+" subs %0, %1, #1\n"
1440+" sbcs %H0, %H1, #0\n"
1441+
1442+#ifdef CONFIG_PAX_REFCOUNT
1443+" bvc 3f\n"
1444+" mov %0, %1\n"
1445+" mov %H0, %H1\n"
1446+"2: bkpt 0xf103\n"
1447+"3:\n"
1448+#endif
1449+
1450 " teq %H0, #0\n"
1451-" bmi 2f\n"
1452+" bmi 4f\n"
1453 " strexd %1, %0, %H0, [%3]\n"
1454 " teq %1, #0\n"
1455 " bne 1b\n"
1456-"2:"
1457+"4:\n"
1458+
1459+#ifdef CONFIG_PAX_REFCOUNT
1460+ _ASM_EXTABLE(2b, 4b)
1461+#endif
1462+
1463 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1464 : "r" (&v->counter)
1465 : "cc");
1466@@ -458,13 +851,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1467 " teq %0, %5\n"
1468 " teqeq %H0, %H5\n"
1469 " moveq %1, #0\n"
1470-" beq 2f\n"
1471+" beq 4f\n"
1472 " adds %0, %0, %6\n"
1473-" adc %H0, %H0, %H6\n"
1474+" adcs %H0, %H0, %H6\n"
1475+
1476+#ifdef CONFIG_PAX_REFCOUNT
1477+" bvc 3f\n"
1478+"2: bkpt 0xf103\n"
1479+"3:\n"
1480+#endif
1481+
1482 " strexd %2, %0, %H0, [%4]\n"
1483 " teq %2, #0\n"
1484 " bne 1b\n"
1485-"2:"
1486+"4:\n"
1487+
1488+#ifdef CONFIG_PAX_REFCOUNT
1489+ _ASM_EXTABLE(2b, 4b)
1490+#endif
1491+
1492 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1493 : "r" (&v->counter), "r" (u), "r" (a)
1494 : "cc");
1495@@ -477,10 +882,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1496
1497 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1498 #define atomic64_inc(v) atomic64_add(1LL, (v))
1499+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1500 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1501+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1502 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1503 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1504 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1505+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1506 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1507 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1508 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
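
The intended division of labor for the two counter types above: reference counts stay atomic_t, where the adds/bvc/bkpt sequences trap the task on signed overflow under PAX_REFCOUNT, while counters that may legitimately wrap opt out through the *_unchecked API. A short sketch (the two variables are illustrative):

	#include <linux/atomic.h>

	static atomic_t obj_refcnt = ATOMIC_INIT(1);		/* overflow traps */
	static atomic_unchecked_t rx_packets = ATOMIC_INIT(0);	/* free to wrap */

	static void obj_get(void)  { atomic_inc(&obj_refcnt); }
	static void count_rx(void) { atomic_inc_unchecked(&rx_packets); }
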
1509diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1510index 75fe66b..ba3dee4 100644
1511--- a/arch/arm/include/asm/cache.h
1512+++ b/arch/arm/include/asm/cache.h
1513@@ -4,8 +4,10 @@
1514 #ifndef __ASMARM_CACHE_H
1515 #define __ASMARM_CACHE_H
1516
1517+#include <linux/const.h>
1518+
1519 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1520-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1521+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1522
1523 /*
1524 * Memory returned by kmalloc() may be used for DMA, so we must make
1525@@ -24,5 +26,6 @@
1526 #endif
1527
1528 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1529+#define __read_only __attribute__ ((__section__(".data..read_only")))
1530
1531 #endif
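
The new __read_only attribute places an object in .data..read_only, which PaX's KERNEXEC maps read-only once the kernel is up; legitimate writers must open a temporary window. An illustrative sketch using the pax_open_kernel()/pax_close_kernel() pair that appears later in this patch:

	static int feature_enabled __read_only = 1;

	static void disable_feature(void)
	{
		pax_open_kernel();	/* briefly lift the write protection */
		feature_enabled = 0;
		pax_close_kernel();
	}
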
1532diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1533index 17d0ae8..014e350 100644
1534--- a/arch/arm/include/asm/cacheflush.h
1535+++ b/arch/arm/include/asm/cacheflush.h
1536@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1537 void (*dma_unmap_area)(const void *, size_t, int);
1538
1539 void (*dma_flush_range)(const void *, const void *);
1540-};
1541+} __no_const;
1542
1543 /*
1544 * Select the calling method
1545diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1546index 6dcc164..b14d917 100644
1547--- a/arch/arm/include/asm/checksum.h
1548+++ b/arch/arm/include/asm/checksum.h
1549@@ -37,7 +37,19 @@ __wsum
1550 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1551
1552 __wsum
1553-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1554+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1555+
1556+static inline __wsum
1557+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1558+{
1559+ __wsum ret;
1560+ pax_open_userland();
1561+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1562+ pax_close_userland();
1563+ return ret;
1564+}
1565+
1566+
1567
1568 /*
1569 * Fold a partial checksum without adding pseudo headers
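
The wrapper above shows a bracketing pattern that recurs throughout this patch: under UDEREF, userland is not accessible from kernel context by default, so every raw user access is fenced by pax_open_userland()/pax_close_userland(). A generic sketch of the same shape (fetch_user_u32 is an illustrative helper, not part of the patch):

	static inline int fetch_user_u32(const u32 __user *p, u32 *val)
	{
		int err;

		pax_open_userland();	/* open the user access window */
		err = __get_user(*val, p);
		pax_close_userland();	/* and close it again */
		return err;
	}
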
1570diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1571index 4f009c1..466c59b 100644
1572--- a/arch/arm/include/asm/cmpxchg.h
1573+++ b/arch/arm/include/asm/cmpxchg.h
1574@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1575
1576 #define xchg(ptr,x) \
1577 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1578+#define xchg_unchecked(ptr,x) \
1579+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1580
1581 #include <asm-generic/cmpxchg-local.h>
1582
1583diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1584index 6ddbe44..b5e38b1 100644
1585--- a/arch/arm/include/asm/domain.h
1586+++ b/arch/arm/include/asm/domain.h
1587@@ -48,18 +48,37 @@
1588 * Domain types
1589 */
1590 #define DOMAIN_NOACCESS 0
1591-#define DOMAIN_CLIENT 1
1592 #ifdef CONFIG_CPU_USE_DOMAINS
1593+#define DOMAIN_USERCLIENT 1
1594+#define DOMAIN_KERNELCLIENT 1
1595 #define DOMAIN_MANAGER 3
1596+#define DOMAIN_VECTORS DOMAIN_USER
1597 #else
1598+
1599+#ifdef CONFIG_PAX_KERNEXEC
1600 #define DOMAIN_MANAGER 1
1601+#define DOMAIN_KERNEXEC 3
1602+#else
1603+#define DOMAIN_MANAGER 1
1604+#endif
1605+
1606+#ifdef CONFIG_PAX_MEMORY_UDEREF
1607+#define DOMAIN_USERCLIENT 0
1608+#define DOMAIN_UDEREF 1
1609+#define DOMAIN_VECTORS DOMAIN_KERNEL
1610+#else
1611+#define DOMAIN_USERCLIENT 1
1612+#define DOMAIN_VECTORS DOMAIN_USER
1613+#endif
1614+#define DOMAIN_KERNELCLIENT 1
1615+
1616 #endif
1617
1618 #define domain_val(dom,type) ((type) << (2*(dom)))
1619
1620 #ifndef __ASSEMBLY__
1621
1622-#ifdef CONFIG_CPU_USE_DOMAINS
1623+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1624 static inline void set_domain(unsigned val)
1625 {
1626 asm volatile(
1627@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1628 isb();
1629 }
1630
1631-#define modify_domain(dom,type) \
1632- do { \
1633- struct thread_info *thread = current_thread_info(); \
1634- unsigned int domain = thread->cpu_domain; \
1635- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1636- thread->cpu_domain = domain | domain_val(dom, type); \
1637- set_domain(thread->cpu_domain); \
1638- } while (0)
1639-
1640+extern void modify_domain(unsigned int dom, unsigned int type);
1641 #else
1642 static inline void set_domain(unsigned val) { }
1643 static inline void modify_domain(unsigned dom, unsigned type) { }
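
domain_val() packs a 2-bit access type per domain into the ARM DACR; splitting DOMAIN_USERCLIENT from DOMAIN_KERNELCLIENT above is what lets UDEREF toggle userland access without disturbing kernel mappings. An illustrative packing (DOMAIN_USER, DOMAIN_KERNEL and DOMAIN_IO are defined earlier in this header):

	unsigned dacr = domain_val(DOMAIN_USER,   DOMAIN_USERCLIENT) |
			domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) |
			domain_val(DOMAIN_IO,     DOMAIN_KERNELCLIENT);

	set_domain(dacr);	/* writes the DACR, followed by an isb */
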
1644diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1645index 56211f2..17e8a25 100644
1646--- a/arch/arm/include/asm/elf.h
1647+++ b/arch/arm/include/asm/elf.h
1648@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1649 the loader. We need to make sure that it is out of the way of the program
1650 that it will "exec", and that there is sufficient room for the brk. */
1651
1652-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1653+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1654+
1655+#ifdef CONFIG_PAX_ASLR
1656+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1657+
1658+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1659+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1660+#endif
1661
1662 /* When the program starts, a1 contains a pointer to a function to be
1663 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1664@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1665 extern void elf_set_personality(const struct elf32_hdr *);
1666 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1667
1668-struct mm_struct;
1669-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1670-#define arch_randomize_brk arch_randomize_brk
1671-
1672 #ifdef CONFIG_MMU
1673 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1674 struct linux_binprm;
1675diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1676index de53547..52b9a28 100644
1677--- a/arch/arm/include/asm/fncpy.h
1678+++ b/arch/arm/include/asm/fncpy.h
1679@@ -81,7 +81,9 @@
1680 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1681 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1682 \
1683+ pax_open_kernel(); \
1684 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1685+ pax_close_kernel(); \
1686 flush_icache_range((unsigned long)(dest_buf), \
1687 (unsigned long)(dest_buf) + (size)); \
1688 \
1689diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1690index e42cf59..7b94b8f 100644
1691--- a/arch/arm/include/asm/futex.h
1692+++ b/arch/arm/include/asm/futex.h
1693@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1694 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1695 return -EFAULT;
1696
1697+ pax_open_userland();
1698+
1699 smp_mb();
1700 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1701 "1: ldrex %1, [%4]\n"
1702@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1703 : "cc", "memory");
1704 smp_mb();
1705
1706+ pax_close_userland();
1707+
1708 *uval = val;
1709 return ret;
1710 }
1711@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1712 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1713 return -EFAULT;
1714
1715+ pax_open_userland();
1716+
1717 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1718 "1: " TUSER(ldr) " %1, [%4]\n"
1719 " teq %1, %2\n"
1720@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1721 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1722 : "cc", "memory");
1723
1724+ pax_close_userland();
1725+
1726 *uval = val;
1727 return ret;
1728 }
1729@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1730 return -EFAULT;
1731
1732 pagefault_disable(); /* implies preempt_disable() */
1733+ pax_open_userland();
1734
1735 switch (op) {
1736 case FUTEX_OP_SET:
1737@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1738 ret = -ENOSYS;
1739 }
1740
1741+ pax_close_userland();
1742 pagefault_enable(); /* subsumes preempt_enable() */
1743
1744 if (!ret) {
1745diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1746index 83eb2f7..ed77159 100644
1747--- a/arch/arm/include/asm/kmap_types.h
1748+++ b/arch/arm/include/asm/kmap_types.h
1749@@ -4,6 +4,6 @@
1750 /*
1751 * This is the "bare minimum". AIO seems to require this.
1752 */
1753-#define KM_TYPE_NR 16
1754+#define KM_TYPE_NR 17
1755
1756 #endif
1757diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1758index 9e614a1..3302cca 100644
1759--- a/arch/arm/include/asm/mach/dma.h
1760+++ b/arch/arm/include/asm/mach/dma.h
1761@@ -22,7 +22,7 @@ struct dma_ops {
1762 int (*residue)(unsigned int, dma_t *); /* optional */
1763 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1764 const char *type;
1765-};
1766+} __do_const;
1767
1768 struct dma_struct {
1769 void *addr; /* single DMA address */
1770diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1771index 2fe141f..192dc01 100644
1772--- a/arch/arm/include/asm/mach/map.h
1773+++ b/arch/arm/include/asm/mach/map.h
1774@@ -27,13 +27,16 @@ struct map_desc {
1775 #define MT_MINICLEAN 6
1776 #define MT_LOW_VECTORS 7
1777 #define MT_HIGH_VECTORS 8
1778-#define MT_MEMORY 9
1779+#define MT_MEMORY_RWX 9
1780 #define MT_ROM 10
1781-#define MT_MEMORY_NONCACHED 11
1782+#define MT_MEMORY_NONCACHED_RX 11
1783 #define MT_MEMORY_DTCM 12
1784 #define MT_MEMORY_ITCM 13
1785 #define MT_MEMORY_SO 14
1786 #define MT_MEMORY_DMA_READY 15
1787+#define MT_MEMORY_RW 16
1788+#define MT_MEMORY_RX 17
1789+#define MT_MEMORY_NONCACHED_RW 18
1790
1791 #ifdef CONFIG_MMU
1792 extern void iotable_init(struct map_desc *, int);
1793diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1794index 12f71a1..04e063c 100644
1795--- a/arch/arm/include/asm/outercache.h
1796+++ b/arch/arm/include/asm/outercache.h
1797@@ -35,7 +35,7 @@ struct outer_cache_fns {
1798 #endif
1799 void (*set_debug)(unsigned long);
1800 void (*resume)(void);
1801-};
1802+} __no_const;
1803
1804 #ifdef CONFIG_OUTER_CACHE
1805
1806diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1807index cbdc7a2..32f44fe 100644
1808--- a/arch/arm/include/asm/page.h
1809+++ b/arch/arm/include/asm/page.h
1810@@ -114,7 +114,7 @@ struct cpu_user_fns {
1811 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1812 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1813 unsigned long vaddr, struct vm_area_struct *vma);
1814-};
1815+} __no_const;
1816
1817 #ifdef MULTI_USER
1818 extern struct cpu_user_fns cpu_user;
1819diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1820index 943504f..c37a730 100644
1821--- a/arch/arm/include/asm/pgalloc.h
1822+++ b/arch/arm/include/asm/pgalloc.h
1823@@ -17,6 +17,7 @@
1824 #include <asm/processor.h>
1825 #include <asm/cacheflush.h>
1826 #include <asm/tlbflush.h>
1827+#include <asm/system_info.h>
1828
1829 #define check_pgt_cache() do { } while (0)
1830
1831@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1832 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1833 }
1834
1835+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1836+{
1837+ pud_populate(mm, pud, pmd);
1838+}
1839+
1840 #else /* !CONFIG_ARM_LPAE */
1841
1842 /*
1843@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1844 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1845 #define pmd_free(mm, pmd) do { } while (0)
1846 #define pud_populate(mm,pmd,pte) BUG()
1847+#define pud_populate_kernel(mm,pmd,pte) BUG()
1848
1849 #endif /* CONFIG_ARM_LPAE */
1850
1851@@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1852 __free_page(pte);
1853 }
1854
1855+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1856+{
1857+#ifdef CONFIG_ARM_LPAE
1858+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1859+#else
1860+ if (addr & SECTION_SIZE)
1861+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1862+ else
1863+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1864+#endif
1865+ flush_pmd_entry(pmdp);
1866+}
1867+
1868 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1869 pmdval_t prot)
1870 {
1871@@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1872 static inline void
1873 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1874 {
1875- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1876+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1877 }
1878 #define pmd_pgtable(pmd) pmd_page(pmd)
1879
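
[note] The __section_update() helper added above ORs new protection bits into a section mapping's PMD entry; on 2-level ARM a pmd_t pair covers two 1MB sections, hence the addr & SECTION_SIZE test picking the odd or even slot. A minimal usage sketch, assuming the arch's pmd_off_k() kernel-PMD lookup is available to the caller; set_section_prot() itself is made up for illustration:

/* Hypothetical caller: tighten one 1MB kernel section, e.g. to RDONLY. */
static void set_section_prot(unsigned long addr, pmdval_t prot)
{
	pmd_t *pmd = pmd_off_k(addr);		/* kernel PMD for this address */

	__section_update(pmd, addr, prot);	/* ORs prot in, flushes the entry */
}
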
1880diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1881index 5cfba15..f415e1a 100644
1882--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1883+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1884@@ -20,12 +20,15 @@
1885 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1886 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1887 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1888+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1889 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1890 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1891 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1892+
1893 /*
1894 * - section
1895 */
1896+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1897 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1898 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1899 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1900@@ -37,6 +40,7 @@
1901 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1902 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1903 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1904+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1905
1906 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1907 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1908@@ -66,6 +70,7 @@
1909 * - extended small page/tiny page
1910 */
1911 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1912+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1913 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1914 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1915 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1916diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1917index f97ee02..cc9fe9e 100644
1918--- a/arch/arm/include/asm/pgtable-2level.h
1919+++ b/arch/arm/include/asm/pgtable-2level.h
1920@@ -126,6 +126,9 @@
1921 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1922 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1923
1924+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1925+#define L_PTE_PXN (_AT(pteval_t, 0))
1926+
1927 /*
1928 * These are the memory types, defined to be compatible with
1929 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1930diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1931index 18f5cef..25b8f43 100644
1932--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1933+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1934@@ -41,6 +41,7 @@
1935 */
1936 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1937 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1938+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7)
1939 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1940 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1941 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1942@@ -71,6 +72,7 @@
1943 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1944 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1945 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1946+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1947 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1948
1949 /*
1950diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1951index 86b8fe3..e25f975 100644
1952--- a/arch/arm/include/asm/pgtable-3level.h
1953+++ b/arch/arm/include/asm/pgtable-3level.h
1954@@ -74,6 +74,7 @@
1955 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1956 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1957 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1958+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1959 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1960 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1961 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1962@@ -82,6 +83,7 @@
1963 /*
1964 * To be used in assembly code with the upper page attributes.
1965 */
1966+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1967 #define L_PTE_XN_HIGH (1 << (54 - 32))
1968 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1969
1970diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1971index 9bcd262..fba731c 100644
1972--- a/arch/arm/include/asm/pgtable.h
1973+++ b/arch/arm/include/asm/pgtable.h
1974@@ -30,6 +30,9 @@
1975 #include <asm/pgtable-2level.h>
1976 #endif
1977
1978+#define ktla_ktva(addr) (addr)
1979+#define ktva_ktla(addr) (addr)
1980+
1981 /*
1982 * Just any arbitrary offset to the start of the vmalloc VM area: the
1983 * current 8MB value just means that there will be a 8MB "hole" after the
1984@@ -45,6 +48,9 @@
1985 #define LIBRARY_TEXT_START 0x0c000000
1986
1987 #ifndef __ASSEMBLY__
1988+extern pteval_t __supported_pte_mask;
1989+extern pmdval_t __supported_pmd_mask;
1990+
1991 extern void __pte_error(const char *file, int line, pte_t);
1992 extern void __pmd_error(const char *file, int line, pmd_t);
1993 extern void __pgd_error(const char *file, int line, pgd_t);
1994@@ -53,6 +59,50 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1995 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
1996 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
1997
1998+#define __HAVE_ARCH_PAX_OPEN_KERNEL
1999+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2000+
2001+#ifdef CONFIG_PAX_KERNEXEC
2002+#include <asm/domain.h>
2003+#include <linux/thread_info.h>
2004+#include <linux/preempt.h>
2005+#endif
2006+
2007+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2008+static inline int test_domain(int domain, int domaintype)
2009+{
2010+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2011+}
2012+#endif
2013+
2014+#ifdef CONFIG_PAX_KERNEXEC
2015+static inline unsigned long pax_open_kernel(void) {
2016+#ifdef CONFIG_ARM_LPAE
2017+ /* TODO */
2018+#else
2019+ preempt_disable();
2020+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2021+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2022+#endif
2023+ return 0;
2024+}
2025+
2026+static inline unsigned long pax_close_kernel(void) {
2027+#ifdef CONFIG_ARM_LPAE
2028+ /* TODO */
2029+#else
2030+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2031+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2032+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2033+ preempt_enable_no_resched();
2034+#endif
2035+ return 0;
2036+}
2037+#else
2038+static inline unsigned long pax_open_kernel(void) { return 0; }
2039+static inline unsigned long pax_close_kernel(void) { return 0; }
2040+#endif
2041+
2042 /*
2043 * This is the lowest virtual address we can permit any user space
2044 * mapping to be mapped at. This is particularly important for
2045@@ -72,8 +122,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2046 /*
2047 * The pgprot_* and protection_map entries will be fixed up in runtime
2048 * to include the cachable and bufferable bits based on memory policy,
2049- * as well as any architecture dependent bits like global/ASID and SMP
2050- * shared mapping bits.
2051+ * as well as any architecture dependent bits like global/ASID, PXN,
2052+ * and SMP shared mapping bits.
2053 */
2054 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2055
2056@@ -257,7 +307,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2057 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2058 {
2059 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2060- L_PTE_NONE | L_PTE_VALID;
2061+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2062 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2063 return pte;
2064 }
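
[note] pax_open_kernel()/pax_close_kernel() above temporarily retype DOMAIN_KERNEL so that otherwise read-only kernel mappings become writable on the current CPU, with preemption disabled for the duration of the window. Later hunks in this patch (fiq.c, patch.c, traps.c) all follow the same pairing; a sketch of the intended usage, with a made-up patch_word() helper:

/* Sketch only: write one word into memory KERNEXEC keeps read-only. */
static void patch_word(unsigned long *addr, unsigned long val)
{
	pax_open_kernel();	/* DOMAIN_KERNEL -> DOMAIN_KERNEXEC, preempt off */
	*addr = val;		/* the write is only permitted inside the window */
	pax_close_kernel();	/* back to the client type, preempt on */
}
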
2065diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2066index f3628fb..a0672dd 100644
2067--- a/arch/arm/include/asm/proc-fns.h
2068+++ b/arch/arm/include/asm/proc-fns.h
2069@@ -75,7 +75,7 @@ extern struct processor {
2070 unsigned int suspend_size;
2071 void (*do_suspend)(void *);
2072 void (*do_resume)(void *);
2073-} processor;
2074+} __do_const processor;
2075
2076 #ifndef MULTI_CPU
2077 extern void cpu_proc_init(void);
2078diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2079index ce0dbe7..c085b6f 100644
2080--- a/arch/arm/include/asm/psci.h
2081+++ b/arch/arm/include/asm/psci.h
2082@@ -29,7 +29,7 @@ struct psci_operations {
2083 int (*cpu_off)(struct psci_power_state state);
2084 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
2085 int (*migrate)(unsigned long cpuid);
2086-};
2087+} __no_const;
2088
2089 extern struct psci_operations psci_ops;
2090
2091diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2092index d3a22be..3a69ad5 100644
2093--- a/arch/arm/include/asm/smp.h
2094+++ b/arch/arm/include/asm/smp.h
2095@@ -107,7 +107,7 @@ struct smp_operations {
2096 int (*cpu_disable)(unsigned int cpu);
2097 #endif
2098 #endif
2099-};
2100+} __no_const;
2101
2102 /*
2103 * set platform specific SMP operations
2104diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2105index f00b569..aa5bb41 100644
2106--- a/arch/arm/include/asm/thread_info.h
2107+++ b/arch/arm/include/asm/thread_info.h
2108@@ -77,9 +77,9 @@ struct thread_info {
2109 .flags = 0, \
2110 .preempt_count = INIT_PREEMPT_COUNT, \
2111 .addr_limit = KERNEL_DS, \
2112- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2113- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2114- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2115+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2116+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2117+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2118 .restart_block = { \
2119 .fn = do_no_restart_syscall, \
2120 }, \
2121@@ -152,7 +152,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2122 #define TIF_SYSCALL_AUDIT 9
2123 #define TIF_SYSCALL_TRACEPOINT 10
2124 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2125-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2126+/* within 8 bits of TIF_SYSCALL_TRACE
2127+ * to meet flexible second operand requirements
2128+ */
2129+#define TIF_GRSEC_SETXID 12
2130+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2131 #define TIF_USING_IWMMXT 17
2132 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2133 #define TIF_RESTORE_SIGMASK 20
2134@@ -165,10 +169,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2135 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2136 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2137 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2138+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2139
2140 /* Checks for any syscall work in entry-common.S */
2141 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2142- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2143+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2144
2145 /*
2146 * Change these and you break ASM code in entry-common.S
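
[note] The comment about staying "within 8 bits of TIF_SYSCALL_TRACE" refers to ARM immediate encoding: a flexible second operand is an 8-bit value rotated by an even amount, so entry-common.S can test the whole syscall-work mask with a single tst only if every bit falls inside one such window. A standalone self-check, assuming TIF_SYSCALL_TRACE is bit 8 as in this kernel:

#include <assert.h>

int main(void)
{
	/* TRACE(8), AUDIT(9), TRACEPOINT(10), SECCOMP(11), GRSEC_SETXID(12) */
	unsigned long work = (1UL << 8) | (1UL << 9) | (1UL << 10) |
			     (1UL << 11) | (1UL << 12);

	/* all bits inside one byte-aligned 8-bit window => encodable */
	assert((work & ~(0xffUL << 8)) == 0);
	return 0;
}
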
2147diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
2148index bdf2b84..aa9b4ac 100644
2149--- a/arch/arm/include/asm/tlb.h
2150+++ b/arch/arm/include/asm/tlb.h
2151@@ -43,6 +43,7 @@ struct mmu_gather {
2152 struct mm_struct *mm;
2153 unsigned int fullmm;
2154 struct vm_area_struct *vma;
2155+ unsigned long start, end;
2156 unsigned long range_start;
2157 unsigned long range_end;
2158 unsigned int nr;
2159@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
2160 }
2161
2162 static inline void
2163-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
2164+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
2165 {
2166 tlb->mm = mm;
2167- tlb->fullmm = fullmm;
2168+ tlb->fullmm = !(start | (end+1));
2169+ tlb->start = start;
2170+ tlb->end = end;
2171 tlb->vma = NULL;
2172 tlb->max = ARRAY_SIZE(tlb->local);
2173 tlb->pages = tlb->local;
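
[note] The new fullmm computation is a branch-free encoding of "start == 0 && end == ~0UL", the convention callers use when tearing down an entire address space: end + 1 wraps to 0 exactly when end is ~0UL, so the OR is zero only in the full-mm case. A standalone check:

#include <assert.h>

int main(void)
{
	unsigned long start = 0, end = ~0UL;

	assert(!(start | (end + 1)));		/* whole address space */

	start = 0x8000; end = 0x9000;
	assert((start | (end + 1)) != 0);	/* partial range */
	return 0;
}
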
2174diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2175index 7e1f760..de33b13 100644
2176--- a/arch/arm/include/asm/uaccess.h
2177+++ b/arch/arm/include/asm/uaccess.h
2178@@ -18,6 +18,7 @@
2179 #include <asm/domain.h>
2180 #include <asm/unified.h>
2181 #include <asm/compiler.h>
2182+#include <asm/pgtable.h>
2183
2184 #define VERIFY_READ 0
2185 #define VERIFY_WRITE 1
2186@@ -63,11 +64,38 @@ extern int __put_user_bad(void);
2187 static inline void set_fs(mm_segment_t fs)
2188 {
2189 current_thread_info()->addr_limit = fs;
2190- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2191+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2192 }
2193
2194 #define segment_eq(a,b) ((a) == (b))
2195
2196+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2197+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2198+
2199+static inline void pax_open_userland(void)
2200+{
2201+
2202+#ifdef CONFIG_PAX_MEMORY_UDEREF
2203+ if (segment_eq(get_fs(), USER_DS)) {
2204+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2205+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2206+ }
2207+#endif
2208+
2209+}
2210+
2211+static inline void pax_close_userland(void)
2212+{
2213+
2214+#ifdef CONFIG_PAX_MEMORY_UDEREF
2215+ if (segment_eq(get_fs(), USER_DS)) {
2216+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2217+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2218+ }
2219+#endif
2220+
2221+}
2222+
2223 #define __addr_ok(addr) ({ \
2224 unsigned long flag; \
2225 __asm__("cmp %2, %0; movlo %0, #0" \
2226@@ -143,8 +171,12 @@ extern int __get_user_4(void *);
2227
2228 #define get_user(x,p) \
2229 ({ \
2230+ int __e; \
2231 might_fault(); \
2232- __get_user_check(x,p); \
2233+ pax_open_userland(); \
2234+ __e = __get_user_check(x,p); \
2235+ pax_close_userland(); \
2236+ __e; \
2237 })
2238
2239 extern int __put_user_1(void *, unsigned int);
2240@@ -188,8 +220,12 @@ extern int __put_user_8(void *, unsigned long long);
2241
2242 #define put_user(x,p) \
2243 ({ \
2244+ int __e; \
2245 might_fault(); \
2246- __put_user_check(x,p); \
2247+ pax_open_userland(); \
2248+ __e = __put_user_check(x,p); \
2249+ pax_close_userland(); \
2250+ __e; \
2251 })
2252
2253 #else /* CONFIG_MMU */
2254@@ -230,13 +266,17 @@ static inline void set_fs(mm_segment_t fs)
2255 #define __get_user(x,ptr) \
2256 ({ \
2257 long __gu_err = 0; \
2258+ pax_open_userland(); \
2259 __get_user_err((x),(ptr),__gu_err); \
2260+ pax_close_userland(); \
2261 __gu_err; \
2262 })
2263
2264 #define __get_user_error(x,ptr,err) \
2265 ({ \
2266+ pax_open_userland(); \
2267 __get_user_err((x),(ptr),err); \
2268+ pax_close_userland(); \
2269 (void) 0; \
2270 })
2271
2272@@ -312,13 +352,17 @@ do { \
2273 #define __put_user(x,ptr) \
2274 ({ \
2275 long __pu_err = 0; \
2276+ pax_open_userland(); \
2277 __put_user_err((x),(ptr),__pu_err); \
2278+ pax_close_userland(); \
2279 __pu_err; \
2280 })
2281
2282 #define __put_user_error(x,ptr,err) \
2283 ({ \
2284+ pax_open_userland(); \
2285 __put_user_err((x),(ptr),err); \
2286+ pax_close_userland(); \
2287 (void) 0; \
2288 })
2289
2290@@ -418,11 +462,44 @@ do { \
2291
2292
2293 #ifdef CONFIG_MMU
2294-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2295-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2296+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2297+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2298+
2299+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2300+{
2301+ unsigned long ret;
2302+
2303+ check_object_size(to, n, false);
2304+ pax_open_userland();
2305+ ret = ___copy_from_user(to, from, n);
2306+ pax_close_userland();
2307+ return ret;
2308+}
2309+
2310+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2311+{
2312+ unsigned long ret;
2313+
2314+ check_object_size(from, n, true);
2315+ pax_open_userland();
2316+ ret = ___copy_to_user(to, from, n);
2317+ pax_close_userland();
2318+ return ret;
2319+}
2320+
2321 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2322-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2323+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2324 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2325+
2326+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2327+{
2328+ unsigned long ret;
2329+ pax_open_userland();
2330+ ret = ___clear_user(addr, n);
2331+ pax_close_userland();
2332+ return ret;
2333+}
2334+
2335 #else
2336 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2337 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2338@@ -431,6 +508,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2339
2340 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2341 {
2342+ if ((long)n < 0)
2343+ return n;
2344+
2345 if (access_ok(VERIFY_READ, from, n))
2346 n = __copy_from_user(to, from, n);
2347 else /* security hole - plug it */
2348@@ -440,6 +520,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2349
2350 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2351 {
2352+ if ((long)n < 0)
2353+ return n;
2354+
2355 if (access_ok(VERIFY_WRITE, to, n))
2356 n = __copy_to_user(to, from, n);
2357 return n;
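
[note] The (long)n < 0 checks added to copy_from_user()/copy_to_user() reject sizes with the top bit set before access_ok() or any copying runs. Such sizes are almost always the result of a signed length underflow in the caller; a minimal userspace-style illustration, where checked_copy() is a stand-in and not the kernel function:

#include <stdio.h>

static unsigned long checked_copy(void *to, const void *from, unsigned long n)
{
	(void)to; (void)from;
	if ((long)n < 0)	/* huge n: treat as "nothing copied" */
		return n;
	/* ... access_ok() and the real copy would follow here ... */
	return 0;
}

int main(void)
{
	char buf[16];
	unsigned long n = (unsigned long)16 - 32;	/* underflows to a huge value */

	printf("bytes not copied: %lu\n", checked_copy(buf, buf, n));
	return 0;
}
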
2358diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2359index 96ee092..37f1844 100644
2360--- a/arch/arm/include/uapi/asm/ptrace.h
2361+++ b/arch/arm/include/uapi/asm/ptrace.h
2362@@ -73,7 +73,7 @@
2363 * ARMv7 groups of PSR bits
2364 */
2365 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2366-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2367+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2368 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2369 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2370
2371diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2372index 60d3b73..e5a0f22 100644
2373--- a/arch/arm/kernel/armksyms.c
2374+++ b/arch/arm/kernel/armksyms.c
2375@@ -53,7 +53,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2376
2377 /* networking */
2378 EXPORT_SYMBOL(csum_partial);
2379-EXPORT_SYMBOL(csum_partial_copy_from_user);
2380+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2381 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2382 EXPORT_SYMBOL(__csum_ipv6_magic);
2383
2384@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2385 #ifdef CONFIG_MMU
2386 EXPORT_SYMBOL(copy_page);
2387
2388-EXPORT_SYMBOL(__copy_from_user);
2389-EXPORT_SYMBOL(__copy_to_user);
2390-EXPORT_SYMBOL(__clear_user);
2391+EXPORT_SYMBOL(___copy_from_user);
2392+EXPORT_SYMBOL(___copy_to_user);
2393+EXPORT_SYMBOL(___clear_user);
2394
2395 EXPORT_SYMBOL(__get_user_1);
2396 EXPORT_SYMBOL(__get_user_2);
2397diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2398index d43c7e5..257c050 100644
2399--- a/arch/arm/kernel/entry-armv.S
2400+++ b/arch/arm/kernel/entry-armv.S
2401@@ -47,6 +47,87 @@
2402 9997:
2403 .endm
2404
2405+ .macro pax_enter_kernel
2406+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2407+ @ make aligned space for saved DACR
2408+ sub sp, sp, #8
2409+ @ save regs
2410+ stmdb sp!, {r1, r2}
2411+ @ read DACR from cpu_domain into r1
2412+ mov r2, sp
2413+ @ assume 8K pages, since we have to split the immediate in two
2414+ bic r2, r2, #(0x1fc0)
2415+ bic r2, r2, #(0x3f)
2416+ ldr r1, [r2, #TI_CPU_DOMAIN]
2417+ @ store old DACR on stack
2418+ str r1, [sp, #8]
2419+#ifdef CONFIG_PAX_KERNEXEC
2420+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2421+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2422+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2423+#endif
2424+#ifdef CONFIG_PAX_MEMORY_UDEREF
2425+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2426+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2427+#endif
2428+ @ write r1 to current_thread_info()->cpu_domain
2429+ str r1, [r2, #TI_CPU_DOMAIN]
2430+ @ write r1 to DACR
2431+ mcr p15, 0, r1, c3, c0, 0
2432+ @ instruction sync
2433+ instr_sync
2434+ @ restore regs
2435+ ldmia sp!, {r1, r2}
2436+#endif
2437+ .endm
2438+
2439+ .macro pax_open_userland
2440+#ifdef CONFIG_PAX_MEMORY_UDEREF
2441+ @ save regs
2442+ stmdb sp!, {r0, r1}
2443+ @ read DACR from cpu_domain into r1
2444+ mov r0, sp
2445+ @ assume 8K pages, since we have to split the immediate in two
2446+ bic r0, r0, #(0x1fc0)
2447+ bic r0, r0, #(0x3f)
2448+ ldr r1, [r0, #TI_CPU_DOMAIN]
2449+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2450+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2451+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2452+ @ write r1 to current_thread_info()->cpu_domain
2453+ str r1, [r0, #TI_CPU_DOMAIN]
2454+ @ write r1 to DACR
2455+ mcr p15, 0, r1, c3, c0, 0
2456+ @ instruction sync
2457+ instr_sync
2458+ @ restore regs
2459+ ldmia sp!, {r0, r1}
2460+#endif
2461+ .endm
2462+
2463+ .macro pax_close_userland
2464+#ifdef CONFIG_PAX_MEMORY_UDEREF
2465+ @ save regs
2466+ stmdb sp!, {r0, r1}
2467+ @ read DACR from cpu_domain into r1
2468+ mov r0, sp
2469+ @ assume 8K pages, since we have to split the immediate in two
2470+ bic r0, r0, #(0x1fc0)
2471+ bic r0, r0, #(0x3f)
2472+ ldr r1, [r0, #TI_CPU_DOMAIN]
2473+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2474+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2475+ @ write r1 to current_thread_info()->cpu_domain
2476+ str r1, [r0, #TI_CPU_DOMAIN]
2477+ @ write r1 to DACR
2478+ mcr p15, 0, r1, c3, c0, 0
2479+ @ instruction sync
2480+ instr_sync
2481+ @ restore regs
2482+ ldmia sp!, {r0, r1}
2483+#endif
2484+ .endm
2485+
2486 .macro pabt_helper
2487 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2488 #ifdef MULTI_PABORT
2489@@ -89,11 +170,15 @@
2490 * Invalid mode handlers
2491 */
2492 .macro inv_entry, reason
2493+
2494+ pax_enter_kernel
2495+
2496 sub sp, sp, #S_FRAME_SIZE
2497 ARM( stmib sp, {r1 - lr} )
2498 THUMB( stmia sp, {r0 - r12} )
2499 THUMB( str sp, [sp, #S_SP] )
2500 THUMB( str lr, [sp, #S_LR] )
2501+
2502 mov r1, #\reason
2503 .endm
2504
2505@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2506 .macro svc_entry, stack_hole=0
2507 UNWIND(.fnstart )
2508 UNWIND(.save {r0 - pc} )
2509+
2510+ pax_enter_kernel
2511+
2512 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2513+
2514 #ifdef CONFIG_THUMB2_KERNEL
2515 SPFIX( str r0, [sp] ) @ temporarily saved
2516 SPFIX( mov r0, sp )
2517@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2518 ldmia r0, {r3 - r5}
2519 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2520 mov r6, #-1 @ "" "" "" ""
2521+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2522+ @ offset sp by 8 as done in pax_enter_kernel
2523+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2524+#else
2525 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2526+#endif
2527 SPFIX( addeq r2, r2, #4 )
2528 str r3, [sp, #-4]! @ save the "real" r0 copied
2529 @ from the exception stack
2530@@ -316,6 +410,9 @@ ENDPROC(__pabt_svc)
2531 .macro usr_entry
2532 UNWIND(.fnstart )
2533 UNWIND(.cantunwind ) @ don't unwind the user space
2534+
2535+ pax_enter_kernel_user
2536+
2537 sub sp, sp, #S_FRAME_SIZE
2538 ARM( stmib sp, {r1 - r12} )
2539 THUMB( stmia sp, {r0 - r12} )
2540@@ -357,7 +454,8 @@ ENDPROC(__pabt_svc)
2541 .endm
2542
2543 .macro kuser_cmpxchg_check
2544-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
2545+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
2546+ !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
2547 #ifndef CONFIG_MMU
2548 #warning "NPTL on non MMU needs fixing"
2549 #else
2550@@ -414,7 +512,9 @@ __und_usr:
2551 tst r3, #PSR_T_BIT @ Thumb mode?
2552 bne __und_usr_thumb
2553 sub r4, r2, #4 @ ARM instr at LR - 4
2554+ pax_open_userland
2555 1: ldrt r0, [r4]
2556+ pax_close_userland
2557 #ifdef CONFIG_CPU_ENDIAN_BE8
2558 rev r0, r0 @ little endian instruction
2559 #endif
2560@@ -449,10 +549,14 @@ __und_usr_thumb:
2561 */
2562 .arch armv6t2
2563 #endif
2564+ pax_open_userland
2565 2: ldrht r5, [r4]
2566+ pax_close_userland
2567 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2568 blo __und_usr_fault_16 @ 16bit undefined instruction
2569+ pax_open_userland
2570 3: ldrht r0, [r2]
2571+ pax_close_userland
2572 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2573 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2574 orr r0, r0, r5, lsl #16
2575@@ -481,7 +585,8 @@ ENDPROC(__und_usr)
2576 */
2577 .pushsection .fixup, "ax"
2578 .align 2
2579-4: mov pc, r9
2580+4: pax_close_userland
2581+ mov pc, r9
2582 .popsection
2583 .pushsection __ex_table,"a"
2584 .long 1b, 4b
2585@@ -690,7 +795,7 @@ ENTRY(__switch_to)
2586 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
2587 THUMB( str sp, [ip], #4 )
2588 THUMB( str lr, [ip], #4 )
2589-#ifdef CONFIG_CPU_USE_DOMAINS
2590+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2591 ldr r6, [r2, #TI_CPU_DOMAIN]
2592 #endif
2593 set_tls r3, r4, r5
2594@@ -699,7 +804,7 @@ ENTRY(__switch_to)
2595 ldr r8, =__stack_chk_guard
2596 ldr r7, [r7, #TSK_STACK_CANARY]
2597 #endif
2598-#ifdef CONFIG_CPU_USE_DOMAINS
2599+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2600 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2601 #endif
2602 mov r5, r0
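
[note] The bic/orr pairs in these macros edit one field of the Domain Access Control Register, which packs sixteen 2-bit access types, one per domain. The same clear-then-set step, reduced to C; the domain numbering here is illustrative, while the access-type values match the architecture:

/* DACR field helpers; mirrors what the bic/orr pairs above do. */
#define domain_val(dom, type)	((unsigned long)(type) << ((dom) * 2))

enum { DACR_NOACCESS = 0, DACR_CLIENT = 1, DACR_MANAGER = 3 };

static unsigned long set_domain_type(unsigned long dacr, int dom, int type)
{
	dacr &= ~domain_val(dom, 3);	/* bic: clear the 2-bit field */
	dacr |=  domain_val(dom, type);	/* orr: install the new type */
	return dacr;			/* would be written back via mcr p15 */
}
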
2603diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2604index bc5bc0a..d0998ca 100644
2605--- a/arch/arm/kernel/entry-common.S
2606+++ b/arch/arm/kernel/entry-common.S
2607@@ -10,18 +10,46 @@
2608
2609 #include <asm/unistd.h>
2610 #include <asm/ftrace.h>
2611+#include <asm/domain.h>
2612 #include <asm/unwind.h>
2613
2614+#include "entry-header.S"
2615+
2616 #ifdef CONFIG_NEED_RET_TO_USER
2617 #include <mach/entry-macro.S>
2618 #else
2619 .macro arch_ret_to_user, tmp1, tmp2
2620+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2621+ @ save regs
2622+ stmdb sp!, {r1, r2}
2623+ @ read DACR from cpu_domain into r1
2624+ mov r2, sp
2625+ @ assume 8K pages, since we have to split the immediate in two
2626+ bic r2, r2, #(0x1fc0)
2627+ bic r2, r2, #(0x3f)
2628+ ldr r1, [r2, #TI_CPU_DOMAIN]
2629+#ifdef CONFIG_PAX_KERNEXEC
2630+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2631+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2632+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2633+#endif
2634+#ifdef CONFIG_PAX_MEMORY_UDEREF
2635+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2636+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2637+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2638+#endif
2639+ @ write r1 to current_thread_info()->cpu_domain
2640+ str r1, [r2, #TI_CPU_DOMAIN]
2641+ @ write r1 to DACR
2642+ mcr p15, 0, r1, c3, c0, 0
2643+ @ instruction sync
2644+ instr_sync
2645+ @ restore regs
2646+ ldmia sp!, {r1, r2}
2647+#endif
2648 .endm
2649 #endif
2650
2651-#include "entry-header.S"
2652-
2653-
2654 .align 5
2655 /*
2656 * This is the fast syscall return path. We do as little as
2657@@ -350,6 +378,7 @@ ENDPROC(ftrace_stub)
2658
2659 .align 5
2660 ENTRY(vector_swi)
2661+
2662 sub sp, sp, #S_FRAME_SIZE
2663 stmia sp, {r0 - r12} @ Calling r0 - r12
2664 ARM( add r8, sp, #S_PC )
2665@@ -399,6 +428,12 @@ ENTRY(vector_swi)
2666 ldr scno, [lr, #-4] @ get SWI instruction
2667 #endif
2668
2669+ /*
2670+ * do this here to avoid a performance hit of wrapping the code above
2671+ * that directly dereferences userland to parse the SWI instruction
2672+ */
2673+ pax_enter_kernel_user
2674+
2675 #ifdef CONFIG_ALIGNMENT_TRAP
2676 ldr ip, __cr_alignment
2677 ldr ip, [ip]
2678diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2679index 160f337..db67ee4 100644
2680--- a/arch/arm/kernel/entry-header.S
2681+++ b/arch/arm/kernel/entry-header.S
2682@@ -73,6 +73,60 @@
2683 msr cpsr_c, \rtemp @ switch back to the SVC mode
2684 .endm
2685
2686+ .macro pax_enter_kernel_user
2687+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2688+ @ save regs
2689+ stmdb sp!, {r0, r1}
2690+ @ read DACR from cpu_domain into r1
2691+ mov r0, sp
2692+ @ assume 8K pages, since we have to split the immediate in two
2693+ bic r0, r0, #(0x1fc0)
2694+ bic r0, r0, #(0x3f)
2695+ ldr r1, [r0, #TI_CPU_DOMAIN]
2696+#ifdef CONFIG_PAX_MEMORY_UDEREF
2697+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2698+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2699+#endif
2700+#ifdef CONFIG_PAX_KERNEXEC
2701+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2702+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2703+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2704+#endif
2705+ @ write r1 to current_thread_info()->cpu_domain
2706+ str r1, [r0, #TI_CPU_DOMAIN]
2707+ @ write r1 to DACR
2708+ mcr p15, 0, r1, c3, c0, 0
2709+ @ instruction sync
2710+ instr_sync
2711+ @ restore regs
2712+ ldmia sp!, {r0, r1}
2713+#endif
2714+ .endm
2715+
2716+ .macro pax_exit_kernel
2717+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2718+ @ save regs
2719+ stmdb sp!, {r0, r1}
2720+ @ read old DACR from stack into r1
2721+ ldr r1, [sp, #(8 + S_SP)]
2722+ sub r1, r1, #8
2723+ ldr r1, [r1]
2724+
2725+ @ write r1 to current_thread_info()->cpu_domain
2726+ mov r0, sp
2727+ @ assume 8K pages, since we have to split the immediate in two
2728+ bic r0, r0, #(0x1fc0)
2729+ bic r0, r0, #(0x3f)
2730+ str r1, [r0, #TI_CPU_DOMAIN]
2731+ @ write r1 to DACR
2732+ mcr p15, 0, r1, c3, c0, 0
2733+ @ instruction sync
2734+ instr_sync
2735+ @ restore regs
2736+ ldmia sp!, {r0, r1}
2737+#endif
2738+ .endm
2739+
2740 #ifndef CONFIG_THUMB2_KERNEL
2741 .macro svc_exit, rpsr, irq = 0
2742 .if \irq != 0
2743@@ -92,6 +146,9 @@
2744 blne trace_hardirqs_off
2745 #endif
2746 .endif
2747+
2748+ pax_exit_kernel
2749+
2750 msr spsr_cxsf, \rpsr
2751 #if defined(CONFIG_CPU_V6)
2752 ldr r0, [sp]
2753@@ -155,6 +212,9 @@
2754 blne trace_hardirqs_off
2755 #endif
2756 .endif
2757+
2758+ pax_exit_kernel
2759+
2760 ldr lr, [sp, #S_SP] @ top of the stack
2761 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2762 clrex @ clear the exclusive monitor
2763diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2764index 25442f4..d4948fc 100644
2765--- a/arch/arm/kernel/fiq.c
2766+++ b/arch/arm/kernel/fiq.c
2767@@ -84,17 +84,16 @@ int show_fiq_list(struct seq_file *p, int prec)
2768
2769 void set_fiq_handler(void *start, unsigned int length)
2770 {
2771-#if defined(CONFIG_CPU_USE_DOMAINS)
2772- void *base = (void *)0xffff0000;
2773-#else
2774 void *base = vectors_page;
2775-#endif
2776 unsigned offset = FIQ_OFFSET;
2777
2778+ pax_open_kernel();
2779 memcpy(base + offset, start, length);
2780+ pax_close_kernel();
2781+
2782+ if (!cache_is_vipt_nonaliasing())
2783+ flush_icache_range(base + offset, offset + length);
2784 flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
2785- if (!vectors_high())
2786- flush_icache_range(offset, offset + length);
2787 }
2788
2789 int claim_fiq(struct fiq_handler *f)
2790diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2791index 8bac553..caee108 100644
2792--- a/arch/arm/kernel/head.S
2793+++ b/arch/arm/kernel/head.S
2794@@ -52,7 +52,9 @@
2795 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2796
2797 .macro pgtbl, rd, phys
2798- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2799+ mov \rd, #TEXT_OFFSET
2800+ sub \rd, #PG_DIR_SIZE
2801+ add \rd, \rd, \phys
2802 .endm
2803
2804 /*
2805@@ -434,7 +436,7 @@ __enable_mmu:
2806 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2807 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2808 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2809- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2810+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2811 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2812 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2813 #endif
2814diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
2815index 1fd749e..47adb08 100644
2816--- a/arch/arm/kernel/hw_breakpoint.c
2817+++ b/arch/arm/kernel/hw_breakpoint.c
2818@@ -1029,7 +1029,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
2819 return NOTIFY_OK;
2820 }
2821
2822-static struct notifier_block __cpuinitdata dbg_reset_nb = {
2823+static struct notifier_block dbg_reset_nb = {
2824 .notifier_call = dbg_reset_notify,
2825 };
2826
2827diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2828index 1e9be5d..03edbc2 100644
2829--- a/arch/arm/kernel/module.c
2830+++ b/arch/arm/kernel/module.c
2831@@ -37,12 +37,37 @@
2832 #endif
2833
2834 #ifdef CONFIG_MMU
2835-void *module_alloc(unsigned long size)
2836+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2837 {
2838+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2839+ return NULL;
2840 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2841- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2842+ GFP_KERNEL, prot, -1,
2843 __builtin_return_address(0));
2844 }
2845+
2846+void *module_alloc(unsigned long size)
2847+{
2848+
2849+#ifdef CONFIG_PAX_KERNEXEC
2850+ return __module_alloc(size, PAGE_KERNEL);
2851+#else
2852+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2853+#endif
2854+
2855+}
2856+
2857+#ifdef CONFIG_PAX_KERNEXEC
2858+void module_free_exec(struct module *mod, void *module_region)
2859+{
2860+ module_free(mod, module_region);
2861+}
2862+
2863+void *module_alloc_exec(unsigned long size)
2864+{
2865+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2866+}
2867+#endif
2868 #endif
2869
2870 int
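
[note] With KERNEXEC, module_alloc() now hands out non-executable PAGE_KERNEL memory and the executable mapping comes from the separate module_alloc_exec(). A loader-side sketch of how the two allocations are meant to be combined; the wrapper is made up, assumes kernel context, and omits the unwind a real loader needs on failure:

static int alloc_module_regions(unsigned long code_size, unsigned long data_size,
				void **code, void **data)
{
	*data = module_alloc(data_size);	/* PAGE_KERNEL: RW, never X */
	*code = module_alloc_exec(code_size);	/* PAGE_KERNEL_EXEC: for text */
	return (*code && *data) ? 0 : -ENOMEM;	/* sketch: no cleanup on error */
}
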
2871diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2872index 07314af..c46655c 100644
2873--- a/arch/arm/kernel/patch.c
2874+++ b/arch/arm/kernel/patch.c
2875@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2876 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2877 int size;
2878
2879+ pax_open_kernel();
2880 if (thumb2 && __opcode_is_thumb16(insn)) {
2881 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2882 size = sizeof(u16);
2883@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2884 *(u32 *)addr = insn;
2885 size = sizeof(u32);
2886 }
2887+ pax_close_kernel();
2888
2889 flush_icache_range((uintptr_t)(addr),
2890 (uintptr_t)(addr) + size);
2891diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
2892index d9f5cd4..e186ee1 100644
2893--- a/arch/arm/kernel/perf_event.c
2894+++ b/arch/arm/kernel/perf_event.c
2895@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
2896 static int
2897 armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
2898 {
2899- int mapping = (*event_map)[config];
2900+ int mapping;
2901+
2902+ if (config >= PERF_COUNT_HW_MAX)
2903+ return -EINVAL;
2904+
2905+ mapping = (*event_map)[config];
2906 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
2907 }
2908
2909@@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
2910 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
2911 struct pmu *leader_pmu = event->group_leader->pmu;
2912
2913+ if (is_software_event(event))
2914+ return 1;
2915+
2916 if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
2917 return 1;
2918
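
[note] armpmu_map_hw_event() previously indexed the event table with a caller-controlled config before checking it; the fix is the usual validate-then-index pattern. The same shape in isolation:

#include <errno.h>

/* Generic form of the fix: reject the index before the table lookup. */
static int map_event(const int *table, unsigned int nelems, unsigned long config)
{
	if (config >= nelems)
		return -EINVAL;
	return table[config];
}
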
2919diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
2920index 1f2740e..b36e225 100644
2921--- a/arch/arm/kernel/perf_event_cpu.c
2922+++ b/arch/arm/kernel/perf_event_cpu.c
2923@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
2924 return NOTIFY_OK;
2925 }
2926
2927-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
2928+static struct notifier_block cpu_pmu_hotplug_notifier = {
2929 .notifier_call = cpu_pmu_notify,
2930 };
2931
2932diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2933index 5bc2615..4f1a0c2 100644
2934--- a/arch/arm/kernel/process.c
2935+++ b/arch/arm/kernel/process.c
2936@@ -28,10 +28,10 @@
2937 #include <linux/tick.h>
2938 #include <linux/utsname.h>
2939 #include <linux/uaccess.h>
2940-#include <linux/random.h>
2941 #include <linux/hw_breakpoint.h>
2942 #include <linux/cpuidle.h>
2943 #include <linux/leds.h>
2944+#include <linux/random.h>
2945
2946 #include <asm/cacheflush.h>
2947 #include <asm/idmap.h>
2948@@ -223,6 +223,7 @@ void machine_power_off(void)
2949
2950 if (pm_power_off)
2951 pm_power_off();
2952+ BUG();
2953 }
2954
2955 /*
2956@@ -236,7 +237,7 @@ void machine_power_off(void)
2957 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2958 * to use. Implementing such co-ordination would be essentially impossible.
2959 */
2960-void machine_restart(char *cmd)
2961+__noreturn void machine_restart(char *cmd)
2962 {
2963 smp_send_stop();
2964
2965@@ -258,8 +259,8 @@ void __show_regs(struct pt_regs *regs)
2966
2967 show_regs_print_info(KERN_DEFAULT);
2968
2969- print_symbol("PC is at %s\n", instruction_pointer(regs));
2970- print_symbol("LR is at %s\n", regs->ARM_lr);
2971+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2972+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2973 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2974 "sp : %08lx ip : %08lx fp : %08lx\n",
2975 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2976@@ -426,12 +427,6 @@ unsigned long get_wchan(struct task_struct *p)
2977 return 0;
2978 }
2979
2980-unsigned long arch_randomize_brk(struct mm_struct *mm)
2981-{
2982- unsigned long range_end = mm->brk + 0x02000000;
2983- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2984-}
2985-
2986 #ifdef CONFIG_MMU
2987 #ifdef CONFIG_KUSER_HELPERS
2988 /*
2989@@ -447,7 +442,7 @@ static struct vm_area_struct gate_vma = {
2990
2991 static int __init gate_vma_init(void)
2992 {
2993- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2994+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2995 return 0;
2996 }
2997 arch_initcall(gate_vma_init);
2998@@ -466,48 +461,23 @@ int in_gate_area_no_mm(unsigned long addr)
2999 {
3000 return in_gate_area(NULL, addr);
3001 }
3002-#define is_gate_vma(vma) ((vma) = &gate_vma)
3003+#define is_gate_vma(vma) ((vma) == &gate_vma)
3004 #else
3005 #define is_gate_vma(vma) 0
3006 #endif
3007
3008 const char *arch_vma_name(struct vm_area_struct *vma)
3009 {
3010- return is_gate_vma(vma) ? "[vectors]" :
3011- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
3012- "[sigpage]" : NULL;
3013+ return is_gate_vma(vma) ? "[vectors]" : NULL;
3014 }
3015
3016-static struct page *signal_page;
3017-extern struct page *get_signal_page(void);
3018-
3019 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3020 {
3021 struct mm_struct *mm = current->mm;
3022- unsigned long addr;
3023- int ret;
3024-
3025- if (!signal_page)
3026- signal_page = get_signal_page();
3027- if (!signal_page)
3028- return -ENOMEM;
3029
3030 down_write(&mm->mmap_sem);
3031- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
3032- if (IS_ERR_VALUE(addr)) {
3033- ret = addr;
3034- goto up_fail;
3035- }
3036-
3037- ret = install_special_mapping(mm, addr, PAGE_SIZE,
3038- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
3039- &signal_page);
3040-
3041- if (ret == 0)
3042- mm->context.sigpage = addr;
3043-
3044- up_fail:
3045+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
3046 up_write(&mm->mmap_sem);
3047- return ret;
3048+ return 0;
3049 }
3050 #endif
3051diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
3052index 3653164..d83e55d 100644
3053--- a/arch/arm/kernel/psci.c
3054+++ b/arch/arm/kernel/psci.c
3055@@ -24,7 +24,7 @@
3056 #include <asm/opcodes-virt.h>
3057 #include <asm/psci.h>
3058
3059-struct psci_operations psci_ops;
3060+struct psci_operations psci_ops __read_only;
3061
3062 static int (*invoke_psci_fn)(u32, u32, u32, u32);
3063
3064diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3065index 03deeff..741ce88 100644
3066--- a/arch/arm/kernel/ptrace.c
3067+++ b/arch/arm/kernel/ptrace.c
3068@@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
3069 return current_thread_info()->syscall;
3070 }
3071
3072+#ifdef CONFIG_GRKERNSEC_SETXID
3073+extern void gr_delayed_cred_worker(void);
3074+#endif
3075+
3076 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3077 {
3078 current_thread_info()->syscall = scno;
3079
3080+#ifdef CONFIG_GRKERNSEC_SETXID
3081+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3082+ gr_delayed_cred_worker();
3083+#endif
3084+
3085 /* Do the secure computing check first; failures should be fast. */
3086 if (secure_computing(scno) == -1)
3087 return -1;
3088diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3089index b4b1d39..efdc9be 100644
3090--- a/arch/arm/kernel/setup.c
3091+++ b/arch/arm/kernel/setup.c
3092@@ -97,21 +97,23 @@ EXPORT_SYMBOL(system_serial_high);
3093 unsigned int elf_hwcap __read_mostly;
3094 EXPORT_SYMBOL(elf_hwcap);
3095
3096+pteval_t __supported_pte_mask __read_only;
3097+pmdval_t __supported_pmd_mask __read_only;
3098
3099 #ifdef MULTI_CPU
3100-struct processor processor __read_mostly;
3101+struct processor processor;
3102 #endif
3103 #ifdef MULTI_TLB
3104-struct cpu_tlb_fns cpu_tlb __read_mostly;
3105+struct cpu_tlb_fns cpu_tlb __read_only;
3106 #endif
3107 #ifdef MULTI_USER
3108-struct cpu_user_fns cpu_user __read_mostly;
3109+struct cpu_user_fns cpu_user __read_only;
3110 #endif
3111 #ifdef MULTI_CACHE
3112-struct cpu_cache_fns cpu_cache __read_mostly;
3113+struct cpu_cache_fns cpu_cache __read_only;
3114 #endif
3115 #ifdef CONFIG_OUTER_CACHE
3116-struct outer_cache_fns outer_cache __read_mostly;
3117+struct outer_cache_fns outer_cache __read_only;
3118 EXPORT_SYMBOL(outer_cache);
3119 #endif
3120
3121@@ -236,9 +238,13 @@ static int __get_cpu_architecture(void)
3122 asm("mrc p15, 0, %0, c0, c1, 4"
3123 : "=r" (mmfr0));
3124 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3125- (mmfr0 & 0x000000f0) >= 0x00000030)
3126+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3127 cpu_arch = CPU_ARCH_ARMv7;
3128- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3129+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3130+ __supported_pte_mask |= L_PTE_PXN;
3131+ __supported_pmd_mask |= PMD_PXNTABLE;
3132+ }
3133+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3134 (mmfr0 & 0x000000f0) == 0x00000020)
3135 cpu_arch = CPU_ARCH_ARMv6;
3136 else
3137@@ -479,7 +485,7 @@ static void __init setup_processor(void)
3138 __cpu_architecture = __get_cpu_architecture();
3139
3140 #ifdef MULTI_CPU
3141- processor = *list->proc;
3142+ memcpy((void *)&processor, list->proc, sizeof processor);
3143 #endif
3144 #ifdef MULTI_TLB
3145 cpu_tlb = *list->tlb;
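
[note] The MMFR0 probe above turns on L_PTE_PXN/PMD_PXNTABLE in the supported masks only when the VMSA version advertises PXN, so the earlier pte_modify() hunk can include __supported_pte_mask unconditionally and still never set PXN on hardware without it. The effect, reduced to one line:

/* Unsupported protection bits are masked away before reaching the PTE. */
static unsigned long effective_prot(unsigned long prot, unsigned long supported)
{
	return prot & supported;	/* PXN survives only if probed */
}
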
3146diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3147index 5a42c12..a2bb7c6 100644
3148--- a/arch/arm/kernel/signal.c
3149+++ b/arch/arm/kernel/signal.c
3150@@ -45,8 +45,6 @@ static const unsigned long sigreturn_codes[7] = {
3151 MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
3152 };
3153
3154-static unsigned long signal_return_offset;
3155-
3156 #ifdef CONFIG_CRUNCH
3157 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3158 {
3159@@ -406,8 +404,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3160 * except when the MPU has protected the vectors
3161 * page from PL0
3162 */
3163- retcode = mm->context.sigpage + signal_return_offset +
3164- (idx << 2) + thumb;
3165+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3166 } else
3167 #endif
3168 {
3169@@ -611,33 +608,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3170 } while (thread_flags & _TIF_WORK_MASK);
3171 return 0;
3172 }
3173-
3174-struct page *get_signal_page(void)
3175-{
3176- unsigned long ptr;
3177- unsigned offset;
3178- struct page *page;
3179- void *addr;
3180-
3181- page = alloc_pages(GFP_KERNEL, 0);
3182-
3183- if (!page)
3184- return NULL;
3185-
3186- addr = page_address(page);
3187-
3188- /* Give the signal return code some randomness */
3189- offset = 0x200 + (get_random_int() & 0x7fc);
3190- signal_return_offset = offset;
3191-
3192- /*
3193- * Copy signal return handlers into the vector page, and
3194- * set sigreturn to be a pointer to these.
3195- */
3196- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3197-
3198- ptr = (unsigned long)addr + offset;
3199- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3200-
3201- return page;
3202-}
3203diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3204index 5919eb4..b5d6dfe 100644
3205--- a/arch/arm/kernel/smp.c
3206+++ b/arch/arm/kernel/smp.c
3207@@ -70,7 +70,7 @@ enum ipi_msg_type {
3208
3209 static DECLARE_COMPLETION(cpu_running);
3210
3211-static struct smp_operations smp_ops;
3212+static struct smp_operations smp_ops __read_only;
3213
3214 void __init smp_set_ops(struct smp_operations *ops)
3215 {
3216diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3217index 6b9567e..b8af2d6 100644
3218--- a/arch/arm/kernel/traps.c
3219+++ b/arch/arm/kernel/traps.c
3220@@ -55,7 +55,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3221 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3222 {
3223 #ifdef CONFIG_KALLSYMS
3224- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3225+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3226 #else
3227 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3228 #endif
3229@@ -257,6 +257,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3230 static int die_owner = -1;
3231 static unsigned int die_nest_count;
3232
3233+extern void gr_handle_kernel_exploit(void);
3234+
3235 static unsigned long oops_begin(void)
3236 {
3237 int cpu;
3238@@ -299,6 +301,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3239 panic("Fatal exception in interrupt");
3240 if (panic_on_oops)
3241 panic("Fatal exception");
3242+
3243+ gr_handle_kernel_exploit();
3244+
3245 if (signr)
3246 do_exit(signr);
3247 }
3248@@ -592,7 +597,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3249 * The user helper at 0xffff0fe0 must be used instead.
3250 * (see entry-armv.S for details)
3251 */
3252+ pax_open_kernel();
3253 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3254+ pax_close_kernel();
3255 }
3256 return 0;
3257
3258@@ -848,5 +855,9 @@ void __init early_trap_init(void *vectors_base)
3259 kuser_init(vectors_base);
3260
3261 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3262- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3263+
3264+#ifndef CONFIG_PAX_MEMORY_UDEREF
3265+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3266+#endif
3267+
3268 }
3269diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3270index 33f2ea3..0b91824 100644
3271--- a/arch/arm/kernel/vmlinux.lds.S
3272+++ b/arch/arm/kernel/vmlinux.lds.S
3273@@ -8,7 +8,11 @@
3274 #include <asm/thread_info.h>
3275 #include <asm/memory.h>
3276 #include <asm/page.h>
3277-
3278+
3279+#ifdef CONFIG_PAX_KERNEXEC
3280+#include <asm/pgtable.h>
3281+#endif
3282+
3283 #define PROC_INFO \
3284 . = ALIGN(4); \
3285 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3286@@ -94,6 +98,11 @@ SECTIONS
3287 _text = .;
3288 HEAD_TEXT
3289 }
3290+
3291+#ifdef CONFIG_PAX_KERNEXEC
3292+ . = ALIGN(1<<SECTION_SHIFT);
3293+#endif
3294+
3295 .text : { /* Real text segment */
3296 _stext = .; /* Text and read-only data */
3297 __exception_text_start = .;
3298@@ -116,6 +125,8 @@ SECTIONS
3299 ARM_CPU_KEEP(PROC_INFO)
3300 }
3301
3302+ _etext = .; /* End of text section */
3303+
3304 RO_DATA(PAGE_SIZE)
3305
3306 . = ALIGN(4);
3307@@ -146,7 +157,9 @@ SECTIONS
3308
3309 NOTES
3310
3311- _etext = .; /* End of text and rodata section */
3312+#ifdef CONFIG_PAX_KERNEXEC
3313+ . = ALIGN(1<<SECTION_SHIFT);
3314+#endif
3315
3316 #ifndef CONFIG_XIP_KERNEL
3317 . = ALIGN(PAGE_SIZE);
3318@@ -224,6 +237,11 @@ SECTIONS
3319 . = PAGE_OFFSET + TEXT_OFFSET;
3320 #else
3321 __init_end = .;
3322+
3323+#ifdef CONFIG_PAX_KERNEXEC
3324+ . = ALIGN(1<<SECTION_SHIFT);
3325+#endif
3326+
3327 . = ALIGN(THREAD_SIZE);
3328 __data_loc = .;
3329 #endif
3330diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3331index 14a0d98..7771a7d 100644
3332--- a/arch/arm/lib/clear_user.S
3333+++ b/arch/arm/lib/clear_user.S
3334@@ -12,14 +12,14 @@
3335
3336 .text
3337
3338-/* Prototype: int __clear_user(void *addr, size_t sz)
3339+/* Prototype: int ___clear_user(void *addr, size_t sz)
3340 * Purpose : clear some user memory
3341 * Params : addr - user memory address to clear
3342 * : sz - number of bytes to clear
3343 * Returns : number of bytes NOT cleared
3344 */
3345 ENTRY(__clear_user_std)
3346-WEAK(__clear_user)
3347+WEAK(___clear_user)
3348 stmfd sp!, {r1, lr}
3349 mov r2, #0
3350 cmp r1, #4
3351@@ -44,7 +44,7 @@ WEAK(__clear_user)
3352 USER( strnebt r2, [r0])
3353 mov r0, #0
3354 ldmfd sp!, {r1, pc}
3355-ENDPROC(__clear_user)
3356+ENDPROC(___clear_user)
3357 ENDPROC(__clear_user_std)
3358
3359 .pushsection .fixup,"ax"
3360diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3361index 66a477a..bee61d3 100644
3362--- a/arch/arm/lib/copy_from_user.S
3363+++ b/arch/arm/lib/copy_from_user.S
3364@@ -16,7 +16,7 @@
3365 /*
3366 * Prototype:
3367 *
3368- * size_t __copy_from_user(void *to, const void *from, size_t n)
3369+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3370 *
3371 * Purpose:
3372 *
3373@@ -84,11 +84,11 @@
3374
3375 .text
3376
3377-ENTRY(__copy_from_user)
3378+ENTRY(___copy_from_user)
3379
3380 #include "copy_template.S"
3381
3382-ENDPROC(__copy_from_user)
3383+ENDPROC(___copy_from_user)
3384
3385 .pushsection .fixup,"ax"
3386 .align 0
3387diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3388index 6ee2f67..d1cce76 100644
3389--- a/arch/arm/lib/copy_page.S
3390+++ b/arch/arm/lib/copy_page.S
3391@@ -10,6 +10,7 @@
3392 * ASM optimised string functions
3393 */
3394 #include <linux/linkage.h>
3395+#include <linux/const.h>
3396 #include <asm/assembler.h>
3397 #include <asm/asm-offsets.h>
3398 #include <asm/cache.h>
3399diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3400index d066df6..df28194 100644
3401--- a/arch/arm/lib/copy_to_user.S
3402+++ b/arch/arm/lib/copy_to_user.S
3403@@ -16,7 +16,7 @@
3404 /*
3405 * Prototype:
3406 *
3407- * size_t __copy_to_user(void *to, const void *from, size_t n)
3408+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3409 *
3410 * Purpose:
3411 *
3412@@ -88,11 +88,11 @@
3413 .text
3414
3415 ENTRY(__copy_to_user_std)
3416-WEAK(__copy_to_user)
3417+WEAK(___copy_to_user)
3418
3419 #include "copy_template.S"
3420
3421-ENDPROC(__copy_to_user)
3422+ENDPROC(___copy_to_user)
3423 ENDPROC(__copy_to_user_std)
3424
3425 .pushsection .fixup,"ax"
3426diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3427index 7d08b43..f7ca7ea 100644
3428--- a/arch/arm/lib/csumpartialcopyuser.S
3429+++ b/arch/arm/lib/csumpartialcopyuser.S
3430@@ -57,8 +57,8 @@
3431 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3432 */
3433
3434-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3435-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3436+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3437+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3438
3439 #include "csumpartialcopygeneric.S"
3440
3441diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3442index 64dbfa5..84a3fd9 100644
3443--- a/arch/arm/lib/delay.c
3444+++ b/arch/arm/lib/delay.c
3445@@ -28,7 +28,7 @@
3446 /*
3447 * Default to the loop-based delay implementation.
3448 */
3449-struct arm_delay_ops arm_delay_ops = {
3450+struct arm_delay_ops arm_delay_ops __read_only = {
3451 .delay = __loop_delay,
3452 .const_udelay = __loop_const_udelay,
3453 .udelay = __loop_udelay,
3454diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3455index 025f742..8432b08 100644
3456--- a/arch/arm/lib/uaccess_with_memcpy.c
3457+++ b/arch/arm/lib/uaccess_with_memcpy.c
3458@@ -104,7 +104,7 @@ out:
3459 }
3460
3461 unsigned long
3462-__copy_to_user(void __user *to, const void *from, unsigned long n)
3463+___copy_to_user(void __user *to, const void *from, unsigned long n)
3464 {
3465 /*
3466 * This test is stubbed out of the main function above to keep
3467diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3468index f389228..592ef66 100644
3469--- a/arch/arm/mach-kirkwood/common.c
3470+++ b/arch/arm/mach-kirkwood/common.c
3471@@ -149,7 +149,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3472 clk_gate_ops.disable(hw);
3473 }
3474
3475-static struct clk_ops clk_gate_fn_ops;
3476+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3477+{
3478+ return clk_gate_ops.is_enabled(hw);
3479+}
3480+
3481+static struct clk_ops clk_gate_fn_ops = {
3482+ .enable = clk_gate_fn_enable,
3483+ .disable = clk_gate_fn_disable,
3484+ .is_enabled = clk_gate_fn_is_enabled,
3485+};
3486
3487 static struct clk __init *clk_register_gate_fn(struct device *dev,
3488 const char *name,
3489@@ -183,14 +192,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3490 gate_fn->fn_en = fn_en;
3491 gate_fn->fn_dis = fn_dis;
3492
3493- /* ops is the gate ops, but with our enable/disable functions */
3494- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3495- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3496- clk_gate_fn_ops = clk_gate_ops;
3497- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3498- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3499- }
3500-
3501 clk = clk_register(dev, &gate_fn->gate.hw);
3502
3503 if (IS_ERR(clk))
3504diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3505index f6eeb87..cc90868 100644
3506--- a/arch/arm/mach-omap2/board-n8x0.c
3507+++ b/arch/arm/mach-omap2/board-n8x0.c
3508@@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3509 }
3510 #endif
3511
3512-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3513+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3514 .late_init = n8x0_menelaus_late_init,
3515 };
3516
3517diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3518index 6c4da12..d9ca72d 100644
3519--- a/arch/arm/mach-omap2/gpmc.c
3520+++ b/arch/arm/mach-omap2/gpmc.c
3521@@ -147,7 +147,6 @@ struct omap3_gpmc_regs {
3522 };
3523
3524 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3525-static struct irq_chip gpmc_irq_chip;
3526 static unsigned gpmc_irq_start;
3527
3528 static struct resource gpmc_mem_root;
3529@@ -711,6 +710,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3530
3531 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3532
3533+static struct irq_chip gpmc_irq_chip = {
3534+ .name = "gpmc",
3535+ .irq_startup = gpmc_irq_noop_ret,
3536+ .irq_enable = gpmc_irq_enable,
3537+ .irq_disable = gpmc_irq_disable,
3538+ .irq_shutdown = gpmc_irq_noop,
3539+ .irq_ack = gpmc_irq_noop,
3540+ .irq_mask = gpmc_irq_noop,
3541+ .irq_unmask = gpmc_irq_noop,
3542+
3543+};
3544+
3545 static int gpmc_setup_irq(void)
3546 {
3547 int i;
3548@@ -725,15 +736,6 @@ static int gpmc_setup_irq(void)
3549 return gpmc_irq_start;
3550 }
3551
3552- gpmc_irq_chip.name = "gpmc";
3553- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3554- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3555- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3556- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3557- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3558- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3559- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3560-
3561 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3562 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3563
3564diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3565index f8bb3b9..831e7b8 100644
3566--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3567+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3568@@ -339,7 +339,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
3569 return NOTIFY_OK;
3570 }
3571
3572-static struct notifier_block __refdata irq_hotplug_notifier = {
3573+static struct notifier_block irq_hotplug_notifier = {
3574 .notifier_call = irq_cpu_hotplug_notify,
3575 };
3576
3577diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3578index e6d2307..d057195 100644
3579--- a/arch/arm/mach-omap2/omap_device.c
3580+++ b/arch/arm/mach-omap2/omap_device.c
3581@@ -499,7 +499,7 @@ void omap_device_delete(struct omap_device *od)
3582 struct platform_device __init *omap_device_build(const char *pdev_name,
3583 int pdev_id,
3584 struct omap_hwmod *oh,
3585- void *pdata, int pdata_len)
3586+ const void *pdata, int pdata_len)
3587 {
3588 struct omap_hwmod *ohs[] = { oh };
3589
3590@@ -527,7 +527,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3591 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3592 int pdev_id,
3593 struct omap_hwmod **ohs,
3594- int oh_cnt, void *pdata,
3595+ int oh_cnt, const void *pdata,
3596 int pdata_len)
3597 {
3598 int ret = -ENOMEM;
3599diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3600index 044c31d..2ee0861 100644
3601--- a/arch/arm/mach-omap2/omap_device.h
3602+++ b/arch/arm/mach-omap2/omap_device.h
3603@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3604 /* Core code interface */
3605
3606 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3607- struct omap_hwmod *oh, void *pdata,
3608+ struct omap_hwmod *oh, const void *pdata,
3609 int pdata_len);
3610
3611 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3612 struct omap_hwmod **oh, int oh_cnt,
3613- void *pdata, int pdata_len);
3614+ const void *pdata, int pdata_len);
3615
3616 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3617 struct omap_hwmod **ohs, int oh_cnt);
3618diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3619index 7341eff..fd75e34 100644
3620--- a/arch/arm/mach-omap2/omap_hwmod.c
3621+++ b/arch/arm/mach-omap2/omap_hwmod.c
3622@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
3623 int (*init_clkdm)(struct omap_hwmod *oh);
3624 void (*update_context_lost)(struct omap_hwmod *oh);
3625 int (*get_context_lost)(struct omap_hwmod *oh);
3626-};
3627+} __no_const;
3628
3629 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3630-static struct omap_hwmod_soc_ops soc_ops;
3631+static struct omap_hwmod_soc_ops soc_ops __read_only;
3632
3633 /* omap_hwmod_list contains all registered struct omap_hwmods */
3634 static LIST_HEAD(omap_hwmod_list);
3635diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3636index d15c7bb..b2d1f0c 100644
3637--- a/arch/arm/mach-omap2/wd_timer.c
3638+++ b/arch/arm/mach-omap2/wd_timer.c
3639@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3640 struct omap_hwmod *oh;
3641 char *oh_name = "wd_timer2";
3642 char *dev_name = "omap_wdt";
3643- struct omap_wd_timer_platform_data pdata;
3644+ static struct omap_wd_timer_platform_data pdata = {
3645+ .read_reset_sources = prm_read_reset_sources
3646+ };
3647
3648 if (!cpu_class_is_omap2() || of_have_populated_dt())
3649 return 0;
3650@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3651 return -EINVAL;
3652 }
3653
3654- pdata.read_reset_sources = prm_read_reset_sources;
3655-
3656 pdev = omap_device_build(dev_name, id, oh, &pdata,
3657 sizeof(struct omap_wd_timer_platform_data));
3658 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3659diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3660index 0cdba8d..297993e 100644
3661--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3662+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3663@@ -181,7 +181,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3664 bool entered_lp2 = false;
3665
3666 if (tegra_pending_sgi())
3667- ACCESS_ONCE(abort_flag) = true;
3668+ ACCESS_ONCE_RW(abort_flag) = true;
3669
3670 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3671
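ACCESS_ONCE_RW is the PaX counterpart to ACCESS_ONCE: under constification, ACCESS_ONCE yields a const-qualified volatile lvalue, so deliberate stores need the _RW form. A small sketch assuming those definitions:

#include <stdbool.h>
#include <stdio.h>

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

static bool abort_flag;

int main(void)
{
    ACCESS_ONCE_RW(abort_flag) = true;        /* exactly one volatile store */
    printf("%d\n", ACCESS_ONCE(abort_flag));  /* exactly one volatile load */
    /* ACCESS_ONCE(abort_flag) = false; fails: assignment to const lvalue */
    return 0;
}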
3672diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3673index cad3ca86..1d79e0f 100644
3674--- a/arch/arm/mach-ux500/setup.h
3675+++ b/arch/arm/mach-ux500/setup.h
3676@@ -37,13 +37,6 @@ extern void ux500_timer_init(void);
3677 .type = MT_DEVICE, \
3678 }
3679
3680-#define __MEM_DEV_DESC(x, sz) { \
3681- .virtual = IO_ADDRESS(x), \
3682- .pfn = __phys_to_pfn(x), \
3683- .length = sz, \
3684- .type = MT_MEMORY, \
3685-}
3686-
3687 extern struct smp_operations ux500_smp_ops;
3688 extern void ux500_cpu_die(unsigned int cpu);
3689
3690diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3691index 2950082..d0f0782 100644
3692--- a/arch/arm/mm/Kconfig
3693+++ b/arch/arm/mm/Kconfig
3694@@ -436,7 +436,7 @@ config CPU_32v5
3695
3696 config CPU_32v6
3697 bool
3698- select CPU_USE_DOMAINS if CPU_V6 && MMU
3699+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3700 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3701
3702 config CPU_32v6K
3703@@ -585,6 +585,7 @@ config CPU_CP15_MPU
3704
3705 config CPU_USE_DOMAINS
3706 bool
3707+ depends on !ARM_LPAE && !PAX_KERNEXEC
3708 help
3709 This option enables or disables the use of domain switching
3710 via the set_fs() function.
3711@@ -780,6 +781,7 @@ config NEED_KUSER_HELPERS
3712 config KUSER_HELPERS
3713 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3714 default y
3715+ depends on !(CPU_V6 || CPU_V6K || CPU_V7)
3716 help
3717 Warning: disabling this option may break user programs.
3718
3719@@ -790,7 +792,7 @@ config KUSER_HELPERS
3720 run on ARMv4 through to ARMv7 without modification.
3721
3722 However, the fixed address nature of these helpers can be used
3723- by ROP (return orientated programming) authors when creating
3724+ by ROP (Return Oriented Programming) authors when creating
3725 exploits.
3726
3727 If all of the binaries and libraries which run on your platform
3728diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3729index 6f4585b..7b6f52b 100644
3730--- a/arch/arm/mm/alignment.c
3731+++ b/arch/arm/mm/alignment.c
3732@@ -211,10 +211,12 @@ union offset_union {
3733 #define __get16_unaligned_check(ins,val,addr) \
3734 do { \
3735 unsigned int err = 0, v, a = addr; \
3736+ pax_open_userland(); \
3737 __get8_unaligned_check(ins,v,a,err); \
3738 val = v << ((BE) ? 8 : 0); \
3739 __get8_unaligned_check(ins,v,a,err); \
3740 val |= v << ((BE) ? 0 : 8); \
3741+ pax_close_userland(); \
3742 if (err) \
3743 goto fault; \
3744 } while (0)
3745@@ -228,6 +230,7 @@ union offset_union {
3746 #define __get32_unaligned_check(ins,val,addr) \
3747 do { \
3748 unsigned int err = 0, v, a = addr; \
3749+ pax_open_userland(); \
3750 __get8_unaligned_check(ins,v,a,err); \
3751 val = v << ((BE) ? 24 : 0); \
3752 __get8_unaligned_check(ins,v,a,err); \
3753@@ -236,6 +239,7 @@ union offset_union {
3754 val |= v << ((BE) ? 8 : 16); \
3755 __get8_unaligned_check(ins,v,a,err); \
3756 val |= v << ((BE) ? 0 : 24); \
3757+ pax_close_userland(); \
3758 if (err) \
3759 goto fault; \
3760 } while (0)
3761@@ -249,6 +253,7 @@ union offset_union {
3762 #define __put16_unaligned_check(ins,val,addr) \
3763 do { \
3764 unsigned int err = 0, v = val, a = addr; \
3765+ pax_open_userland(); \
3766 __asm__( FIRST_BYTE_16 \
3767 ARM( "1: "ins" %1, [%2], #1\n" ) \
3768 THUMB( "1: "ins" %1, [%2]\n" ) \
3769@@ -268,6 +273,7 @@ union offset_union {
3770 " .popsection\n" \
3771 : "=r" (err), "=&r" (v), "=&r" (a) \
3772 : "0" (err), "1" (v), "2" (a)); \
3773+ pax_close_userland(); \
3774 if (err) \
3775 goto fault; \
3776 } while (0)
3777@@ -281,6 +287,7 @@ union offset_union {
3778 #define __put32_unaligned_check(ins,val,addr) \
3779 do { \
3780 unsigned int err = 0, v = val, a = addr; \
3781+ pax_open_userland(); \
3782 __asm__( FIRST_BYTE_32 \
3783 ARM( "1: "ins" %1, [%2], #1\n" ) \
3784 THUMB( "1: "ins" %1, [%2]\n" ) \
3785@@ -310,6 +317,7 @@ union offset_union {
3786 " .popsection\n" \
3787 : "=r" (err), "=&r" (v), "=&r" (a) \
3788 : "0" (err), "1" (v), "2" (a)); \
3789+ pax_close_userland(); \
3790 if (err) \
3791 goto fault; \
3792 } while (0)
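Under PAX_MEMORY_UDEREF, userland is unreachable from kernel context by default, so each alignment-fixup macro that touches the faulting user address now opens an access window first and closes it when done. A userspace sketch of the bracketing, with empty stand-in stubs for the domain switch:

#include <stdio.h>

/* Stand-in stubs: on a UDEREF kernel these would toggle the user domain
 * in the DACR; here they only mark where the window opens and closes. */
static void pax_open_userland(void)  { }
static void pax_close_userland(void) { }

/* Byte-wise 16-bit load, bracketed like __get16_unaligned_check above. */
static unsigned int get16_unaligned(const unsigned char *addr)
{
    unsigned int val;

    pax_open_userland();
    val  = addr[0];
    val |= addr[1] << 8;
    pax_close_userland();

    return val;
}

int main(void)
{
    unsigned char buf[3] = { 0x34, 0x12, 0 };
    printf("0x%04x\n", get16_unaligned(buf));   /* little-endian: 0x1234 */
    return 0;
}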
3793diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3794index 5dbf13f..ee1ec24 100644
3795--- a/arch/arm/mm/fault.c
3796+++ b/arch/arm/mm/fault.c
3797@@ -25,6 +25,7 @@
3798 #include <asm/system_misc.h>
3799 #include <asm/system_info.h>
3800 #include <asm/tlbflush.h>
3801+#include <asm/sections.h>
3802
3803 #include "fault.h"
3804
3805@@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3806 if (fixup_exception(regs))
3807 return;
3808
3809+#ifdef CONFIG_PAX_KERNEXEC
3810+ if ((fsr & FSR_WRITE) &&
3811+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3812+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3813+ {
3814+ if (current->signal->curr_ip)
3815+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3816+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3817+ else
3818+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3819+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3820+ }
3821+#endif
3822+
3823 /*
3824 * No handler, we'll have to terminate things with extreme prejudice.
3825 */
3826@@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3827 }
3828 #endif
3829
3830+#ifdef CONFIG_PAX_PAGEEXEC
3831+ if (fsr & FSR_LNX_PF) {
3832+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3833+ do_group_exit(SIGKILL);
3834+ }
3835+#endif
3836+
3837 tsk->thread.address = addr;
3838 tsk->thread.error_code = fsr;
3839 tsk->thread.trap_no = 14;
3840@@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3841 }
3842 #endif /* CONFIG_MMU */
3843
3844+#ifdef CONFIG_PAX_PAGEEXEC
3845+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3846+{
3847+ long i;
3848+
3849+ printk(KERN_ERR "PAX: bytes at PC: ");
3850+ for (i = 0; i < 20; i++) {
3851+ unsigned char c;
3852+ if (get_user(c, (__force unsigned char __user *)pc+i))
3853+ printk(KERN_CONT "?? ");
3854+ else
3855+ printk(KERN_CONT "%02x ", c);
3856+ }
3857+ printk("\n");
3858+
3859+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3860+ for (i = -1; i < 20; i++) {
3861+ unsigned long c;
3862+ if (get_user(c, (__force unsigned long __user *)sp+i))
3863+ printk(KERN_CONT "???????? ");
3864+ else
3865+ printk(KERN_CONT "%08lx ", c);
3866+ }
3867+ printk("\n");
3868+}
3869+#endif
3870+
3871 /*
3872 * First Level Translation Fault Handler
3873 *
3874@@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3875 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3876 struct siginfo info;
3877
3878+#ifdef CONFIG_PAX_MEMORY_UDEREF
3879+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3880+ if (current->signal->curr_ip)
3881+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3882+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3883+ else
3884+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3885+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3886+ goto die;
3887+ }
3888+#endif
3889+
3890 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3891 return;
3892
3893+die:
3894 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3895 inf->name, fsr, addr);
3896
3897@@ -569,15 +631,67 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
3898 ifsr_info[nr].name = name;
3899 }
3900
3901+asmlinkage int sys_sigreturn(struct pt_regs *regs);
3902+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
3903+
3904 asmlinkage void __exception
3905 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3906 {
3907 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3908 struct siginfo info;
3909
3910+ if (user_mode(regs)) {
3911+ unsigned long sigpage = current->mm->context.sigpage;
3912+
3913+ if (sigpage <= addr && addr < sigpage + 7*4) {
3914+ if (addr < sigpage + 3*4)
3915+ sys_sigreturn(regs);
3916+ else
3917+ sys_rt_sigreturn(regs);
3918+ return;
3919+ }
3920+ if (addr == 0xffff0fe0UL) {
3921+ /*
3922+ * PaX: __kuser_get_tls emulation
3923+ */
3924+ regs->ARM_r0 = current_thread_info()->tp_value;
3925+ regs->ARM_pc = regs->ARM_lr;
3926+ return;
3927+ }
3928+ }
3929+
3930+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3931+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
3932+ if (current->signal->curr_ip)
3933+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3934+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3935+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3936+ else
3937+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
3938+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3939+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3940+ goto die;
3941+ }
3942+#endif
3943+
3944+#ifdef CONFIG_PAX_REFCOUNT
3945+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
3946+ unsigned int bkpt;
3947+
3948+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
3949+ current->thread.error_code = ifsr;
3950+ current->thread.trap_no = 0;
3951+ pax_report_refcount_overflow(regs);
3952+ fixup_exception(regs);
3953+ return;
3954+ }
3955+ }
3956+#endif
3957+
3958 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
3959 return;
3960
3961+die:
3962 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
3963 inf->name, ifsr, addr);
3964
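pax_report_insns dumps the bytes around the faulting PC and the words around SP through get_user(), so an unmapped or unreadable address produces a "??" marker instead of a recursive fault. A userspace analog where a simple range check stands in for get_user():

#include <stdio.h>

static int safe_read_byte(const unsigned char *p, const unsigned char *lo,
                          const unsigned char *hi, unsigned char *out)
{
    if (p < lo || p >= hi)
        return -1;              /* get_user() would return -EFAULT here */
    *out = *p;
    return 0;
}

int main(void)
{
    unsigned char code[8] = { 0xe5, 0x9f, 0x00, 0x04, 0xe1, 0x2f, 0xff, 0x1e };
    const unsigned char *pc = code + 4;

    printf("bytes at PC: ");
    for (int i = 0; i < 20; i++) {
        unsigned char c;
        if (safe_read_byte(pc + i, code, code + sizeof(code), &c))
            printf("?? ");      /* unreadable, same marker as the patch */
        else
            printf("%02x ", c);
    }
    printf("\n");
    return 0;
}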
3965diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
3966index cf08bdf..772656c 100644
3967--- a/arch/arm/mm/fault.h
3968+++ b/arch/arm/mm/fault.h
3969@@ -3,6 +3,7 @@
3970
3971 /*
3972 * Fault status register encodings. We steal bit 31 for our own purposes.
3973+ * Set when the FSR value is from an instruction fault.
3974 */
3975 #define FSR_LNX_PF (1 << 31)
3976 #define FSR_WRITE (1 << 11)
3977@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
3978 }
3979 #endif
3980
3981+/* valid for LPAE and !LPAE */
3982+static inline int is_xn_fault(unsigned int fsr)
3983+{
3984+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
3985+}
3986+
3987+static inline int is_domain_fault(unsigned int fsr)
3988+{
3989+ return ((fsr_fs(fsr) & 0xD) == 0x9);
3990+}
3991+
3992 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
3993 unsigned long search_exception_table(unsigned long addr);
3994
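The two new predicates test the decoded fault-status field: domain faults are FS 0b01001 (section) and 0b01011 (page), and the permission/XN group matches (fs & 0x3c) == 0xc. A check assuming the short-descriptor layout the file already uses, with FS[3:0] in FSR bits 3:0 and FS[4] in bit 10:

#include <assert.h>

#define FSR_FS3_0 0x0f
#define FSR_FS4   (1 << 10)

static int fsr_fs(unsigned int fsr)
{
    return (fsr & FSR_FS3_0) | ((fsr & FSR_FS4) >> 6);
}

static int is_xn_fault(unsigned int fsr)     { return (fsr_fs(fsr) & 0x3c) == 0xc; }
static int is_domain_fault(unsigned int fsr) { return (fsr_fs(fsr) & 0xD) == 0x9; }

int main(void)
{
    assert(is_domain_fault(0x9));   /* FS = 0b01001: section domain fault */
    assert(is_domain_fault(0xb));   /* FS = 0b01011: page domain fault    */
    assert(is_xn_fault(0xd));       /* FS = 0b01101: section permission   */
    assert(is_xn_fault(0xf));       /* FS = 0b01111: page permission      */
    assert(!is_domain_fault(0x5));  /* FS = 0b00101: translation fault    */
    return 0;
}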
3995diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
3996index 0ecc43f..190b956 100644
3997--- a/arch/arm/mm/init.c
3998+++ b/arch/arm/mm/init.c
3999@@ -30,6 +30,8 @@
4000 #include <asm/setup.h>
4001 #include <asm/tlb.h>
4002 #include <asm/fixmap.h>
4003+#include <asm/system_info.h>
4004+#include <asm/cp15.h>
4005
4006 #include <asm/mach/arch.h>
4007 #include <asm/mach/map.h>
4008@@ -726,7 +728,46 @@ void free_initmem(void)
4009 {
4010 #ifdef CONFIG_HAVE_TCM
4011 extern char __tcm_start, __tcm_end;
4012+#endif
4013
4014+#ifdef CONFIG_PAX_KERNEXEC
4015+ unsigned long addr;
4016+ pgd_t *pgd;
4017+ pud_t *pud;
4018+ pmd_t *pmd;
4019+ int cpu_arch = cpu_architecture();
4020+ unsigned int cr = get_cr();
4021+
4022+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
 4023+	/* make page tables, etc. before .text NX */
4024+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4025+ pgd = pgd_offset_k(addr);
4026+ pud = pud_offset(pgd, addr);
4027+ pmd = pmd_offset(pud, addr);
4028+ __section_update(pmd, addr, PMD_SECT_XN);
4029+ }
4030+ /* make init NX */
4031+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4032+ pgd = pgd_offset_k(addr);
4033+ pud = pud_offset(pgd, addr);
4034+ pmd = pmd_offset(pud, addr);
4035+ __section_update(pmd, addr, PMD_SECT_XN);
4036+ }
4037+ /* make kernel code/rodata RX */
4038+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4039+ pgd = pgd_offset_k(addr);
4040+ pud = pud_offset(pgd, addr);
4041+ pmd = pmd_offset(pud, addr);
4042+#ifdef CONFIG_ARM_LPAE
4043+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4044+#else
4045+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4046+#endif
4047+ }
4048+ }
4049+#endif
4050+
4051+#ifdef CONFIG_HAVE_TCM
4052 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4053 free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link");
4054 #endif
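The KERNEXEC block in free_initmem() walks the kernel's section mappings once init memory is discarded: everything below _stext and the freed init region loses execute, while text and rodata become read-only but executable. A sketch of the resulting policy over illustrative addresses, one decision per 1 MiB section as in the pmd walk above:

#include <stdio.h>

enum prot { RW_NX, RX };

static enum prot section_prot(unsigned long addr, unsigned long stext,
                              unsigned long init_begin)
{
    if (addr >= stext && addr < init_begin)
        return RX;      /* kernel text + rodata: read/execute only */
    return RW_NX;       /* page tables, init, data: writable, never exec */
}

int main(void)
{
    const unsigned long page_offset = 0xc0000000UL;  /* illustrative layout */
    const unsigned long stext       = 0xc0008000UL;
    const unsigned long init_begin  = 0xc0600000UL;
    const unsigned long sdata       = 0xc0700000UL;

    for (unsigned long a = page_offset; a < sdata; a += 0x100000)
        printf("%08lx -> %s\n", a,
               section_prot(a, stext, init_begin) == RX ? "RX" : "RW+NX");
    return 0;
}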
4055diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4056index 04d9006..c547d85 100644
4057--- a/arch/arm/mm/ioremap.c
4058+++ b/arch/arm/mm/ioremap.c
4059@@ -392,9 +392,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
4060 unsigned int mtype;
4061
4062 if (cached)
4063- mtype = MT_MEMORY;
4064+ mtype = MT_MEMORY_RX;
4065 else
4066- mtype = MT_MEMORY_NONCACHED;
4067+ mtype = MT_MEMORY_NONCACHED_RX;
4068
4069 return __arm_ioremap_caller(phys_addr, size, mtype,
4070 __builtin_return_address(0));
4071diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4072index 10062ce..8695745 100644
4073--- a/arch/arm/mm/mmap.c
4074+++ b/arch/arm/mm/mmap.c
4075@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4076 struct vm_area_struct *vma;
4077 int do_align = 0;
4078 int aliasing = cache_is_vipt_aliasing();
4079+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4080 struct vm_unmapped_area_info info;
4081
4082 /*
4083@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4084 if (len > TASK_SIZE)
4085 return -ENOMEM;
4086
4087+#ifdef CONFIG_PAX_RANDMMAP
4088+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4089+#endif
4090+
4091 if (addr) {
4092 if (do_align)
4093 addr = COLOUR_ALIGN(addr, pgoff);
4094@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4095 addr = PAGE_ALIGN(addr);
4096
4097 vma = find_vma(mm, addr);
4098- if (TASK_SIZE - len >= addr &&
4099- (!vma || addr + len <= vma->vm_start))
4100+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4101 return addr;
4102 }
4103
4104@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4105 info.high_limit = TASK_SIZE;
4106 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4107 info.align_offset = pgoff << PAGE_SHIFT;
4108+ info.threadstack_offset = offset;
4109 return vm_unmapped_area(&info);
4110 }
4111
4112@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4113 unsigned long addr = addr0;
4114 int do_align = 0;
4115 int aliasing = cache_is_vipt_aliasing();
4116+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4117 struct vm_unmapped_area_info info;
4118
4119 /*
4120@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4121 return addr;
4122 }
4123
4124+#ifdef CONFIG_PAX_RANDMMAP
4125+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4126+#endif
4127+
4128 /* requesting a specific address */
4129 if (addr) {
4130 if (do_align)
4131@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4132 else
4133 addr = PAGE_ALIGN(addr);
4134 vma = find_vma(mm, addr);
4135- if (TASK_SIZE - len >= addr &&
4136- (!vma || addr + len <= vma->vm_start))
4137+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4138 return addr;
4139 }
4140
4141@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4142 info.high_limit = mm->mmap_base;
4143 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4144 info.align_offset = pgoff << PAGE_SHIFT;
4145+ info.threadstack_offset = offset;
4146 addr = vm_unmapped_area(&info);
4147
4148 /*
4149@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4150 {
4151 unsigned long random_factor = 0UL;
4152
4153+#ifdef CONFIG_PAX_RANDMMAP
4154+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4155+#endif
4156+
4157 /* 8 bits of randomness in 20 address space bits */
4158 if ((current->flags & PF_RANDOMIZE) &&
4159 !(current->personality & ADDR_NO_RANDOMIZE))
4160@@ -180,10 +194,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4161
4162 if (mmap_is_legacy()) {
4163 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4164+
4165+#ifdef CONFIG_PAX_RANDMMAP
4166+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4167+ mm->mmap_base += mm->delta_mmap;
4168+#endif
4169+
4170 mm->get_unmapped_area = arch_get_unmapped_area;
4171 mm->unmap_area = arch_unmap_area;
4172 } else {
4173 mm->mmap_base = mmap_base(random_factor);
4174+
4175+#ifdef CONFIG_PAX_RANDMMAP
4176+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4177+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4178+#endif
4179+
4180 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4181 mm->unmap_area = arch_unmap_area_topdown;
4182 }
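The legacy-layout comment "8 bits of randomness in 20 address space bits" works out as 8 random bits applied at page granularity. A quick check of the arithmetic, with rand() standing in for get_random_int() and an illustrative base address:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12   /* 4 KiB pages */

int main(void)
{
    unsigned long task_unmapped_base = 0x40000000UL;  /* illustrative */
    unsigned long random_factor =
        (unsigned long)(rand() & 0xff) << PAGE_SHIFT;

    /* 8 bits at page granularity: 256 slots spanning a 1 MiB window */
    printf("window: %d slots x %d bytes = %lu KiB\n",
           1 << 8, 1 << PAGE_SHIFT,
           ((unsigned long)(1 << 8) << PAGE_SHIFT) >> 10);
    printf("mmap_base = %#lx\n", task_unmapped_base + random_factor);
    return 0;
}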
4183diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4184index daf336f..4e6392c 100644
4185--- a/arch/arm/mm/mmu.c
4186+++ b/arch/arm/mm/mmu.c
4187@@ -36,6 +36,22 @@
4188 #include "mm.h"
4189 #include "tcm.h"
4190
4191+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4192+void modify_domain(unsigned int dom, unsigned int type)
4193+{
4194+ struct thread_info *thread = current_thread_info();
4195+ unsigned int domain = thread->cpu_domain;
4196+ /*
4197+ * DOMAIN_MANAGER might be defined to some other value,
4198+ * use the arch-defined constant
4199+ */
4200+ domain &= ~domain_val(dom, 3);
4201+ thread->cpu_domain = domain | domain_val(dom, type);
4202+ set_domain(thread->cpu_domain);
4203+}
4204+EXPORT_SYMBOL(modify_domain);
4205+#endif
4206+
4207 /*
4208 * empty_zero_page is a special page that is used for
4209 * zero-initialized data and COW.
4210@@ -228,10 +244,18 @@ __setup("noalign", noalign_setup);
4211
4212 #endif /* ifdef CONFIG_CPU_CP15 / else */
4213
4214-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
4215+#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
4216 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4217
4218-static struct mem_type mem_types[] = {
4219+#ifdef CONFIG_PAX_KERNEXEC
4220+#define L_PTE_KERNEXEC L_PTE_RDONLY
4221+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4222+#else
4223+#define L_PTE_KERNEXEC L_PTE_DIRTY
4224+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4225+#endif
4226+
4227+static struct mem_type mem_types[] __read_only = {
4228 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4229 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4230 L_PTE_SHARED,
4231@@ -260,16 +284,16 @@ static struct mem_type mem_types[] = {
4232 [MT_UNCACHED] = {
4233 .prot_pte = PROT_PTE_DEVICE,
4234 .prot_l1 = PMD_TYPE_TABLE,
4235- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4236+ .prot_sect = PROT_SECT_DEVICE,
4237 .domain = DOMAIN_IO,
4238 },
4239 [MT_CACHECLEAN] = {
4240- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4241+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4242 .domain = DOMAIN_KERNEL,
4243 },
4244 #ifndef CONFIG_ARM_LPAE
4245 [MT_MINICLEAN] = {
4246- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4247+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
4248 .domain = DOMAIN_KERNEL,
4249 },
4250 #endif
4251@@ -277,36 +301,54 @@ static struct mem_type mem_types[] = {
4252 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4253 L_PTE_RDONLY,
4254 .prot_l1 = PMD_TYPE_TABLE,
4255- .domain = DOMAIN_USER,
4256+ .domain = DOMAIN_VECTORS,
4257 },
4258 [MT_HIGH_VECTORS] = {
4259 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4260 L_PTE_USER | L_PTE_RDONLY,
4261 .prot_l1 = PMD_TYPE_TABLE,
4262- .domain = DOMAIN_USER,
4263+ .domain = DOMAIN_VECTORS,
4264 },
4265- [MT_MEMORY] = {
4266+ [MT_MEMORY_RWX] = {
4267 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4268 .prot_l1 = PMD_TYPE_TABLE,
4269 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4270 .domain = DOMAIN_KERNEL,
4271 },
4272+ [MT_MEMORY_RW] = {
4273+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4274+ .prot_l1 = PMD_TYPE_TABLE,
4275+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4276+ .domain = DOMAIN_KERNEL,
4277+ },
4278+ [MT_MEMORY_RX] = {
4279+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4280+ .prot_l1 = PMD_TYPE_TABLE,
4281+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4282+ .domain = DOMAIN_KERNEL,
4283+ },
4284 [MT_ROM] = {
4285- .prot_sect = PMD_TYPE_SECT,
4286+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4287 .domain = DOMAIN_KERNEL,
4288 },
4289- [MT_MEMORY_NONCACHED] = {
4290+ [MT_MEMORY_NONCACHED_RW] = {
4291 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4292 L_PTE_MT_BUFFERABLE,
4293 .prot_l1 = PMD_TYPE_TABLE,
4294 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4295 .domain = DOMAIN_KERNEL,
4296 },
4297+ [MT_MEMORY_NONCACHED_RX] = {
4298+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4299+ L_PTE_MT_BUFFERABLE,
4300+ .prot_l1 = PMD_TYPE_TABLE,
4301+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4302+ .domain = DOMAIN_KERNEL,
4303+ },
4304 [MT_MEMORY_DTCM] = {
4305- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4306- L_PTE_XN,
4307+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4308 .prot_l1 = PMD_TYPE_TABLE,
4309- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4310+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4311 .domain = DOMAIN_KERNEL,
4312 },
4313 [MT_MEMORY_ITCM] = {
4314@@ -316,10 +358,10 @@ static struct mem_type mem_types[] = {
4315 },
4316 [MT_MEMORY_SO] = {
4317 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4318- L_PTE_MT_UNCACHED | L_PTE_XN,
4319+ L_PTE_MT_UNCACHED,
4320 .prot_l1 = PMD_TYPE_TABLE,
4321 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
4322- PMD_SECT_UNCACHED | PMD_SECT_XN,
4323+ PMD_SECT_UNCACHED,
4324 .domain = DOMAIN_KERNEL,
4325 },
4326 [MT_MEMORY_DMA_READY] = {
4327@@ -405,9 +447,35 @@ static void __init build_mem_type_table(void)
4328 * to prevent speculative instruction fetches.
4329 */
4330 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
4331+ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
4332 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
4333+ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
4334 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
4335+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
4336 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
4337+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
4338+
4339+ /* Mark other regions on ARMv6+ as execute-never */
4340+
4341+#ifdef CONFIG_PAX_KERNEXEC
4342+ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
4343+ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
4344+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
4345+ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
4346+#ifndef CONFIG_ARM_LPAE
4347+ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
4348+ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
4349+#endif
4350+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
4351+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
4352+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
 4353+	mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_XN;
4354+ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
4355+ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
4356+#endif
4357+
4358+ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
4359+ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
4360 }
4361 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4362 /*
4363@@ -468,6 +536,9 @@ static void __init build_mem_type_table(void)
4364 * from SVC mode and no access from userspace.
4365 */
4366 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4367+#ifdef CONFIG_PAX_KERNEXEC
4368+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4369+#endif
4370 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4371 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4372 #endif
4373@@ -485,11 +556,17 @@ static void __init build_mem_type_table(void)
4374 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4375 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4376 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4377- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
4378- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
4379+ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4380+ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4381+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4382+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4383+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4384+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4385 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4386- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
4387- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
4388+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
4389+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
4390+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
4391+ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
4392 }
4393 }
4394
4395@@ -500,15 +577,20 @@ static void __init build_mem_type_table(void)
4396 if (cpu_arch >= CPU_ARCH_ARMv6) {
4397 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4398 /* Non-cacheable Normal is XCB = 001 */
4399- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4400+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4401+ PMD_SECT_BUFFERED;
4402+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4403 PMD_SECT_BUFFERED;
4404 } else {
4405 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4406- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4407+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4408+ PMD_SECT_TEX(1);
4409+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4410 PMD_SECT_TEX(1);
4411 }
4412 } else {
4413- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4414+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
4415+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
4416 }
4417
4418 #ifdef CONFIG_ARM_LPAE
4419@@ -524,6 +606,8 @@ static void __init build_mem_type_table(void)
4420 vecs_pgprot |= PTE_EXT_AF;
4421 #endif
4422
4423+ user_pgprot |= __supported_pte_mask;
4424+
4425 for (i = 0; i < 16; i++) {
4426 pteval_t v = pgprot_val(protection_map[i]);
4427 protection_map[i] = __pgprot(v | user_pgprot);
4428@@ -541,10 +625,15 @@ static void __init build_mem_type_table(void)
4429
4430 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4431 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4432- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4433- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4434+ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4435+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4436+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4437+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4438+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4439+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4440 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4441- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4442+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4443+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4444 mem_types[MT_ROM].prot_sect |= cp->pmd;
4445
4446 switch (cp->pmd) {
4447@@ -1166,18 +1255,15 @@ void __init arm_mm_memblock_reserve(void)
4448 * called function. This means you can't use any function or debugging
4449 * method which may touch any device, otherwise the kernel _will_ crash.
4450 */
4451+
4452+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4453+
4454 static void __init devicemaps_init(struct machine_desc *mdesc)
4455 {
4456 struct map_desc map;
4457 unsigned long addr;
4458- void *vectors;
4459
4460- /*
4461- * Allocate the vector page early.
4462- */
4463- vectors = early_alloc(PAGE_SIZE * 2);
4464-
4465- early_trap_init(vectors);
4466+ early_trap_init(&vectors);
4467
4468 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4469 pmd_clear(pmd_off_k(addr));
4470@@ -1217,7 +1303,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
4471 * location (0xffff0000). If we aren't using high-vectors, also
4472 * create a mapping at the low-vectors virtual address.
4473 */
4474- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4475+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4476 map.virtual = 0xffff0000;
4477 map.length = PAGE_SIZE;
4478 #ifdef CONFIG_KUSER_HELPERS
4479@@ -1287,8 +1373,39 @@ static void __init map_lowmem(void)
4480 map.pfn = __phys_to_pfn(start);
4481 map.virtual = __phys_to_virt(start);
4482 map.length = end - start;
4483- map.type = MT_MEMORY;
4484
4485+#ifdef CONFIG_PAX_KERNEXEC
4486+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4487+ struct map_desc kernel;
4488+ struct map_desc initmap;
4489+
4490+ /* when freeing initmem we will make this RW */
4491+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4492+ initmap.virtual = (unsigned long)__init_begin;
4493+ initmap.length = _sdata - __init_begin;
4494+ initmap.type = MT_MEMORY_RWX;
4495+ create_mapping(&initmap);
4496+
4497+ /* when freeing initmem we will make this RX */
4498+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4499+ kernel.virtual = (unsigned long)_stext;
4500+ kernel.length = __init_begin - _stext;
4501+ kernel.type = MT_MEMORY_RWX;
4502+ create_mapping(&kernel);
4503+
4504+ if (map.virtual < (unsigned long)_stext) {
4505+ map.length = (unsigned long)_stext - map.virtual;
4506+ map.type = MT_MEMORY_RWX;
4507+ create_mapping(&map);
4508+ }
4509+
4510+ map.pfn = __phys_to_pfn(__pa(_sdata));
4511+ map.virtual = (unsigned long)_sdata;
4512+ map.length = end - __pa(_sdata);
4513+ }
4514+#endif
4515+
4516+ map.type = MT_MEMORY_RW;
4517 create_mapping(&map);
4518 }
4519 }
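The single MT_MEMORY type is split into RWX, RW, and RX variants so each kernel mapping gets only the permissions it needs; MT_MEMORY_RWX survives mainly for regions that are transiently both writable and executable, such as init memory before it is repermissioned. A toy selector showing the intended role of each type (the type names are from the patch, the selection logic is illustrative):

#include <stdio.h>

enum mem_type { MT_MEMORY_RWX, MT_MEMORY_RW, MT_MEMORY_RX };
static const char *names[] = { "RWX", "RW", "RX" };

static enum mem_type pick_type(int executable, int writable)
{
    if (executable && writable)
        return MT_MEMORY_RWX;   /* transitional: init before freeing */
    if (executable)
        return MT_MEMORY_RX;    /* kernel text, __arm_ioremap_exec() */
    return MT_MEMORY_RW;        /* data, lowmem default under KERNEXEC */
}

int main(void)
{
    printf("text    -> MT_MEMORY_%s\n", names[pick_type(1, 0)]);
    printf("data    -> MT_MEMORY_%s\n", names[pick_type(0, 1)]);
    printf("initmem -> MT_MEMORY_%s\n", names[pick_type(1, 1)]);
    return 0;
}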
4520diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4521index a5bc92d..0bb4730 100644
4522--- a/arch/arm/plat-omap/sram.c
4523+++ b/arch/arm/plat-omap/sram.c
4524@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4525 * Looks like we need to preserve some bootloader code at the
4526 * beginning of SRAM for jumping to flash for reboot to work...
4527 */
4528+ pax_open_kernel();
4529 memset_io(omap_sram_base + omap_sram_skip, 0,
4530 omap_sram_size - omap_sram_skip);
4531+ pax_close_kernel();
4532 }
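pax_open_kernel()/pax_close_kernel() bracket a deliberate write to memory that KERNEXEC otherwise keeps read-only, here the SRAM clear. A userspace analog of the transient write window, again with mprotect() as the stand-in mechanism:

#include <string.h>
#include <sys/mman.h>

#define SZ 4096

static void pax_open_kernel(void *p)  { mprotect(p, SZ, PROT_READ | PROT_WRITE); }
static void pax_close_kernel(void *p) { mprotect(p, SZ, PROT_READ); }

int main(void)
{
    char *sram = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (sram == MAP_FAILED)
        return 1;
    mprotect(sram, SZ, PROT_READ);   /* normally sealed, like KERNEXEC data */

    pax_open_kernel(sram);           /* open the transient write window */
    memset(sram, 0, SZ);             /* the memset_io() in the hunk above */
    pax_close_kernel(sram);          /* seal it again */
    return 0;
}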
4533diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4534index ce6d763..cfea917 100644
4535--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4536+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4537@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4538 int (*started)(unsigned ch);
4539 int (*flush)(unsigned ch);
4540 int (*stop)(unsigned ch);
4541-};
4542+} __no_const;
4543
4544 extern void *samsung_dmadev_get_ops(void);
4545 extern void *s3c_dma_get_ops(void);
4546diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
4547index 654f096..5546653 100644
4548--- a/arch/arm64/include/asm/tlb.h
4549+++ b/arch/arm64/include/asm/tlb.h
4550@@ -35,6 +35,7 @@ struct mmu_gather {
4551 struct mm_struct *mm;
4552 unsigned int fullmm;
4553 struct vm_area_struct *vma;
4554+ unsigned long start, end;
4555 unsigned long range_start;
4556 unsigned long range_end;
4557 unsigned int nr;
4558@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
4559 }
4560
4561 static inline void
4562-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
4563+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
4564 {
4565 tlb->mm = mm;
4566- tlb->fullmm = fullmm;
4567+ tlb->fullmm = !(start | (end+1));
4568+ tlb->start = start;
4569+ tlb->end = end;
4570 tlb->vma = NULL;
4571 tlb->max = ARRAY_SIZE(tlb->local);
4572 tlb->pages = tlb->local;
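The new tlb_gather_mmu() signature encodes a full-mm teardown as the range start == 0, end == ~0UL, which is exactly when (start | (end + 1)) is zero thanks to unsigned wraparound. A check of that encoding:

#include <assert.h>

static int fullmm(unsigned long start, unsigned long end)
{
    return !(start | (end + 1));    /* end + 1 wraps to 0 only for ~0UL */
}

int main(void)
{
    assert(fullmm(0, ~0UL));            /* whole address space */
    assert(!fullmm(0x1000, 0x2000));    /* partial range        */
    assert(!fullmm(0, 0x2000));
    return 0;
}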
4573diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
4574index f4726dc..39ed646 100644
4575--- a/arch/arm64/kernel/debug-monitors.c
4576+++ b/arch/arm64/kernel/debug-monitors.c
4577@@ -149,7 +149,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
4578 return NOTIFY_OK;
4579 }
4580
4581-static struct notifier_block __cpuinitdata os_lock_nb = {
4582+static struct notifier_block os_lock_nb = {
4583 .notifier_call = os_lock_notify,
4584 };
4585
4586diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
4587index 5ab825c..96aaec8 100644
4588--- a/arch/arm64/kernel/hw_breakpoint.c
4589+++ b/arch/arm64/kernel/hw_breakpoint.c
4590@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
4591 return NOTIFY_OK;
4592 }
4593
4594-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
4595+static struct notifier_block hw_breakpoint_reset_nb = {
4596 .notifier_call = hw_breakpoint_reset_notify,
4597 };
4598
4599diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4600index c3a58a1..78fbf54 100644
4601--- a/arch/avr32/include/asm/cache.h
4602+++ b/arch/avr32/include/asm/cache.h
4603@@ -1,8 +1,10 @@
4604 #ifndef __ASM_AVR32_CACHE_H
4605 #define __ASM_AVR32_CACHE_H
4606
4607+#include <linux/const.h>
4608+
4609 #define L1_CACHE_SHIFT 5
4610-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4611+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4612
4613 /*
4614 * Memory returned by kmalloc() may be used for DMA, so we must make
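The same L1_CACHE_BYTES change recurs across the cache.h hunks in this patch: _AC(1,UL) makes the constant an unsigned long in C while leaving a bare number for the assembler, so masks and shifts built from it do not silently truncate to int. The upstream <linux/const.h> trick it relies on:

#ifdef __ASSEMBLY__
#define _AC(X, Y)   X               /* assembler sees a bare constant */
#else
#define __AC(X, Y)  (X##Y)
#define _AC(X, Y)   __AC(X, Y)      /* C sees 1UL: full width, unsigned */
#endif

#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

_Static_assert(sizeof(L1_CACHE_BYTES) == sizeof(unsigned long),
               "constant is unsigned long in C, not plain int");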
4615diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4616index d232888..87c8df1 100644
4617--- a/arch/avr32/include/asm/elf.h
4618+++ b/arch/avr32/include/asm/elf.h
4619@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4620 the loader. We need to make sure that it is out of the way of the program
4621 that it will "exec", and that there is sufficient room for the brk. */
4622
4623-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4624+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4625
4626+#ifdef CONFIG_PAX_ASLR
4627+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4628+
4629+#define PAX_DELTA_MMAP_LEN 15
4630+#define PAX_DELTA_STACK_LEN 15
4631+#endif
4632
4633 /* This yields a mask that user programs can use to figure out what
4634 instruction set this CPU supports. This could be done in user space,
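A PAX_DELTA_*_LEN of 15 means 15 bits of randomness applied at page granularity. Rough arithmetic, assuming 4 KiB pages:

#include <stdio.h>

int main(void)
{
    const unsigned delta_len  = 15;
    const unsigned page_shift = 12;             /* 4 KiB pages */
    unsigned long positions = 1UL << delta_len;
    unsigned long span      = positions << page_shift;

    /* 32768 slots, a 128 MiB randomization window */
    printf("%lu slots, %lu MiB window\n", positions, span >> 20);
    return 0;
}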
4635diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4636index 479330b..53717a8 100644
4637--- a/arch/avr32/include/asm/kmap_types.h
4638+++ b/arch/avr32/include/asm/kmap_types.h
4639@@ -2,9 +2,9 @@
4640 #define __ASM_AVR32_KMAP_TYPES_H
4641
4642 #ifdef CONFIG_DEBUG_HIGHMEM
4643-# define KM_TYPE_NR 29
4644+# define KM_TYPE_NR 30
4645 #else
4646-# define KM_TYPE_NR 14
4647+# define KM_TYPE_NR 15
4648 #endif
4649
4650 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4651diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4652index b2f2d2d..d1c85cb 100644
4653--- a/arch/avr32/mm/fault.c
4654+++ b/arch/avr32/mm/fault.c
4655@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4656
4657 int exception_trace = 1;
4658
4659+#ifdef CONFIG_PAX_PAGEEXEC
4660+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4661+{
4662+ unsigned long i;
4663+
4664+ printk(KERN_ERR "PAX: bytes at PC: ");
4665+ for (i = 0; i < 20; i++) {
4666+ unsigned char c;
4667+ if (get_user(c, (unsigned char *)pc+i))
 4668+			printk(KERN_CONT "?? ");
4669+ else
4670+ printk(KERN_CONT "%02x ", c);
4671+ }
4672+ printk("\n");
4673+}
4674+#endif
4675+
4676 /*
4677 * This routine handles page faults. It determines the address and the
4678 * problem, and then passes it off to one of the appropriate routines.
4679@@ -174,6 +191,16 @@ bad_area:
4680 up_read(&mm->mmap_sem);
4681
4682 if (user_mode(regs)) {
4683+
4684+#ifdef CONFIG_PAX_PAGEEXEC
4685+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4686+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4687+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4688+ do_group_exit(SIGKILL);
4689+ }
4690+ }
4691+#endif
4692+
4693 if (exception_trace && printk_ratelimit())
4694 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4695 "sp %08lx ecr %lu\n",
4696diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4697index 568885a..f8008df 100644
4698--- a/arch/blackfin/include/asm/cache.h
4699+++ b/arch/blackfin/include/asm/cache.h
4700@@ -7,6 +7,7 @@
4701 #ifndef __ARCH_BLACKFIN_CACHE_H
4702 #define __ARCH_BLACKFIN_CACHE_H
4703
4704+#include <linux/const.h>
4705 #include <linux/linkage.h> /* for asmlinkage */
4706
4707 /*
4708@@ -14,7 +15,7 @@
4709 * Blackfin loads 32 bytes for cache
4710 */
4711 #define L1_CACHE_SHIFT 5
4712-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4713+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4714 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4715
4716 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4717diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4718index aea2718..3639a60 100644
4719--- a/arch/cris/include/arch-v10/arch/cache.h
4720+++ b/arch/cris/include/arch-v10/arch/cache.h
4721@@ -1,8 +1,9 @@
4722 #ifndef _ASM_ARCH_CACHE_H
4723 #define _ASM_ARCH_CACHE_H
4724
4725+#include <linux/const.h>
4726 /* Etrax 100LX have 32-byte cache-lines. */
4727-#define L1_CACHE_BYTES 32
4728 #define L1_CACHE_SHIFT 5
4729+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4730
4731 #endif /* _ASM_ARCH_CACHE_H */
4732diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4733index 7caf25d..ee65ac5 100644
4734--- a/arch/cris/include/arch-v32/arch/cache.h
4735+++ b/arch/cris/include/arch-v32/arch/cache.h
4736@@ -1,11 +1,12 @@
4737 #ifndef _ASM_CRIS_ARCH_CACHE_H
4738 #define _ASM_CRIS_ARCH_CACHE_H
4739
4740+#include <linux/const.h>
4741 #include <arch/hwregs/dma.h>
4742
4743 /* A cache-line is 32 bytes. */
4744-#define L1_CACHE_BYTES 32
4745 #define L1_CACHE_SHIFT 5
4746+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4747
4748 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4749
4750diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4751index b86329d..6709906 100644
4752--- a/arch/frv/include/asm/atomic.h
4753+++ b/arch/frv/include/asm/atomic.h
4754@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4755 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4756 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4757
4758+#define atomic64_read_unchecked(v) atomic64_read(v)
4759+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4760+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4761+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4762+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4763+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4764+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4765+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4766+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4767+
4768 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4769 {
4770 int c, old;
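The _unchecked aliases exist because PaX splits atomics into overflow-checked counters (the default) and deliberately wrapping ones; frv has no REFCOUNT instrumentation, so both variants collapse to the same operation. A sketch of the convention, with __atomic_fetch_add() standing in for the arch implementation:

#include <stdio.h>

typedef struct { long long counter; } atomic64_t;
typedef struct { long long counter; } atomic64_unchecked_t;

static void atomic64_inc(atomic64_t *v)
{
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_SEQ_CST);
}

/* No overflow instrumentation on this arch: the unchecked op is simply
 * the plain op applied to the wrap-allowed counter type. */
#define atomic64_inc_unchecked(v) atomic64_inc((atomic64_t *)(v))

int main(void)
{
    atomic64_unchecked_t stats = { 0 };
    atomic64_inc_unchecked(&stats);
    printf("%lld\n", stats.counter);
    return 0;
}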
4771diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4772index 2797163..c2a401d 100644
4773--- a/arch/frv/include/asm/cache.h
4774+++ b/arch/frv/include/asm/cache.h
4775@@ -12,10 +12,11 @@
4776 #ifndef __ASM_CACHE_H
4777 #define __ASM_CACHE_H
4778
4779+#include <linux/const.h>
4780
4781 /* bytes per L1 cache line */
4782 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4783-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4784+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4785
4786 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4787 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4788diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4789index 43901f2..0d8b865 100644
4790--- a/arch/frv/include/asm/kmap_types.h
4791+++ b/arch/frv/include/asm/kmap_types.h
4792@@ -2,6 +2,6 @@
4793 #ifndef _ASM_KMAP_TYPES_H
4794 #define _ASM_KMAP_TYPES_H
4795
4796-#define KM_TYPE_NR 17
4797+#define KM_TYPE_NR 18
4798
4799 #endif
4800diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4801index 836f147..4cf23f5 100644
4802--- a/arch/frv/mm/elf-fdpic.c
4803+++ b/arch/frv/mm/elf-fdpic.c
4804@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4805 {
4806 struct vm_area_struct *vma;
4807 struct vm_unmapped_area_info info;
4808+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4809
4810 if (len > TASK_SIZE)
4811 return -ENOMEM;
4812@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4813 if (addr) {
4814 addr = PAGE_ALIGN(addr);
4815 vma = find_vma(current->mm, addr);
4816- if (TASK_SIZE - len >= addr &&
4817- (!vma || addr + len <= vma->vm_start))
4818+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4819 goto success;
4820 }
4821
4822@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4823 info.high_limit = (current->mm->start_stack - 0x00200000);
4824 info.align_mask = 0;
4825 info.align_offset = 0;
4826+ info.threadstack_offset = offset;
4827 addr = vm_unmapped_area(&info);
4828 if (!(addr & ~PAGE_MASK))
4829 goto success;
4830diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4831index f4ca594..adc72fd6 100644
4832--- a/arch/hexagon/include/asm/cache.h
4833+++ b/arch/hexagon/include/asm/cache.h
4834@@ -21,9 +21,11 @@
4835 #ifndef __ASM_CACHE_H
4836 #define __ASM_CACHE_H
4837
4838+#include <linux/const.h>
4839+
4840 /* Bytes per L1 cache line */
4841-#define L1_CACHE_SHIFT (5)
4842-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4843+#define L1_CACHE_SHIFT 5
4844+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4845
4846 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4847 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4848diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4849index 6e6fe18..a6ae668 100644
4850--- a/arch/ia64/include/asm/atomic.h
4851+++ b/arch/ia64/include/asm/atomic.h
4852@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4853 #define atomic64_inc(v) atomic64_add(1, (v))
4854 #define atomic64_dec(v) atomic64_sub(1, (v))
4855
4856+#define atomic64_read_unchecked(v) atomic64_read(v)
4857+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4858+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4859+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4860+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4861+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4862+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4863+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4864+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4865+
4866 /* Atomic operations are already serializing */
4867 #define smp_mb__before_atomic_dec() barrier()
4868 #define smp_mb__after_atomic_dec() barrier()
4869diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4870index 988254a..e1ee885 100644
4871--- a/arch/ia64/include/asm/cache.h
4872+++ b/arch/ia64/include/asm/cache.h
4873@@ -1,6 +1,7 @@
4874 #ifndef _ASM_IA64_CACHE_H
4875 #define _ASM_IA64_CACHE_H
4876
4877+#include <linux/const.h>
4878
4879 /*
4880 * Copyright (C) 1998-2000 Hewlett-Packard Co
4881@@ -9,7 +10,7 @@
4882
4883 /* Bytes per L1 (data) cache line. */
4884 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4885-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4886+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4887
4888 #ifdef CONFIG_SMP
4889 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4890diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4891index 5a83c5c..4d7f553 100644
4892--- a/arch/ia64/include/asm/elf.h
4893+++ b/arch/ia64/include/asm/elf.h
4894@@ -42,6 +42,13 @@
4895 */
4896 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4897
4898+#ifdef CONFIG_PAX_ASLR
4899+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4900+
4901+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4902+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4903+#endif
4904+
4905 #define PT_IA_64_UNWIND 0x70000001
4906
4907 /* IA-64 relocations: */
4908diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4909index 96a8d92..617a1cf 100644
4910--- a/arch/ia64/include/asm/pgalloc.h
4911+++ b/arch/ia64/include/asm/pgalloc.h
4912@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4913 pgd_val(*pgd_entry) = __pa(pud);
4914 }
4915
4916+static inline void
4917+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4918+{
4919+ pgd_populate(mm, pgd_entry, pud);
4920+}
4921+
4922 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4923 {
4924 return quicklist_alloc(0, GFP_KERNEL, NULL);
4925@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4926 pud_val(*pud_entry) = __pa(pmd);
4927 }
4928
4929+static inline void
4930+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4931+{
4932+ pud_populate(mm, pud_entry, pmd);
4933+}
4934+
4935 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
4936 {
4937 return quicklist_alloc(0, GFP_KERNEL, NULL);
4938diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
4939index 815810c..d60bd4c 100644
4940--- a/arch/ia64/include/asm/pgtable.h
4941+++ b/arch/ia64/include/asm/pgtable.h
4942@@ -12,7 +12,7 @@
4943 * David Mosberger-Tang <davidm@hpl.hp.com>
4944 */
4945
4946-
4947+#include <linux/const.h>
4948 #include <asm/mman.h>
4949 #include <asm/page.h>
4950 #include <asm/processor.h>
4951@@ -142,6 +142,17 @@
4952 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4953 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4954 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
4955+
4956+#ifdef CONFIG_PAX_PAGEEXEC
4957+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
4958+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4959+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4960+#else
4961+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4962+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4963+# define PAGE_COPY_NOEXEC PAGE_COPY
4964+#endif
4965+
4966 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
4967 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
4968 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
4969diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
4970index 54ff557..70c88b7 100644
4971--- a/arch/ia64/include/asm/spinlock.h
4972+++ b/arch/ia64/include/asm/spinlock.h
4973@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
4974 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
4975
4976 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
4977- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
4978+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
4979 }
4980
4981 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
4982diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
4983index ef3a9de..bc5efc7 100644
4984--- a/arch/ia64/include/asm/tlb.h
4985+++ b/arch/ia64/include/asm/tlb.h
4986@@ -22,7 +22,7 @@
4987 * unmapping a portion of the virtual address space, these hooks are called according to
4988 * the following template:
4989 *
4990- * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM
4991+ * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM
4992 * {
4993 * for each vma that needs a shootdown do {
4994 * tlb_start_vma(tlb, vma);
4995@@ -58,6 +58,7 @@ struct mmu_gather {
4996 unsigned int max;
4997 unsigned char fullmm; /* non-zero means full mm flush */
4998 unsigned char need_flush; /* really unmapped some PTEs? */
4999+ unsigned long start, end;
5000 unsigned long start_addr;
5001 unsigned long end_addr;
5002 struct page **pages;
5003@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
5004
5005
5006 static inline void
5007-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
5008+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
5009 {
5010 tlb->mm = mm;
5011 tlb->max = ARRAY_SIZE(tlb->local);
5012 tlb->pages = tlb->local;
5013 tlb->nr = 0;
5014- tlb->fullmm = full_mm_flush;
5015+ tlb->fullmm = !(start | (end+1));
5016+ tlb->start = start;
5017+ tlb->end = end;
5018 tlb->start_addr = ~0UL;
5019 }
5020
5021diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5022index 449c8c0..18965fb 100644
5023--- a/arch/ia64/include/asm/uaccess.h
5024+++ b/arch/ia64/include/asm/uaccess.h
5025@@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5026 static inline unsigned long
5027 __copy_to_user (void __user *to, const void *from, unsigned long count)
5028 {
5029+ if (count > INT_MAX)
5030+ return count;
5031+
5032+ if (!__builtin_constant_p(count))
5033+ check_object_size(from, count, true);
5034+
5035 return __copy_user(to, (__force void __user *) from, count);
5036 }
5037
5038 static inline unsigned long
5039 __copy_from_user (void *to, const void __user *from, unsigned long count)
5040 {
5041+ if (count > INT_MAX)
5042+ return count;
5043+
5044+ if (!__builtin_constant_p(count))
5045+ check_object_size(to, count, false);
5046+
5047 return __copy_user((__force void __user *) to, from, count);
5048 }
5049
5050@@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5051 ({ \
5052 void __user *__cu_to = (to); \
5053 const void *__cu_from = (from); \
5054- long __cu_len = (n); \
5055+ unsigned long __cu_len = (n); \
5056 \
5057- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5058+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5059+ if (!__builtin_constant_p(n)) \
5060+ check_object_size(__cu_from, __cu_len, true); \
5061 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5062+ } \
5063 __cu_len; \
5064 })
5065
5066@@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5067 ({ \
5068 void *__cu_to = (to); \
5069 const void __user *__cu_from = (from); \
5070- long __cu_len = (n); \
5071+ unsigned long __cu_len = (n); \
5072 \
5073 __chk_user_ptr(__cu_from); \
5074- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5075+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5076+ if (!__builtin_constant_p(n)) \
5077+ check_object_size(__cu_to, __cu_len, false); \
5078 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5079+ } \
5080 __cu_len; \
5081 })
5082
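
The __copy_to_user()/__copy_from_user() hunks above show the size-check pattern this patch applies across the uaccess layers: a length above INT_MAX (almost certainly a negative value cast to unsigned) is refused up front by returning the full count as "bytes not copied", and the PAX_USERCOPY object-size check runs only for lengths that are not compile-time constants. A simplified, architecture-neutral sketch of the same shape; do_arch_copy() and object_size_check() are illustrative stand-ins, not kernel APIs:

#include <limits.h>

/* Stand-ins for the arch copy primitive and the PAX_USERCOPY check. */
extern unsigned long do_arch_copy(void *dst, const void *src, unsigned long n);
extern void object_size_check(const void *obj, unsigned long n, int to_user);

static unsigned long
copy_to_user_sketch(void *to, const void *from, unsigned long count)
{
	if (count > INT_MAX)		/* likely a negative size, cast */
		return count;		/* report: nothing was copied */

	if (!__builtin_constant_p(count))
		object_size_check(from, count, 1);

	return do_arch_copy(to, from, count);
}
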
5083diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
5084index 2d67317..07d8bfa 100644
5085--- a/arch/ia64/kernel/err_inject.c
5086+++ b/arch/ia64/kernel/err_inject.c
5087@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
5088 return NOTIFY_OK;
5089 }
5090
5091-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
5092+static struct notifier_block err_inject_cpu_notifier =
5093 {
5094 .notifier_call = err_inject_cpu_callback,
5095 };
5096diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
5097index d7396db..b33e873 100644
5098--- a/arch/ia64/kernel/mca.c
5099+++ b/arch/ia64/kernel/mca.c
5100@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
5101 return NOTIFY_OK;
5102 }
5103
5104-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
5105+static struct notifier_block mca_cpu_notifier = {
5106 .notifier_call = mca_cpu_callback
5107 };
5108
5109diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5110index 24603be..948052d 100644
5111--- a/arch/ia64/kernel/module.c
5112+++ b/arch/ia64/kernel/module.c
5113@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5114 void
5115 module_free (struct module *mod, void *module_region)
5116 {
5117- if (mod && mod->arch.init_unw_table &&
5118- module_region == mod->module_init) {
5119+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5120 unw_remove_unwind_table(mod->arch.init_unw_table);
5121 mod->arch.init_unw_table = NULL;
5122 }
5123@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5124 }
5125
5126 static inline int
5127+in_init_rx (const struct module *mod, uint64_t addr)
5128+{
5129+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5130+}
5131+
5132+static inline int
5133+in_init_rw (const struct module *mod, uint64_t addr)
5134+{
5135+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5136+}
5137+
5138+static inline int
5139 in_init (const struct module *mod, uint64_t addr)
5140 {
5141- return addr - (uint64_t) mod->module_init < mod->init_size;
5142+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5143+}
5144+
5145+static inline int
5146+in_core_rx (const struct module *mod, uint64_t addr)
5147+{
5148+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5149+}
5150+
5151+static inline int
5152+in_core_rw (const struct module *mod, uint64_t addr)
5153+{
5154+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5155 }
5156
5157 static inline int
5158 in_core (const struct module *mod, uint64_t addr)
5159 {
5160- return addr - (uint64_t) mod->module_core < mod->core_size;
5161+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5162 }
5163
5164 static inline int
5165@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5166 break;
5167
5168 case RV_BDREL:
5169- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5170+ if (in_init_rx(mod, val))
5171+ val -= (uint64_t) mod->module_init_rx;
5172+ else if (in_init_rw(mod, val))
5173+ val -= (uint64_t) mod->module_init_rw;
5174+ else if (in_core_rx(mod, val))
5175+ val -= (uint64_t) mod->module_core_rx;
5176+ else if (in_core_rw(mod, val))
5177+ val -= (uint64_t) mod->module_core_rw;
5178 break;
5179
5180 case RV_LTV:
5181@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5182 * addresses have been selected...
5183 */
5184 uint64_t gp;
5185- if (mod->core_size > MAX_LTOFF)
5186+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5187 /*
5188 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5189 * at the end of the module.
5190 */
5191- gp = mod->core_size - MAX_LTOFF / 2;
5192+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5193 else
5194- gp = mod->core_size / 2;
5195- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5196+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5197+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5198 mod->arch.gp = gp;
5199 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5200 }
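
The in_init_rx()/in_core_rw() helpers above split each module into separate RX (code) and RW (data) regions and test membership with the unsigned-wraparound idiom addr - base < size: a single compare that is true exactly when base <= addr < base + size, with no overflow hazard. A self-contained illustration:

#include <assert.h>
#include <stdint.h>

/* True iff base <= addr < base + size. If addr < base, the unsigned
 * subtraction wraps to a huge value >= size and the test fails. */
static int in_range(uint64_t addr, uint64_t base, uint64_t size)
{
	return addr - base < size;
}

int main(void)
{
	assert(in_range(0x1000, 0x1000, 0x100));	/* first byte */
	assert(in_range(0x10ff, 0x1000, 0x100));	/* last byte */
	assert(!in_range(0x1100, 0x1000, 0x100));	/* one past the end */
	assert(!in_range(0x0fff, 0x1000, 0x100));	/* below base: wraps */
	return 0;
}
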
5201diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5202index 2b3c2d7..a318d84 100644
5203--- a/arch/ia64/kernel/palinfo.c
5204+++ b/arch/ia64/kernel/palinfo.c
5205@@ -980,7 +980,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
5206 return NOTIFY_OK;
5207 }
5208
5209-static struct notifier_block __refdata palinfo_cpu_notifier =
5210+static struct notifier_block palinfo_cpu_notifier =
5211 {
5212 .notifier_call = palinfo_cpu_callback,
5213 .priority = 0,
5214diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
5215index 4bc580a..7767f24 100644
5216--- a/arch/ia64/kernel/salinfo.c
5217+++ b/arch/ia64/kernel/salinfo.c
5218@@ -609,7 +609,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
5219 return NOTIFY_OK;
5220 }
5221
5222-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
5223+static struct notifier_block salinfo_cpu_notifier =
5224 {
5225 .notifier_call = salinfo_cpu_callback,
5226 .priority = 0,
5227diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5228index 41e33f8..65180b2 100644
5229--- a/arch/ia64/kernel/sys_ia64.c
5230+++ b/arch/ia64/kernel/sys_ia64.c
5231@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5232 unsigned long align_mask = 0;
5233 struct mm_struct *mm = current->mm;
5234 struct vm_unmapped_area_info info;
5235+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5236
5237 if (len > RGN_MAP_LIMIT)
5238 return -ENOMEM;
5239@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5240 if (REGION_NUMBER(addr) == RGN_HPAGE)
5241 addr = 0;
5242 #endif
5243+
5244+#ifdef CONFIG_PAX_RANDMMAP
5245+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5246+ addr = mm->free_area_cache;
5247+ else
5248+#endif
5249+
5250 if (!addr)
5251 addr = TASK_UNMAPPED_BASE;
5252
5253@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5254 info.high_limit = TASK_SIZE;
5255 info.align_mask = align_mask;
5256 info.align_offset = 0;
5257+ info.threadstack_offset = offset;
5258 return vm_unmapped_area(&info);
5259 }
5260
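
This hunk introduces plumbing that repeats throughout the patch (ia64 hugetlbpage.c, the MIPS and parisc get_unmapped_area variants below): gr_rand_threadstack_offset() picks a randomized gap to keep between new mappings and the thread stack, and the new vm_unmapped_area_info.threadstack_offset field carries it into the allocator, where grsecurity's check_heap_stack_gap() enforces it. The caller-side shape, as a sketch (the two gr_* names and the field are grsecurity additions, not mainline API):

static unsigned long get_area_sketch(struct file *filp, unsigned long len,
				     unsigned long flags)
{
	struct vm_unmapped_area_info info;
	unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = 0;
	info.align_offset = 0;
	info.threadstack_offset = offset;	/* consumed by check_heap_stack_gap() */
	return vm_unmapped_area(&info);
}
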
5261diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
5262index dc00b2c..cce53c2 100644
5263--- a/arch/ia64/kernel/topology.c
5264+++ b/arch/ia64/kernel/topology.c
5265@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
5266 return NOTIFY_OK;
5267 }
5268
5269-static struct notifier_block __cpuinitdata cache_cpu_notifier =
5270+static struct notifier_block cache_cpu_notifier =
5271 {
5272 .notifier_call = cache_cpu_callback
5273 };
5274diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5275index 0ccb28f..8992469 100644
5276--- a/arch/ia64/kernel/vmlinux.lds.S
5277+++ b/arch/ia64/kernel/vmlinux.lds.S
5278@@ -198,7 +198,7 @@ SECTIONS {
5279 /* Per-cpu data: */
5280 . = ALIGN(PERCPU_PAGE_SIZE);
5281 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5282- __phys_per_cpu_start = __per_cpu_load;
5283+ __phys_per_cpu_start = per_cpu_load;
5284 /*
5285 * ensure percpu data fits
5286 * into percpu page size
5287diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5288index 6cf0341..d352594 100644
5289--- a/arch/ia64/mm/fault.c
5290+++ b/arch/ia64/mm/fault.c
5291@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5292 return pte_present(pte);
5293 }
5294
5295+#ifdef CONFIG_PAX_PAGEEXEC
5296+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5297+{
5298+ unsigned long i;
5299+
5300+ printk(KERN_ERR "PAX: bytes at PC: ");
5301+ for (i = 0; i < 8; i++) {
5302+ unsigned int c;
5303+ if (get_user(c, (unsigned int *)pc+i))
5304+ printk(KERN_CONT "???????? ");
5305+ else
5306+ printk(KERN_CONT "%08x ", c);
5307+ }
5308+ printk("\n");
5309+}
5310+#endif
5311+
5312 # define VM_READ_BIT 0
5313 # define VM_WRITE_BIT 1
5314 # define VM_EXEC_BIT 2
5315@@ -149,8 +166,21 @@ retry:
5316 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5317 goto bad_area;
5318
5319- if ((vma->vm_flags & mask) != mask)
5320+ if ((vma->vm_flags & mask) != mask) {
5321+
5322+#ifdef CONFIG_PAX_PAGEEXEC
5323+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5324+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5325+ goto bad_area;
5326+
5327+ up_read(&mm->mmap_sem);
5328+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5329+ do_group_exit(SIGKILL);
5330+ }
5331+#endif
5332+
5333 goto bad_area;
5334+ }
5335
5336 /*
5337 * If for any reason at all we couldn't handle the fault, make
5338diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5339index 76069c1..c2aa816 100644
5340--- a/arch/ia64/mm/hugetlbpage.c
5341+++ b/arch/ia64/mm/hugetlbpage.c
5342@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5343 unsigned long pgoff, unsigned long flags)
5344 {
5345 struct vm_unmapped_area_info info;
5346+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5347
5348 if (len > RGN_MAP_LIMIT)
5349 return -ENOMEM;
5350@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5351 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5352 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5353 info.align_offset = 0;
5354+ info.threadstack_offset = offset;
5355 return vm_unmapped_area(&info);
5356 }
5357
5358diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5359index d1fe4b4..2628f37 100644
5360--- a/arch/ia64/mm/init.c
5361+++ b/arch/ia64/mm/init.c
5362@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5363 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5364 vma->vm_end = vma->vm_start + PAGE_SIZE;
5365 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5366+
5367+#ifdef CONFIG_PAX_PAGEEXEC
5368+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5369+ vma->vm_flags &= ~VM_EXEC;
5370+
5371+#ifdef CONFIG_PAX_MPROTECT
5372+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5373+ vma->vm_flags &= ~VM_MAYEXEC;
5374+#endif
5375+
5376+ }
5377+#endif
5378+
5379 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5380 down_write(&current->mm->mmap_sem);
5381 if (insert_vm_struct(current->mm, vma)) {
5382diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5383index 40b3ee9..8c2c112 100644
5384--- a/arch/m32r/include/asm/cache.h
5385+++ b/arch/m32r/include/asm/cache.h
5386@@ -1,8 +1,10 @@
5387 #ifndef _ASM_M32R_CACHE_H
5388 #define _ASM_M32R_CACHE_H
5389
5390+#include <linux/const.h>
5391+
5392 /* L1 cache line size */
5393 #define L1_CACHE_SHIFT 4
5394-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5395+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5396
5397 #endif /* _ASM_M32R_CACHE_H */
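
This is the first of many identical cache.h edits in the patch (m68k, microblaze, MIPS, mn10300, openrisc, parisc, and powerpc follow below). The point of _AC(1,UL) is that the constant keeps its UL suffix in C yet degrades to a bare integer in assembly, where 1UL would not parse; include/uapi/linux/const.h defines it roughly as:

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler sees: 1 << L1_CACHE_SHIFT */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C sees: (1UL << L1_CACHE_SHIFT) */
#endif

With the UL suffix, expressions such as addr & ~(L1_CACHE_BYTES - 1) are computed in unsigned long rather than int, which matters once the macro is applied to 64-bit addresses on the wider targets below.
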
5398diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5399index 82abd15..d95ae5d 100644
5400--- a/arch/m32r/lib/usercopy.c
5401+++ b/arch/m32r/lib/usercopy.c
5402@@ -14,6 +14,9 @@
5403 unsigned long
5404 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5405 {
5406+ if ((long)n < 0)
5407+ return n;
5408+
5409 prefetch(from);
5410 if (access_ok(VERIFY_WRITE, to, n))
5411 __copy_user(to,from,n);
5412@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5413 unsigned long
5414 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5415 {
5416+ if ((long)n < 0)
5417+ return n;
5418+
5419 prefetchw(to);
5420 if (access_ok(VERIFY_READ, from, n))
5421 __copy_user_zeroing(to,from,n);
5422diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5423index 0395c51..5f26031 100644
5424--- a/arch/m68k/include/asm/cache.h
5425+++ b/arch/m68k/include/asm/cache.h
5426@@ -4,9 +4,11 @@
5427 #ifndef __ARCH_M68K_CACHE_H
5428 #define __ARCH_M68K_CACHE_H
5429
5430+#include <linux/const.h>
5431+
5432 /* bytes per L1 cache line */
5433 #define L1_CACHE_SHIFT 4
5434-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5435+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5436
5437 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5438
5439diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5440index 3c52fa6..11b2ad8 100644
5441--- a/arch/metag/mm/hugetlbpage.c
5442+++ b/arch/metag/mm/hugetlbpage.c
5443@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5444 info.high_limit = TASK_SIZE;
5445 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5446 info.align_offset = 0;
5447+ info.threadstack_offset = 0;
5448 return vm_unmapped_area(&info);
5449 }
5450
5451diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5452index 4efe96a..60e8699 100644
5453--- a/arch/microblaze/include/asm/cache.h
5454+++ b/arch/microblaze/include/asm/cache.h
5455@@ -13,11 +13,12 @@
5456 #ifndef _ASM_MICROBLAZE_CACHE_H
5457 #define _ASM_MICROBLAZE_CACHE_H
5458
5459+#include <linux/const.h>
5460 #include <asm/registers.h>
5461
5462 #define L1_CACHE_SHIFT 5
5463 /* word-granular cache in microblaze */
5464-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5465+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5466
5467 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5468
5469diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5470index 08b6079..eb272cf 100644
5471--- a/arch/mips/include/asm/atomic.h
5472+++ b/arch/mips/include/asm/atomic.h
5473@@ -21,6 +21,10 @@
5474 #include <asm/cmpxchg.h>
5475 #include <asm/war.h>
5476
5477+#ifdef CONFIG_GENERIC_ATOMIC64
5478+#include <asm-generic/atomic64.h>
5479+#endif
5480+
5481 #define ATOMIC_INIT(i) { (i) }
5482
5483 /*
5484@@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5485 */
5486 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
5487
5488+#define atomic64_read_unchecked(v) atomic64_read(v)
5489+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5490+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5491+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5492+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5493+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5494+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5495+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5496+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5497+
5498 #endif /* CONFIG_64BIT */
5499
5500 /*
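
MIPS has no PAX_REFCOUNT instrumentation, so the *_unchecked atomic64 ops added above (parisc and powerpc receive the same block later in this section) are plain aliases; they exist so shared code can mark wrap-tolerant counters. A hypothetical call-site sketch, assuming PaX's atomic64_unchecked_t typedef from elsewhere in the patch:

struct conn_stats {
	atomic64_unchecked_t rx_bytes;	/* may wrap: overflow is benign */
	atomic64_t refcnt;		/* must not wrap: checked where supported */
};

static void conn_account(struct conn_stats *s, long len)
{
	atomic64_add_unchecked(len, &s->rx_bytes);	/* never instrumented */
	atomic64_inc(&s->refcnt);	/* traps on overflow under PAX_REFCOUNT */
}
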
5501diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
5502index b4db69f..8f3b093 100644
5503--- a/arch/mips/include/asm/cache.h
5504+++ b/arch/mips/include/asm/cache.h
5505@@ -9,10 +9,11 @@
5506 #ifndef _ASM_CACHE_H
5507 #define _ASM_CACHE_H
5508
5509+#include <linux/const.h>
5510 #include <kmalloc.h>
5511
5512 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
5513-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5514+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5515
5516 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5517 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5518diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
5519index cf3ae24..238d22f 100644
5520--- a/arch/mips/include/asm/elf.h
5521+++ b/arch/mips/include/asm/elf.h
5522@@ -372,13 +372,16 @@ extern const char *__elf_platform;
5523 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5524 #endif
5525
5526+#ifdef CONFIG_PAX_ASLR
5527+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5528+
5529+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5530+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5531+#endif
5532+
5533 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
5534 struct linux_binprm;
5535 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
5536 int uses_interp);
5537
5538-struct mm_struct;
5539-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
5540-#define arch_randomize_brk arch_randomize_brk
5541-
5542 #endif /* _ASM_ELF_H */
5543diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
5544index c1f6afa..38cc6e9 100644
5545--- a/arch/mips/include/asm/exec.h
5546+++ b/arch/mips/include/asm/exec.h
5547@@ -12,6 +12,6 @@
5548 #ifndef _ASM_EXEC_H
5549 #define _ASM_EXEC_H
5550
5551-extern unsigned long arch_align_stack(unsigned long sp);
5552+#define arch_align_stack(x) ((x) & ~0xfUL)
5553
5554 #endif /* _ASM_EXEC_H */
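
Turning arch_align_stack() into a pure mask drops the per-exec stack-start randomization that the process.c implementation removed further below used to apply (PaX supplies its own via delta_stack) and guarantees 16-byte alignment by rounding down. The macro's arithmetic:

#include <assert.h>

#define arch_align_stack(x) ((x) & ~0xfUL)

int main(void)
{
	assert(arch_align_stack(0x7fff1234UL) == 0x7fff1230UL); /* rounded down */
	assert(arch_align_stack(0x7fff1230UL) == 0x7fff1230UL); /* already aligned */
	return 0;
}
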
5555diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
5556index d44622c..64990d2 100644
5557--- a/arch/mips/include/asm/local.h
5558+++ b/arch/mips/include/asm/local.h
5559@@ -12,15 +12,25 @@ typedef struct
5560 atomic_long_t a;
5561 } local_t;
5562
5563+typedef struct {
5564+ atomic_long_unchecked_t a;
5565+} local_unchecked_t;
5566+
5567 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
5568
5569 #define local_read(l) atomic_long_read(&(l)->a)
5570+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
5571 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
5572+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
5573
5574 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
5575+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
5576 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
5577+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
5578 #define local_inc(l) atomic_long_inc(&(l)->a)
5579+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
5580 #define local_dec(l) atomic_long_dec(&(l)->a)
5581+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
5582
5583 /*
5584 * Same as above, but return the result value
5585@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
5586 return result;
5587 }
5588
5589+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
5590+{
5591+ unsigned long result;
5592+
5593+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5594+ unsigned long temp;
5595+
5596+ __asm__ __volatile__(
5597+ " .set mips3 \n"
5598+ "1:" __LL "%1, %2 # local_add_return \n"
5599+ " addu %0, %1, %3 \n"
5600+ __SC "%0, %2 \n"
5601+ " beqzl %0, 1b \n"
5602+ " addu %0, %1, %3 \n"
5603+ " .set mips0 \n"
5604+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
5605+ : "Ir" (i), "m" (l->a.counter)
5606+ : "memory");
5607+ } else if (kernel_uses_llsc) {
5608+ unsigned long temp;
5609+
5610+ __asm__ __volatile__(
5611+ " .set mips3 \n"
5612+ "1:" __LL "%1, %2 # local_add_return \n"
5613+ " addu %0, %1, %3 \n"
5614+ __SC "%0, %2 \n"
5615+ " beqz %0, 1b \n"
5616+ " addu %0, %1, %3 \n"
5617+ " .set mips0 \n"
5618+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
5619+ : "Ir" (i), "m" (l->a.counter)
5620+ : "memory");
5621+ } else {
5622+ unsigned long flags;
5623+
5624+ local_irq_save(flags);
5625+ result = l->a.counter;
5626+ result += i;
5627+ l->a.counter = result;
5628+ local_irq_restore(flags);
5629+ }
5630+
5631+ return result;
5632+}
5633+
5634 static __inline__ long local_sub_return(long i, local_t * l)
5635 {
5636 unsigned long result;
5637@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
5638
5639 #define local_cmpxchg(l, o, n) \
5640 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
5641+#define local_cmpxchg_unchecked(l, o, n) \
5642+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
5643 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
5644
5645 /**
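
local_add_return_unchecked() above mirrors the three stock MIPS strategies: an LL/SC loop with the branch-likely R10000_LLSC_WAR workaround, a plain LL/SC loop, and an irq-save fallback for CPUs without LL/SC. The loop is the classic load-linked/store-conditional retry; in portable C11 the equivalent shape is a compare-and-swap loop:

#include <stdatomic.h>

/* Portable analogue of the LL/SC retry loop: reload, compute, attempt
 * to publish, and retry if another CPU raced in between. */
static long local_add_return_sketch(long i, _Atomic long *counter)
{
	long oldval = atomic_load_explicit(counter, memory_order_relaxed);
	long newval;

	do {
		newval = oldval + i;
		/* On failure, oldval is refreshed with the current value. */
	} while (!atomic_compare_exchange_weak(counter, &oldval, newval));

	return newval;
}
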
5646diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
5647index f59552f..3abe9b9 100644
5648--- a/arch/mips/include/asm/page.h
5649+++ b/arch/mips/include/asm/page.h
5650@@ -95,7 +95,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
5651 #ifdef CONFIG_CPU_MIPS32
5652 typedef struct { unsigned long pte_low, pte_high; } pte_t;
5653 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
5654- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
5655+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
5656 #else
5657 typedef struct { unsigned long long pte; } pte_t;
5658 #define pte_val(x) ((x).pte)
5659diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
5660index 881d18b..cea38bc 100644
5661--- a/arch/mips/include/asm/pgalloc.h
5662+++ b/arch/mips/include/asm/pgalloc.h
5663@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5664 {
5665 set_pud(pud, __pud((unsigned long)pmd));
5666 }
5667+
5668+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5669+{
5670+ pud_populate(mm, pud, pmd);
5671+}
5672 #endif
5673
5674 /*
5675diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
5676index 895320e..bf63e10 100644
5677--- a/arch/mips/include/asm/thread_info.h
5678+++ b/arch/mips/include/asm/thread_info.h
5679@@ -115,6 +115,8 @@ static inline struct thread_info *current_thread_info(void)
5680 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
5681 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
5682 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
5683+/* li takes a 32bit immediate */
5684+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
5685 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
5686
5687 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
5688@@ -130,15 +132,18 @@ static inline struct thread_info *current_thread_info(void)
5689 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
5690 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
5691 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
5692+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5693+
5694+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5695
5696 /* work to do in syscall_trace_leave() */
5697-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
5698+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5699
5700 /* work to do on interrupt/exception return */
5701 #define _TIF_WORK_MASK \
5702 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
5703 /* work to do on any return to u-space */
5704-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
5705+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
5706
5707 #endif /* __KERNEL__ */
5708
5709diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
5710index 1188e00..41cf144 100644
5711--- a/arch/mips/kernel/binfmt_elfn32.c
5712+++ b/arch/mips/kernel/binfmt_elfn32.c
5713@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5714 #undef ELF_ET_DYN_BASE
5715 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5716
5717+#ifdef CONFIG_PAX_ASLR
5718+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5719+
5720+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5721+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5722+#endif
5723+
5724 #include <asm/processor.h>
5725 #include <linux/module.h>
5726 #include <linux/elfcore.h>
5727diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
5728index 202e581..689ca79 100644
5729--- a/arch/mips/kernel/binfmt_elfo32.c
5730+++ b/arch/mips/kernel/binfmt_elfo32.c
5731@@ -56,6 +56,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5732 #undef ELF_ET_DYN_BASE
5733 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5734
5735+#ifdef CONFIG_PAX_ASLR
5736+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5737+
5738+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5739+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5740+#endif
5741+
5742 #include <asm/processor.h>
5743
5744 /*
5745diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
5746index c6a041d..b3e7318 100644
5747--- a/arch/mips/kernel/process.c
5748+++ b/arch/mips/kernel/process.c
5749@@ -563,15 +563,3 @@ unsigned long get_wchan(struct task_struct *task)
5750 out:
5751 return pc;
5752 }
5753-
5754-/*
5755- * Don't forget that the stack pointer must be aligned on a 8 bytes
5756- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
5757- */
5758-unsigned long arch_align_stack(unsigned long sp)
5759-{
5760- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5761- sp -= get_random_int() & ~PAGE_MASK;
5762-
5763- return sp & ALMASK;
5764-}
5765diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
5766index 9c6299c..2fb4c22 100644
5767--- a/arch/mips/kernel/ptrace.c
5768+++ b/arch/mips/kernel/ptrace.c
5769@@ -528,6 +528,10 @@ static inline int audit_arch(void)
5770 return arch;
5771 }
5772
5773+#ifdef CONFIG_GRKERNSEC_SETXID
5774+extern void gr_delayed_cred_worker(void);
5775+#endif
5776+
5777 /*
5778 * Notification of system call entry/exit
5779 * - triggered by current->work.syscall_trace
5780@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
5781 /* do the secure computing check first */
5782 secure_computing_strict(regs->regs[2]);
5783
5784+#ifdef CONFIG_GRKERNSEC_SETXID
5785+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5786+ gr_delayed_cred_worker();
5787+#endif
5788+
5789 if (!(current->ptrace & PT_PTRACED))
5790 goto out;
5791
5792diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
5793index 9b36424..e7f4154 100644
5794--- a/arch/mips/kernel/scall32-o32.S
5795+++ b/arch/mips/kernel/scall32-o32.S
5796@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5797
5798 stack_done:
5799 lw t0, TI_FLAGS($28) # syscall tracing enabled?
5800- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5801+ li t1, _TIF_SYSCALL_WORK
5802 and t0, t1
5803 bnez t0, syscall_trace_entry # -> yes
5804
5805diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
5806index 97a5909..59622f8 100644
5807--- a/arch/mips/kernel/scall64-64.S
5808+++ b/arch/mips/kernel/scall64-64.S
5809@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
5810
5811 sd a3, PT_R26(sp) # save a3 for syscall restarting
5812
5813- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5814+ li t1, _TIF_SYSCALL_WORK
5815 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5816 and t0, t1, t0
5817 bnez t0, syscall_trace_entry
5818diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
5819index edcb659..fb2ab09 100644
5820--- a/arch/mips/kernel/scall64-n32.S
5821+++ b/arch/mips/kernel/scall64-n32.S
5822@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
5823
5824 sd a3, PT_R26(sp) # save a3 for syscall restarting
5825
5826- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5827+ li t1, _TIF_SYSCALL_WORK
5828 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5829 and t0, t1, t0
5830 bnez t0, n32_syscall_trace_entry
5831diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
5832index 74f485d..47d2c38 100644
5833--- a/arch/mips/kernel/scall64-o32.S
5834+++ b/arch/mips/kernel/scall64-o32.S
5835@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5836 PTR 4b, bad_stack
5837 .previous
5838
5839- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5840+ li t1, _TIF_SYSCALL_WORK
5841 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5842 and t0, t1, t0
5843 bnez t0, trace_a_syscall
5844diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
5845index 0fead53..eeb00a6 100644
5846--- a/arch/mips/mm/fault.c
5847+++ b/arch/mips/mm/fault.c
5848@@ -27,6 +27,23 @@
5849 #include <asm/highmem.h> /* For VMALLOC_END */
5850 #include <linux/kdebug.h>
5851
5852+#ifdef CONFIG_PAX_PAGEEXEC
5853+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5854+{
5855+ unsigned long i;
5856+
5857+ printk(KERN_ERR "PAX: bytes at PC: ");
5858+ for (i = 0; i < 5; i++) {
5859+ unsigned int c;
5860+ if (get_user(c, (unsigned int *)pc+i))
5861+ printk(KERN_CONT "???????? ");
5862+ else
5863+ printk(KERN_CONT "%08x ", c);
5864+ }
5865+ printk("\n");
5866+}
5867+#endif
5868+
5869 /*
5870 * This routine handles page faults. It determines the address,
5871 * and the problem, and then passes it off to one of the appropriate
5872@@ -196,6 +213,14 @@ bad_area:
5873 bad_area_nosemaphore:
5874 /* User mode accesses just cause a SIGSEGV */
5875 if (user_mode(regs)) {
5876+
5877+#ifdef CONFIG_PAX_PAGEEXEC
5878+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
5879+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
5880+ do_group_exit(SIGKILL);
5881+ }
5882+#endif
5883+
5884 tsk->thread.cp0_badvaddr = address;
5885 tsk->thread.error_code = write;
5886 #if 0
5887diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
5888index 7e5fe27..9656513 100644
5889--- a/arch/mips/mm/mmap.c
5890+++ b/arch/mips/mm/mmap.c
5891@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5892 struct vm_area_struct *vma;
5893 unsigned long addr = addr0;
5894 int do_color_align;
5895+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5896 struct vm_unmapped_area_info info;
5897
5898 if (unlikely(len > TASK_SIZE))
5899@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5900 do_color_align = 1;
5901
5902 /* requesting a specific address */
5903+
5904+#ifdef CONFIG_PAX_RANDMMAP
5905+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
5906+#endif
5907+
5908 if (addr) {
5909 if (do_color_align)
5910 addr = COLOUR_ALIGN(addr, pgoff);
5911@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5912 addr = PAGE_ALIGN(addr);
5913
5914 vma = find_vma(mm, addr);
5915- if (TASK_SIZE - len >= addr &&
5916- (!vma || addr + len <= vma->vm_start))
5917+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5918 return addr;
5919 }
5920
5921 info.length = len;
5922 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
5923 info.align_offset = pgoff << PAGE_SHIFT;
5924+ info.threadstack_offset = offset;
5925
5926 if (dir == DOWN) {
5927 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
5928@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5929 {
5930 unsigned long random_factor = 0UL;
5931
5932+#ifdef CONFIG_PAX_RANDMMAP
5933+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5934+#endif
5935+
5936 if (current->flags & PF_RANDOMIZE) {
5937 random_factor = get_random_int();
5938 random_factor = random_factor << PAGE_SHIFT;
5939@@ -157,42 +167,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5940
5941 if (mmap_is_legacy()) {
5942 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5943+
5944+#ifdef CONFIG_PAX_RANDMMAP
5945+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5946+ mm->mmap_base += mm->delta_mmap;
5947+#endif
5948+
5949 mm->get_unmapped_area = arch_get_unmapped_area;
5950 mm->unmap_area = arch_unmap_area;
5951 } else {
5952 mm->mmap_base = mmap_base(random_factor);
5953+
5954+#ifdef CONFIG_PAX_RANDMMAP
5955+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5956+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5957+#endif
5958+
5959 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5960 mm->unmap_area = arch_unmap_area_topdown;
5961 }
5962 }
5963
5964-static inline unsigned long brk_rnd(void)
5965-{
5966- unsigned long rnd = get_random_int();
5967-
5968- rnd = rnd << PAGE_SHIFT;
5969- /* 8MB for 32bit, 256MB for 64bit */
5970- if (TASK_IS_32BIT_ADDR)
5971- rnd = rnd & 0x7ffffful;
5972- else
5973- rnd = rnd & 0xffffffful;
5974-
5975- return rnd;
5976-}
5977-
5978-unsigned long arch_randomize_brk(struct mm_struct *mm)
5979-{
5980- unsigned long base = mm->brk;
5981- unsigned long ret;
5982-
5983- ret = PAGE_ALIGN(base + brk_rnd());
5984-
5985- if (ret < mm->brk)
5986- return mm->brk;
5987-
5988- return ret;
5989-}
5990-
5991 int __virt_addr_valid(const volatile void *kaddr)
5992 {
5993 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
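
The arch_pick_mmap_layout() hunk follows the convention PaX applies on every architecture it touches: with MF_PAX_RANDMMAP set, the kernel's own random_factor stays zero and the base is shifted by the PaX-owned deltas instead — up from TASK_UNMAPPED_BASE in the legacy layout, down past the stack gap in the top-down one. Condensed into one sketch (delta_mmap and delta_stack are mm_struct fields added by this patch; mmap_base() is this file's helper):

static unsigned long pick_mmap_base_sketch(struct mm_struct *mm, int legacy,
					   unsigned long random_factor)
{
	unsigned long base;

	if (mm->pax_flags & MF_PAX_RANDMMAP)
		random_factor = 0;	/* PaX replaces kernel randomization */

	if (legacy) {
		base = TASK_UNMAPPED_BASE + random_factor;
		if (mm->pax_flags & MF_PAX_RANDMMAP)
			base += mm->delta_mmap;
	} else {
		base = mmap_base(random_factor);
		if (mm->pax_flags & MF_PAX_RANDMMAP)
			base -= mm->delta_mmap + mm->delta_stack;
	}
	return base;
}
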
5994diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5995index 967d144..db12197 100644
5996--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
5997+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5998@@ -11,12 +11,14 @@
5999 #ifndef _ASM_PROC_CACHE_H
6000 #define _ASM_PROC_CACHE_H
6001
6002+#include <linux/const.h>
6003+
6004 /* L1 cache */
6005
6006 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
6007 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
6008-#define L1_CACHE_BYTES 16 /* bytes per entry */
6009 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
6010+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
6011 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
6012
6013 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
6014diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6015index bcb5df2..84fabd2 100644
6016--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6017+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6018@@ -16,13 +16,15 @@
6019 #ifndef _ASM_PROC_CACHE_H
6020 #define _ASM_PROC_CACHE_H
6021
6022+#include <linux/const.h>
6023+
6024 /*
6025 * L1 cache
6026 */
6027 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
6028 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
6029-#define L1_CACHE_BYTES 32 /* bytes per entry */
6030 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
6031+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
6032 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
6033
6034 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
6035diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
6036index 4ce7a01..449202a 100644
6037--- a/arch/openrisc/include/asm/cache.h
6038+++ b/arch/openrisc/include/asm/cache.h
6039@@ -19,11 +19,13 @@
6040 #ifndef __ASM_OPENRISC_CACHE_H
6041 #define __ASM_OPENRISC_CACHE_H
6042
6043+#include <linux/const.h>
6044+
6045 /* FIXME: How can we replace these with values from the CPU...
6046 * they shouldn't be hard-coded!
6047 */
6048
6049-#define L1_CACHE_BYTES 16
6050 #define L1_CACHE_SHIFT 4
6051+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6052
6053 #endif /* __ASM_OPENRISC_CACHE_H */
6054diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
6055index 472886c..00e7df9 100644
6056--- a/arch/parisc/include/asm/atomic.h
6057+++ b/arch/parisc/include/asm/atomic.h
6058@@ -252,6 +252,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
6059 return dec;
6060 }
6061
6062+#define atomic64_read_unchecked(v) atomic64_read(v)
6063+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6064+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6065+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6066+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6067+#define atomic64_inc_unchecked(v) atomic64_inc(v)
6068+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6069+#define atomic64_dec_unchecked(v) atomic64_dec(v)
6070+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6071+
6072 #endif /* !CONFIG_64BIT */
6073
6074
6075diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
6076index 47f11c7..3420df2 100644
6077--- a/arch/parisc/include/asm/cache.h
6078+++ b/arch/parisc/include/asm/cache.h
6079@@ -5,6 +5,7 @@
6080 #ifndef __ARCH_PARISC_CACHE_H
6081 #define __ARCH_PARISC_CACHE_H
6082
6083+#include <linux/const.h>
6084
6085 /*
6086 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
6087@@ -15,13 +16,13 @@
6088 * just ruin performance.
6089 */
6090 #ifdef CONFIG_PA20
6091-#define L1_CACHE_BYTES 64
6092 #define L1_CACHE_SHIFT 6
6093 #else
6094-#define L1_CACHE_BYTES 32
6095 #define L1_CACHE_SHIFT 5
6096 #endif
6097
6098+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6099+
6100 #ifndef __ASSEMBLY__
6101
6102 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6103diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
6104index ad2b503..bdf1651 100644
6105--- a/arch/parisc/include/asm/elf.h
6106+++ b/arch/parisc/include/asm/elf.h
6107@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
6108
6109 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
6110
6111+#ifdef CONFIG_PAX_ASLR
6112+#define PAX_ELF_ET_DYN_BASE 0x10000UL
6113+
6114+#define PAX_DELTA_MMAP_LEN 16
6115+#define PAX_DELTA_STACK_LEN 16
6116+#endif
6117+
6118 /* This yields a mask that user programs can use to figure out what
6119 instruction set this CPU supports. This could be done in user space,
6120 but it's not easy, and we've already done it here. */
6121diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
6122index fc987a1..6e068ef 100644
6123--- a/arch/parisc/include/asm/pgalloc.h
6124+++ b/arch/parisc/include/asm/pgalloc.h
6125@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
6126 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
6127 }
6128
6129+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
6130+{
6131+ pgd_populate(mm, pgd, pmd);
6132+}
6133+
6134 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
6135 {
6136 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
6137@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
6138 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
6139 #define pmd_free(mm, x) do { } while (0)
6140 #define pgd_populate(mm, pmd, pte) BUG()
6141+#define pgd_populate_kernel(mm, pmd, pte) BUG()
6142
6143 #endif
6144
6145diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
6146index 1e40d7f..a3eb445 100644
6147--- a/arch/parisc/include/asm/pgtable.h
6148+++ b/arch/parisc/include/asm/pgtable.h
6149@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
6150 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
6151 #define PAGE_COPY PAGE_EXECREAD
6152 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
6153+
6154+#ifdef CONFIG_PAX_PAGEEXEC
6155+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
6156+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
6157+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
6158+#else
6159+# define PAGE_SHARED_NOEXEC PAGE_SHARED
6160+# define PAGE_COPY_NOEXEC PAGE_COPY
6161+# define PAGE_READONLY_NOEXEC PAGE_READONLY
6162+#endif
6163+
6164 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
6165 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
6166 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
6167diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
6168index e0a8235..ce2f1e1 100644
6169--- a/arch/parisc/include/asm/uaccess.h
6170+++ b/arch/parisc/include/asm/uaccess.h
6171@@ -245,10 +245,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
6172 const void __user *from,
6173 unsigned long n)
6174 {
6175- int sz = __compiletime_object_size(to);
6176+ size_t sz = __compiletime_object_size(to);
6177 int ret = -EFAULT;
6178
6179- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
6180+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
6181 ret = __copy_from_user(to, from, n);
6182 else
6183 copy_from_user_overflow();
6184diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
6185index 2a625fb..9908930 100644
6186--- a/arch/parisc/kernel/module.c
6187+++ b/arch/parisc/kernel/module.c
6188@@ -98,16 +98,38 @@
6189
6190 /* three functions to determine where in the module core
6191 * or init pieces the location is */
6192+static inline int in_init_rx(struct module *me, void *loc)
6193+{
6194+ return (loc >= me->module_init_rx &&
6195+ loc < (me->module_init_rx + me->init_size_rx));
6196+}
6197+
6198+static inline int in_init_rw(struct module *me, void *loc)
6199+{
6200+ return (loc >= me->module_init_rw &&
6201+ loc < (me->module_init_rw + me->init_size_rw));
6202+}
6203+
6204 static inline int in_init(struct module *me, void *loc)
6205 {
6206- return (loc >= me->module_init &&
6207- loc <= (me->module_init + me->init_size));
6208+ return in_init_rx(me, loc) || in_init_rw(me, loc);
6209+}
6210+
6211+static inline int in_core_rx(struct module *me, void *loc)
6212+{
6213+ return (loc >= me->module_core_rx &&
6214+ loc < (me->module_core_rx + me->core_size_rx));
6215+}
6216+
6217+static inline int in_core_rw(struct module *me, void *loc)
6218+{
6219+ return (loc >= me->module_core_rw &&
6220+ loc < (me->module_core_rw + me->core_size_rw));
6221 }
6222
6223 static inline int in_core(struct module *me, void *loc)
6224 {
6225- return (loc >= me->module_core &&
6226- loc <= (me->module_core + me->core_size));
6227+ return in_core_rx(me, loc) || in_core_rw(me, loc);
6228 }
6229
6230 static inline int in_local(struct module *me, void *loc)
6231@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
6232 }
6233
6234 /* align things a bit */
6235- me->core_size = ALIGN(me->core_size, 16);
6236- me->arch.got_offset = me->core_size;
6237- me->core_size += gots * sizeof(struct got_entry);
6238+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
6239+ me->arch.got_offset = me->core_size_rw;
6240+ me->core_size_rw += gots * sizeof(struct got_entry);
6241
6242- me->core_size = ALIGN(me->core_size, 16);
6243- me->arch.fdesc_offset = me->core_size;
6244- me->core_size += fdescs * sizeof(Elf_Fdesc);
6245+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
6246+ me->arch.fdesc_offset = me->core_size_rw;
6247+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
6248
6249 me->arch.got_max = gots;
6250 me->arch.fdesc_max = fdescs;
6251@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
6252
6253 BUG_ON(value == 0);
6254
6255- got = me->module_core + me->arch.got_offset;
6256+ got = me->module_core_rw + me->arch.got_offset;
6257 for (i = 0; got[i].addr; i++)
6258 if (got[i].addr == value)
6259 goto out;
6260@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
6261 #ifdef CONFIG_64BIT
6262 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
6263 {
6264- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
6265+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
6266
6267 if (!value) {
6268 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
6269@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
6270
6271 /* Create new one */
6272 fdesc->addr = value;
6273- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
6274+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
6275 return (Elf_Addr)fdesc;
6276 }
6277 #endif /* CONFIG_64BIT */
6278@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
6279
6280 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
6281 end = table + sechdrs[me->arch.unwind_section].sh_size;
6282- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
6283+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
6284
6285 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
6286 me->arch.unwind_section, table, end, gp);
6287diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
6288index 5dfd248..64914ac 100644
6289--- a/arch/parisc/kernel/sys_parisc.c
6290+++ b/arch/parisc/kernel/sys_parisc.c
6291@@ -33,9 +33,11 @@
6292 #include <linux/utsname.h>
6293 #include <linux/personality.h>
6294
6295-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
6296+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
6297+ unsigned long flags)
6298 {
6299 struct vm_unmapped_area_info info;
6300+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
6301
6302 info.flags = 0;
6303 info.length = len;
6304@@ -43,6 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
6305 info.high_limit = TASK_SIZE;
6306 info.align_mask = 0;
6307 info.align_offset = 0;
6308+ info.threadstack_offset = offset;
6309 return vm_unmapped_area(&info);
6310 }
6311
6312@@ -61,10 +64,11 @@ static int get_offset(struct address_space *mapping)
6313 return (unsigned long) mapping >> 8;
6314 }
6315
6316-static unsigned long get_shared_area(struct address_space *mapping,
6317- unsigned long addr, unsigned long len, unsigned long pgoff)
6318+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
6319+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
6320 {
6321 struct vm_unmapped_area_info info;
6322+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
6323
6324 info.flags = 0;
6325 info.length = len;
6326@@ -72,6 +76,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
6327 info.high_limit = TASK_SIZE;
6328 info.align_mask = PAGE_MASK & (SHMLBA - 1);
6329 info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT;
6330+ info.threadstack_offset = offset;
6331 return vm_unmapped_area(&info);
6332 }
6333
6334@@ -86,15 +91,22 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
6335 return -EINVAL;
6336 return addr;
6337 }
6338- if (!addr)
6339+ if (!addr) {
6340 addr = TASK_UNMAPPED_BASE;
6341
6342+#ifdef CONFIG_PAX_RANDMMAP
6343+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
6344+ addr += current->mm->delta_mmap;
6345+#endif
6346+
6347+ }
6348+
6349 if (filp) {
6350- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
6351+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
6352 } else if(flags & MAP_SHARED) {
6353- addr = get_shared_area(NULL, addr, len, pgoff);
6354+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
6355 } else {
6356- addr = get_unshared_area(addr, len);
6357+ addr = get_unshared_area(filp, addr, len, flags);
6358 }
6359 return addr;
6360 }
6361diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
6362index 04e47c6..7a8faf6 100644
6363--- a/arch/parisc/kernel/traps.c
6364+++ b/arch/parisc/kernel/traps.c
6365@@ -727,9 +727,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
6366
6367 down_read(&current->mm->mmap_sem);
6368 vma = find_vma(current->mm,regs->iaoq[0]);
6369- if (vma && (regs->iaoq[0] >= vma->vm_start)
6370- && (vma->vm_flags & VM_EXEC)) {
6371-
6372+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
6373 fault_address = regs->iaoq[0];
6374 fault_space = regs->iasq[0];
6375
6376diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
6377index f247a34..dc0f219 100644
6378--- a/arch/parisc/mm/fault.c
6379+++ b/arch/parisc/mm/fault.c
6380@@ -15,6 +15,7 @@
6381 #include <linux/sched.h>
6382 #include <linux/interrupt.h>
6383 #include <linux/module.h>
6384+#include <linux/unistd.h>
6385
6386 #include <asm/uaccess.h>
6387 #include <asm/traps.h>
6388@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
6389 static unsigned long
6390 parisc_acctyp(unsigned long code, unsigned int inst)
6391 {
6392- if (code == 6 || code == 16)
6393+ if (code == 6 || code == 7 || code == 16)
6394 return VM_EXEC;
6395
6396 switch (inst & 0xf0000000) {
6397@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
6398 }
6399 #endif
6400
6401+#ifdef CONFIG_PAX_PAGEEXEC
6402+/*
6403+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
6404+ *
6405+ * returns 1 when task should be killed
6406+ * 2 when rt_sigreturn trampoline was detected
6407+ * 3 when unpatched PLT trampoline was detected
6408+ */
6409+static int pax_handle_fetch_fault(struct pt_regs *regs)
6410+{
6411+ int err;
6412+
6413+#ifdef CONFIG_PAX_EMUPLT
6414+
6415+ do { /* PaX: unpatched PLT emulation */
6416+ unsigned int bl, depwi;
6417+
6418+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
6419+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
6420+
6421+ if (err)
6422+ break;
6423+
6424+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
6425+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
6426+
6427+ err = get_user(ldw, (unsigned int *)addr);
6428+ err |= get_user(bv, (unsigned int *)(addr+4));
6429+ err |= get_user(ldw2, (unsigned int *)(addr+8));
6430+
6431+ if (err)
6432+ break;
6433+
6434+ if (ldw == 0x0E801096U &&
6435+ bv == 0xEAC0C000U &&
6436+ ldw2 == 0x0E881095U)
6437+ {
6438+ unsigned int resolver, map;
6439+
6440+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
6441+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
6442+ if (err)
6443+ break;
6444+
6445+ regs->gr[20] = instruction_pointer(regs)+8;
6446+ regs->gr[21] = map;
6447+ regs->gr[22] = resolver;
6448+ regs->iaoq[0] = resolver | 3UL;
6449+ regs->iaoq[1] = regs->iaoq[0] + 4;
6450+ return 3;
6451+ }
6452+ }
6453+ } while (0);
6454+#endif
6455+
6456+#ifdef CONFIG_PAX_EMUTRAMP
6457+
6458+#ifndef CONFIG_PAX_EMUSIGRT
6459+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
6460+ return 1;
6461+#endif
6462+
6463+ do { /* PaX: rt_sigreturn emulation */
6464+ unsigned int ldi1, ldi2, bel, nop;
6465+
6466+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
6467+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
6468+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
6469+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
6470+
6471+ if (err)
6472+ break;
6473+
6474+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
6475+ ldi2 == 0x3414015AU &&
6476+ bel == 0xE4008200U &&
6477+ nop == 0x08000240U)
6478+ {
6479+ regs->gr[25] = (ldi1 & 2) >> 1;
6480+ regs->gr[20] = __NR_rt_sigreturn;
6481+ regs->gr[31] = regs->iaoq[1] + 16;
6482+ regs->sr[0] = regs->iasq[1];
6483+ regs->iaoq[0] = 0x100UL;
6484+ regs->iaoq[1] = regs->iaoq[0] + 4;
6485+ regs->iasq[0] = regs->sr[2];
6486+ regs->iasq[1] = regs->sr[2];
6487+ return 2;
6488+ }
6489+ } while (0);
6490+#endif
6491+
6492+ return 1;
6493+}
6494+
6495+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6496+{
6497+ unsigned long i;
6498+
6499+ printk(KERN_ERR "PAX: bytes at PC: ");
6500+ for (i = 0; i < 5; i++) {
6501+ unsigned int c;
6502+ if (get_user(c, (unsigned int *)pc+i))
6503+ printk(KERN_CONT "???????? ");
6504+ else
6505+ printk(KERN_CONT "%08x ", c);
6506+ }
6507+ printk("\n");
6508+}
6509+#endif
6510+
6511 int fixup_exception(struct pt_regs *regs)
6512 {
6513 const struct exception_table_entry *fix;
6514@@ -194,8 +305,33 @@ good_area:
6515
6516 acc_type = parisc_acctyp(code,regs->iir);
6517
6518- if ((vma->vm_flags & acc_type) != acc_type)
6519+ if ((vma->vm_flags & acc_type) != acc_type) {
6520+
6521+#ifdef CONFIG_PAX_PAGEEXEC
6522+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
6523+ (address & ~3UL) == instruction_pointer(regs))
6524+ {
6525+ up_read(&mm->mmap_sem);
6526+ switch (pax_handle_fetch_fault(regs)) {
6527+
6528+#ifdef CONFIG_PAX_EMUPLT
6529+ case 3:
6530+ return;
6531+#endif
6532+
6533+#ifdef CONFIG_PAX_EMUTRAMP
6534+ case 2:
6535+ return;
6536+#endif
6537+
6538+ }
6539+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
6540+ do_group_exit(SIGKILL);
6541+ }
6542+#endif
6543+
6544 goto bad_area;
6545+ }
6546
6547 /*
6548 * If for any reason at all we couldn't handle the fault, make
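
pax_handle_fetch_fault() above is the parisc instance of a pattern PaX uses on several RISC ports: on an execute fault at a known trampoline, fetch the instruction words with get_user(), match them against fixed opcodes, and emulate the trampoline's register effects rather than permit an executable mapping. The matching skeleton, reduced to its shape (the opcode constants are the parisc unpatched-PLT stub from the hunk above):

static int matches_plt_stub(unsigned long pc)
{
	unsigned int bl, depwi;

	/* The fault PC is a user address: fetch through get_user(). */
	if (get_user(bl, (unsigned int __user *)pc) ||
	    get_user(depwi, (unsigned int __user *)(pc + 4)))
		return 0;	/* unreadable: not a trampoline we know */

	return bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU;
}
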
6549diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
6550index e3b1d41..8e81edf 100644
6551--- a/arch/powerpc/include/asm/atomic.h
6552+++ b/arch/powerpc/include/asm/atomic.h
6553@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
6554 return t1;
6555 }
6556
6557+#define atomic64_read_unchecked(v) atomic64_read(v)
6558+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6559+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6560+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6561+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6562+#define atomic64_inc_unchecked(v) atomic64_inc(v)
6563+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6564+#define atomic64_dec_unchecked(v) atomic64_dec(v)
6565+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6566+
6567 #endif /* __powerpc64__ */
6568
6569 #endif /* __KERNEL__ */
6570diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
6571index 9e495c9..b6878e5 100644
6572--- a/arch/powerpc/include/asm/cache.h
6573+++ b/arch/powerpc/include/asm/cache.h
6574@@ -3,6 +3,7 @@
6575
6576 #ifdef __KERNEL__
6577
6578+#include <linux/const.h>
6579
6580 /* bytes per L1 cache line */
6581 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
6582@@ -22,7 +23,7 @@
6583 #define L1_CACHE_SHIFT 7
6584 #endif
6585
6586-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6587+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6588
6589 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6590
6591diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
6592index cc0655a..13eac2e 100644
6593--- a/arch/powerpc/include/asm/elf.h
6594+++ b/arch/powerpc/include/asm/elf.h
6595@@ -28,8 +28,19 @@
6596 the loader. We need to make sure that it is out of the way of the program
6597 that it will "exec", and that there is sufficient room for the brk. */
6598
6599-extern unsigned long randomize_et_dyn(unsigned long base);
6600-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
6601+#define ELF_ET_DYN_BASE (0x20000000)
6602+
6603+#ifdef CONFIG_PAX_ASLR
6604+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
6605+
6606+#ifdef __powerpc64__
6607+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
6608+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
6609+#else
6610+#define PAX_DELTA_MMAP_LEN 15
6611+#define PAX_DELTA_STACK_LEN 15
6612+#endif
6613+#endif
6614
6615 /*
6616 * Our registers are always unsigned longs, whether we're a 32 bit
6617@@ -123,10 +134,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6618 (0x7ff >> (PAGE_SHIFT - 12)) : \
6619 (0x3ffff >> (PAGE_SHIFT - 12)))
6620
6621-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6622-#define arch_randomize_brk arch_randomize_brk
6623-
6624-
6625 #ifdef CONFIG_SPU_BASE
6626 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
6627 #define NT_SPU 1
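
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits folded into mmap and stack placement (16 for 32-bit tasks here, 28 for 64-bit ones). They are consumed by the ELF loader elsewhere in the patch, which derives the per-mm deltas roughly as below — a sketch, not a hunk from this section, and the exact form is an assumption; pax_get_random_long() is the patch's RNG helper:

/* Sketch: how the PAX_DELTA_*_LEN bit counts become the per-mm deltas
 * used by the mmap-layout hunks earlier in this patch. Illustrative only. */
if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
	current->mm->delta_mmap = (pax_get_random_long() &
		((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
	current->mm->delta_stack = (pax_get_random_long() &
		((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;
}
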
6628diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
6629index 8196e9c..d83a9f3 100644
6630--- a/arch/powerpc/include/asm/exec.h
6631+++ b/arch/powerpc/include/asm/exec.h
6632@@ -4,6 +4,6 @@
6633 #ifndef _ASM_POWERPC_EXEC_H
6634 #define _ASM_POWERPC_EXEC_H
6635
6636-extern unsigned long arch_align_stack(unsigned long sp);
6637+#define arch_align_stack(x) ((x) & ~0xfUL)
6638
6639 #endif /* _ASM_POWERPC_EXEC_H */
6640diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
6641index 5acabbd..7ea14fa 100644
6642--- a/arch/powerpc/include/asm/kmap_types.h
6643+++ b/arch/powerpc/include/asm/kmap_types.h
6644@@ -10,7 +10,7 @@
6645 * 2 of the License, or (at your option) any later version.
6646 */
6647
6648-#define KM_TYPE_NR 16
6649+#define KM_TYPE_NR 17
6650
6651 #endif /* __KERNEL__ */
6652 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
6653diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
6654index 8565c25..2865190 100644
6655--- a/arch/powerpc/include/asm/mman.h
6656+++ b/arch/powerpc/include/asm/mman.h
6657@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
6658 }
6659 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
6660
6661-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
6662+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
6663 {
6664 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
6665 }
6666diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
6667index 988c812..63c7d70 100644
6668--- a/arch/powerpc/include/asm/page.h
6669+++ b/arch/powerpc/include/asm/page.h
6670@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
6671 * and needs to be executable. This means the whole heap ends
6672 * up being executable.
6673 */
6674-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6675- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6676+#define VM_DATA_DEFAULT_FLAGS32 \
6677+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6678+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6679
6680 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6681 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6682@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
6683 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
6684 #endif
6685
6686+#define ktla_ktva(addr) (addr)
6687+#define ktva_ktla(addr) (addr)
6688+
6689 #ifndef CONFIG_PPC_BOOK3S_64
6690 /*
6691 * Use the top bit of the higher-level page table entries to indicate whether
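VM_DATA_DEFAULT_FLAGS32 now grants VM_EXEC only when the task's personality carries READ_IMPLIES_EXEC, instead of unconditionally; the page_64.h hunk just below repeats the same gate for the 32-bit stack flags. A userland model of that gate (VM_* and READ_IMPLIES_EXEC values match linux/mm.h and include/uapi/linux/personality.h; the VM_MAY* bits are omitted for brevity):

#include <stdio.h>

#define VM_READ   0x00000001UL
#define VM_WRITE  0x00000002UL
#define VM_EXEC   0x00000004UL
#define READ_IMPLIES_EXEC 0x0400000UL

static unsigned long data_default_flags(unsigned long personality)
{
	return ((personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) |
	       VM_READ | VM_WRITE;
}

int main(void)
{
	printf("plain personality: %#lx\n", data_default_flags(0));                  /* rw- */
	printf("READ_IMPLIES_EXEC: %#lx\n", data_default_flags(READ_IMPLIES_EXEC));  /* rwx */
	return 0;
}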
6692diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
6693index 88693ce..ac6f9ab 100644
6694--- a/arch/powerpc/include/asm/page_64.h
6695+++ b/arch/powerpc/include/asm/page_64.h
6696@@ -153,15 +153,18 @@ do { \
6697 * stack by default, so in the absence of a PT_GNU_STACK program header
6698 * we turn execute permission off.
6699 */
6700-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6701- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6702+#define VM_STACK_DEFAULT_FLAGS32 \
6703+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6704+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6705
6706 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6707 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6708
6709+#ifndef CONFIG_PAX_PAGEEXEC
6710 #define VM_STACK_DEFAULT_FLAGS \
6711 (is_32bit_task() ? \
6712 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
6713+#endif
6714
6715 #include <asm-generic/getorder.h>
6716
6717diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
6718index b66ae72..4a378cd 100644
6719--- a/arch/powerpc/include/asm/pgalloc-64.h
6720+++ b/arch/powerpc/include/asm/pgalloc-64.h
6721@@ -53,6 +53,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
6722 #ifndef CONFIG_PPC_64K_PAGES
6723
6724 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
6725+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
6726
6727 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
6728 {
6729@@ -70,6 +71,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6730 pud_set(pud, (unsigned long)pmd);
6731 }
6732
6733+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6734+{
6735+ pud_populate(mm, pud, pmd);
6736+}
6737+
6738 #define pmd_populate(mm, pmd, pte_page) \
6739 pmd_populate_kernel(mm, pmd, page_address(pte_page))
6740 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
6741@@ -171,6 +177,7 @@ extern void __tlb_remove_table(void *_table);
6742 #endif
6743
6744 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
6745+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
6746
6747 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
6748 pte_t *pte)
6749diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
6750index 7aeb955..19f748e 100644
6751--- a/arch/powerpc/include/asm/pgtable.h
6752+++ b/arch/powerpc/include/asm/pgtable.h
6753@@ -2,6 +2,7 @@
6754 #define _ASM_POWERPC_PGTABLE_H
6755 #ifdef __KERNEL__
6756
6757+#include <linux/const.h>
6758 #ifndef __ASSEMBLY__
6759 #include <asm/processor.h> /* For TASK_SIZE */
6760 #include <asm/mmu.h>
6761diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
6762index 4aad413..85d86bf 100644
6763--- a/arch/powerpc/include/asm/pte-hash32.h
6764+++ b/arch/powerpc/include/asm/pte-hash32.h
6765@@ -21,6 +21,7 @@
6766 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
6767 #define _PAGE_USER 0x004 /* usermode access allowed */
6768 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
6769+#define _PAGE_EXEC _PAGE_GUARDED
6770 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
6771 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
6772 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
6773diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
6774index e1fb161..2290d1d 100644
6775--- a/arch/powerpc/include/asm/reg.h
6776+++ b/arch/powerpc/include/asm/reg.h
6777@@ -234,6 +234,7 @@
6778 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
6779 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
6780 #define DSISR_NOHPTE 0x40000000 /* no translation found */
6781+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
6782 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
6783 #define DSISR_ISSTORE 0x02000000 /* access was a store */
6784 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
6785diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
6786index 48cfc85..891382f 100644
6787--- a/arch/powerpc/include/asm/smp.h
6788+++ b/arch/powerpc/include/asm/smp.h
6789@@ -50,7 +50,7 @@ struct smp_ops_t {
6790 int (*cpu_disable)(void);
6791 void (*cpu_die)(unsigned int nr);
6792 int (*cpu_bootable)(unsigned int nr);
6793-};
6794+} __no_const;
6795
6796 extern void smp_send_debugger_break(void);
6797 extern void start_secondary_resume(void);
6798diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
6799index ba7b197..d292e26 100644
6800--- a/arch/powerpc/include/asm/thread_info.h
6801+++ b/arch/powerpc/include/asm/thread_info.h
6802@@ -93,7 +93,6 @@ static inline struct thread_info *current_thread_info(void)
6803 #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
6804 TIF_NEED_RESCHED */
6805 #define TIF_32BIT 4 /* 32 bit binary */
6806-#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */
6807 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
6808 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
6809 #define TIF_SINGLESTEP 8 /* singlestepping active */
6810@@ -107,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
6811 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
6812 for stack store? */
6813 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
6814+#define TIF_PERFMON_WORK 18 /* work for pfm_handle_work() */
6815+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
6816+#define TIF_GRSEC_SETXID 5 /* update credentials on syscall entry/exit */
6817
6818 /* as above, but as bit values */
6819 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6820@@ -126,9 +128,10 @@ static inline struct thread_info *current_thread_info(void)
6821 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6822 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
6823 #define _TIF_NOHZ (1<<TIF_NOHZ)
6824+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6825 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
6826 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
6827- _TIF_NOHZ)
6828+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
6829
6830 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
6831 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
6832diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
6833index 4db4959..aba5c41 100644
6834--- a/arch/powerpc/include/asm/uaccess.h
6835+++ b/arch/powerpc/include/asm/uaccess.h
6836@@ -318,52 +318,6 @@ do { \
6837 extern unsigned long __copy_tofrom_user(void __user *to,
6838 const void __user *from, unsigned long size);
6839
6840-#ifndef __powerpc64__
6841-
6842-static inline unsigned long copy_from_user(void *to,
6843- const void __user *from, unsigned long n)
6844-{
6845- unsigned long over;
6846-
6847- if (access_ok(VERIFY_READ, from, n))
6848- return __copy_tofrom_user((__force void __user *)to, from, n);
6849- if ((unsigned long)from < TASK_SIZE) {
6850- over = (unsigned long)from + n - TASK_SIZE;
6851- return __copy_tofrom_user((__force void __user *)to, from,
6852- n - over) + over;
6853- }
6854- return n;
6855-}
6856-
6857-static inline unsigned long copy_to_user(void __user *to,
6858- const void *from, unsigned long n)
6859-{
6860- unsigned long over;
6861-
6862- if (access_ok(VERIFY_WRITE, to, n))
6863- return __copy_tofrom_user(to, (__force void __user *)from, n);
6864- if ((unsigned long)to < TASK_SIZE) {
6865- over = (unsigned long)to + n - TASK_SIZE;
6866- return __copy_tofrom_user(to, (__force void __user *)from,
6867- n - over) + over;
6868- }
6869- return n;
6870-}
6871-
6872-#else /* __powerpc64__ */
6873-
6874-#define __copy_in_user(to, from, size) \
6875- __copy_tofrom_user((to), (from), (size))
6876-
6877-extern unsigned long copy_from_user(void *to, const void __user *from,
6878- unsigned long n);
6879-extern unsigned long copy_to_user(void __user *to, const void *from,
6880- unsigned long n);
6881-extern unsigned long copy_in_user(void __user *to, const void __user *from,
6882- unsigned long n);
6883-
6884-#endif /* __powerpc64__ */
6885-
6886 static inline unsigned long __copy_from_user_inatomic(void *to,
6887 const void __user *from, unsigned long n)
6888 {
6889@@ -387,6 +341,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
6890 if (ret == 0)
6891 return 0;
6892 }
6893+
6894+ if (!__builtin_constant_p(n))
6895+ check_object_size(to, n, false);
6896+
6897 return __copy_tofrom_user((__force void __user *)to, from, n);
6898 }
6899
6900@@ -413,6 +371,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
6901 if (ret == 0)
6902 return 0;
6903 }
6904+
6905+ if (!__builtin_constant_p(n))
6906+ check_object_size(from, n, true);
6907+
6908 return __copy_tofrom_user(to, (__force const void __user *)from, n);
6909 }
6910
6911@@ -430,6 +392,92 @@ static inline unsigned long __copy_to_user(void __user *to,
6912 return __copy_to_user_inatomic(to, from, size);
6913 }
6914
6915+#ifndef __powerpc64__
6916+
6917+static inline unsigned long __must_check copy_from_user(void *to,
6918+ const void __user *from, unsigned long n)
6919+{
6920+ unsigned long over;
6921+
6922+ if ((long)n < 0)
6923+ return n;
6924+
6925+ if (access_ok(VERIFY_READ, from, n)) {
6926+ if (!__builtin_constant_p(n))
6927+ check_object_size(to, n, false);
6928+ return __copy_tofrom_user((__force void __user *)to, from, n);
6929+ }
6930+ if ((unsigned long)from < TASK_SIZE) {
6931+ over = (unsigned long)from + n - TASK_SIZE;
6932+ if (!__builtin_constant_p(n - over))
6933+ check_object_size(to, n - over, false);
6934+ return __copy_tofrom_user((__force void __user *)to, from,
6935+ n - over) + over;
6936+ }
6937+ return n;
6938+}
6939+
6940+static inline unsigned long __must_check copy_to_user(void __user *to,
6941+ const void *from, unsigned long n)
6942+{
6943+ unsigned long over;
6944+
6945+ if ((long)n < 0)
6946+ return n;
6947+
6948+ if (access_ok(VERIFY_WRITE, to, n)) {
6949+ if (!__builtin_constant_p(n))
6950+ check_object_size(from, n, true);
6951+ return __copy_tofrom_user(to, (__force void __user *)from, n);
6952+ }
6953+ if ((unsigned long)to < TASK_SIZE) {
6954+ over = (unsigned long)to + n - TASK_SIZE;
6955+ if (!__builtin_constant_p(n))
6956+ check_object_size(from, n - over, true);
6957+ return __copy_tofrom_user(to, (__force void __user *)from,
6958+ n - over) + over;
6959+ }
6960+ return n;
6961+}
6962+
6963+#else /* __powerpc64__ */
6964+
6965+#define __copy_in_user(to, from, size) \
6966+ __copy_tofrom_user((to), (from), (size))
6967+
6968+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
6969+{
6970+ if ((long)n < 0 || n > INT_MAX)
6971+ return n;
6972+
6973+ if (!__builtin_constant_p(n))
6974+ check_object_size(to, n, false);
6975+
6976+ if (likely(access_ok(VERIFY_READ, from, n)))
6977+ n = __copy_from_user(to, from, n);
6978+ else
6979+ memset(to, 0, n);
6980+ return n;
6981+}
6982+
6983+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
6984+{
6985+ if ((long)n < 0 || n > INT_MAX)
6986+ return n;
6987+
6988+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
6989+ if (!__builtin_constant_p(n))
6990+ check_object_size(from, n, true);
6991+ n = __copy_to_user(to, from, n);
6992+ }
6993+ return n;
6994+}
6995+
6996+extern unsigned long copy_in_user(void __user *to, const void __user *from,
6997+ unsigned long n);
6998+
6999+#endif /* __powerpc64__ */
7000+
7001 extern unsigned long __clear_user(void __user *addr, unsigned long size);
7002
7003 static inline unsigned long clear_user(void __user *addr, unsigned long size)
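The uaccess.h rework moves copy_from_user()/copy_to_user() inline and layers three checks in front of the raw copy: a sign check that catches huge lengths produced by signedness bugs, access_ok() range validation, and a PAX_USERCOPY object-size check for non-constant lengths. A userland model of the 64-bit copy_from_user() flow, with the kernel helpers stubbed out:

#include <string.h>
#include <stdio.h>

static int access_ok(const void *p, unsigned long n) { (void)p; (void)n; return 1; }
static void check_object_size(const void *p, unsigned long n, int to_user)
{ (void)p; (void)n; (void)to_user; /* kernel: verify n fits the slab/stack object */ }

static unsigned long copy_from_user(void *to, const void *from, unsigned long n)
{
	if ((long)n < 0)              /* e.g. n == (unsigned long)-14 from a bug */
		return n;             /* report everything as uncopied */
	if (!access_ok(from, n))
		return n;
	if (!__builtin_constant_p(n)) /* constant sizes are handled at build time,
	                               * mirroring the hunk above */
		check_object_size(to, n, 0);
	memcpy(to, from, n);          /* stands in for __copy_tofrom_user() */
	return 0;
}

int main(void)
{
	char buf[16], src[16] = "hello";
	printf("%lu\n", copy_from_user(buf, src, sizeof(src)));        /* 0 */
	printf("%lu\n", copy_from_user(buf, src, (unsigned long)-14)); /* rejected */
	return 0;
}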
7004diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
7005index 645170a..6cf0271 100644
7006--- a/arch/powerpc/kernel/exceptions-64e.S
7007+++ b/arch/powerpc/kernel/exceptions-64e.S
7008@@ -757,6 +757,7 @@ storage_fault_common:
7009 std r14,_DAR(r1)
7010 std r15,_DSISR(r1)
7011 addi r3,r1,STACK_FRAME_OVERHEAD
7012+ bl .save_nvgprs
7013 mr r4,r14
7014 mr r5,r15
7015 ld r14,PACA_EXGEN+EX_R14(r13)
7016@@ -765,8 +766,7 @@ storage_fault_common:
7017 cmpdi r3,0
7018 bne- 1f
7019 b .ret_from_except_lite
7020-1: bl .save_nvgprs
7021- mr r5,r3
7022+1: mr r5,r3
7023 addi r3,r1,STACK_FRAME_OVERHEAD
7024 ld r4,_DAR(r1)
7025 bl .bad_page_fault
7026diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
7027index 902ca3c..e942155 100644
7028--- a/arch/powerpc/kernel/exceptions-64s.S
7029+++ b/arch/powerpc/kernel/exceptions-64s.S
7030@@ -1357,10 +1357,10 @@ handle_page_fault:
7031 11: ld r4,_DAR(r1)
7032 ld r5,_DSISR(r1)
7033 addi r3,r1,STACK_FRAME_OVERHEAD
7034+ bl .save_nvgprs
7035 bl .do_page_fault
7036 cmpdi r3,0
7037 beq+ 12f
7038- bl .save_nvgprs
7039 mr r5,r3
7040 addi r3,r1,STACK_FRAME_OVERHEAD
7041 lwz r4,_DAR(r1)
7042diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
7043index 2e3200c..72095ce 100644
7044--- a/arch/powerpc/kernel/module_32.c
7045+++ b/arch/powerpc/kernel/module_32.c
7046@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
7047 me->arch.core_plt_section = i;
7048 }
7049 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
7050- printk("Module doesn't contain .plt or .init.plt sections.\n");
7051+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
7052 return -ENOEXEC;
7053 }
7054
7055@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
7056
7057 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
7058 /* Init, or core PLT? */
7059- if (location >= mod->module_core
7060- && location < mod->module_core + mod->core_size)
7061+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
7062+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
7063 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
7064- else
7065+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
7066+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
7067 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
7068+ else {
7069+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
7070+ return ~0UL;
7071+ }
7072
7073 /* Find this entry, or if that fails, the next avail. entry */
7074 while (entry->jump[0]) {
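do_plt_call() previously assumed any address outside module_core belonged to the init PLT; with the PaX split of each module into rx (code) and rw (data) halves that assumption breaks, so the hunk tests all four regions explicitly and fails hard on anything else. A compact model of the classification (pointer comparisons across unrelated objects are fine for a sketch, not strict C):

#include <stdio.h>

struct mod_layout {
	char *core_rx, *core_rw, *init_rx, *init_rw;
	unsigned long core_size_rx, core_size_rw, init_size_rx, init_size_rw;
};

static int in_region(const char *p, const char *base, unsigned long size)
{
	return p >= base && p < base + size;
}

/* 0 = core PLT, 1 = init PLT, -1 = not in this module at all */
static int classify(const char *loc, const struct mod_layout *m)
{
	if (in_region(loc, m->core_rx, m->core_size_rx) ||
	    in_region(loc, m->core_rw, m->core_size_rw))
		return 0;
	if (in_region(loc, m->init_rx, m->init_size_rx) ||
	    in_region(loc, m->init_rw, m->init_size_rw))
		return 1;
	return -1;  /* the hunk returns ~0UL and logs instead of guessing */
}

int main(void)
{
	static char core_rx[64], core_rw[64], init_rx[64], init_rw[64];
	char other;
	struct mod_layout m = { core_rx, core_rw, init_rx, init_rw,
	                        sizeof(core_rx), sizeof(core_rw),
	                        sizeof(init_rx), sizeof(init_rw) };
	printf("%d %d %d\n", classify(core_rw + 8, &m),
	       classify(init_rx + 8, &m), classify(&other, &m));
	return 0;
}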
7075diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
7076index 7baa27b..f6b394a 100644
7077--- a/arch/powerpc/kernel/process.c
7078+++ b/arch/powerpc/kernel/process.c
7079@@ -884,8 +884,8 @@ void show_regs(struct pt_regs * regs)
7080 * Lookup NIP late so we have the best change of getting the
7081 * above info out without failing
7082 */
7083- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
7084- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
7085+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
7086+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
7087 #endif
7088 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
7089 printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
7090@@ -1345,10 +1345,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
7091 newsp = stack[0];
7092 ip = stack[STACK_FRAME_LR_SAVE];
7093 if (!firstframe || ip != lr) {
7094- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
7095+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
7096 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
7097 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
7098- printk(" (%pS)",
7099+ printk(" (%pA)",
7100 (void *)current->ret_stack[curr_frame].ret);
7101 curr_frame--;
7102 }
7103@@ -1368,7 +1368,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
7104 struct pt_regs *regs = (struct pt_regs *)
7105 (sp + STACK_FRAME_OVERHEAD);
7106 lr = regs->link;
7107- printk("--- Exception: %lx at %pS\n LR = %pS\n",
7108+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
7109 regs->trap, (void *)regs->nip, (void *)lr);
7110 firstframe = 1;
7111 }
7112@@ -1404,58 +1404,3 @@ void notrace __ppc64_runlatch_off(void)
7113 mtspr(SPRN_CTRLT, ctrl);
7114 }
7115 #endif /* CONFIG_PPC64 */
7116-
7117-unsigned long arch_align_stack(unsigned long sp)
7118-{
7119- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7120- sp -= get_random_int() & ~PAGE_MASK;
7121- return sp & ~0xf;
7122-}
7123-
7124-static inline unsigned long brk_rnd(void)
7125-{
7126- unsigned long rnd = 0;
7127-
7128- /* 8MB for 32bit, 1GB for 64bit */
7129- if (is_32bit_task())
7130- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
7131- else
7132- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
7133-
7134- return rnd << PAGE_SHIFT;
7135-}
7136-
7137-unsigned long arch_randomize_brk(struct mm_struct *mm)
7138-{
7139- unsigned long base = mm->brk;
7140- unsigned long ret;
7141-
7142-#ifdef CONFIG_PPC_STD_MMU_64
7143- /*
7144- * If we are using 1TB segments and we are allowed to randomise
7145- * the heap, we can put it above 1TB so it is backed by a 1TB
7146- * segment. Otherwise the heap will be in the bottom 1TB
7147- * which always uses 256MB segments and this may result in a
7148- * performance penalty.
7149- */
7150- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
7151- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
7152-#endif
7153-
7154- ret = PAGE_ALIGN(base + brk_rnd());
7155-
7156- if (ret < mm->brk)
7157- return mm->brk;
7158-
7159- return ret;
7160-}
7161-
7162-unsigned long randomize_et_dyn(unsigned long base)
7163-{
7164- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7165-
7166- if (ret < base)
7167- return base;
7168-
7169- return ret;
7170-}
7171diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
7172index 64f7bd5..8dd550f 100644
7173--- a/arch/powerpc/kernel/ptrace.c
7174+++ b/arch/powerpc/kernel/ptrace.c
7175@@ -1783,6 +1783,10 @@ long arch_ptrace(struct task_struct *child, long request,
7176 return ret;
7177 }
7178
7179+#ifdef CONFIG_GRKERNSEC_SETXID
7180+extern void gr_delayed_cred_worker(void);
7181+#endif
7182+
7183 /*
7184 * We must return the syscall number to actually look up in the table.
7185 * This can be -1L to skip running any syscall at all.
7186@@ -1795,6 +1799,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
7187
7188 secure_computing_strict(regs->gpr[0]);
7189
7190+#ifdef CONFIG_GRKERNSEC_SETXID
7191+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7192+ gr_delayed_cred_worker();
7193+#endif
7194+
7195 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
7196 tracehook_report_syscall_entry(regs))
7197 /*
7198@@ -1829,6 +1838,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
7199 {
7200 int step;
7201
7202+#ifdef CONFIG_GRKERNSEC_SETXID
7203+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7204+ gr_delayed_cred_worker();
7205+#endif
7206+
7207 audit_syscall_exit(regs);
7208
7209 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
7210diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
7211index 0f83122..c0aca6a 100644
7212--- a/arch/powerpc/kernel/signal_32.c
7213+++ b/arch/powerpc/kernel/signal_32.c
7214@@ -987,7 +987,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
7215 /* Save user registers on the stack */
7216 frame = &rt_sf->uc.uc_mcontext;
7217 addr = frame;
7218- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
7219+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
7220 sigret = 0;
7221 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
7222 } else {
7223diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
7224index 887e99d..310bc11 100644
7225--- a/arch/powerpc/kernel/signal_64.c
7226+++ b/arch/powerpc/kernel/signal_64.c
7227@@ -751,7 +751,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
7228 #endif
7229
7230 /* Set up to return from userspace. */
7231- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
7232+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
7233 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
7234 } else {
7235 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
7236diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
7237index e68a845..8b140e6 100644
7238--- a/arch/powerpc/kernel/sysfs.c
7239+++ b/arch/powerpc/kernel/sysfs.c
7240@@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
7241 return NOTIFY_OK;
7242 }
7243
7244-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
7245+static struct notifier_block sysfs_cpu_nb = {
7246 .notifier_call = sysfs_cpu_notify,
7247 };
7248
7249diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
7250index 88929b1..bece8f8 100644
7251--- a/arch/powerpc/kernel/traps.c
7252+++ b/arch/powerpc/kernel/traps.c
7253@@ -141,6 +141,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
7254 return flags;
7255 }
7256
7257+extern void gr_handle_kernel_exploit(void);
7258+
7259 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
7260 int signr)
7261 {
7262@@ -190,6 +192,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
7263 panic("Fatal exception in interrupt");
7264 if (panic_on_oops)
7265 panic("Fatal exception");
7266+
7267+ gr_handle_kernel_exploit();
7268+
7269 do_exit(signr);
7270 }
7271
7272diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
7273index d4f463a..8fb7431 100644
7274--- a/arch/powerpc/kernel/vdso.c
7275+++ b/arch/powerpc/kernel/vdso.c
7276@@ -34,6 +34,7 @@
7277 #include <asm/firmware.h>
7278 #include <asm/vdso.h>
7279 #include <asm/vdso_datapage.h>
7280+#include <asm/mman.h>
7281
7282 #include "setup.h"
7283
7284@@ -222,7 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
7285 vdso_base = VDSO32_MBASE;
7286 #endif
7287
7288- current->mm->context.vdso_base = 0;
7289+ current->mm->context.vdso_base = ~0UL;
7290
7291 /* vDSO has a problem and was disabled, just don't "enable" it for the
7292 * process
7293@@ -242,7 +243,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
7294 vdso_base = get_unmapped_area(NULL, vdso_base,
7295 (vdso_pages << PAGE_SHIFT) +
7296 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
7297- 0, 0);
7298+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
7299 if (IS_ERR_VALUE(vdso_base)) {
7300 rc = vdso_base;
7301 goto fail_mmapsem;
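The vdso.c hunk above changes the "no vDSO" sentinel in context.vdso_base from 0 to ~0UL, and the signal_32.c/signal_64.c hunks earlier check against that sentinel. The likely rationale: once the vDSO base is randomized, a zero value is no longer safely distinguishable from "unmapped", whereas ~0UL can never be a real mapping base. A small model of why the explicit sentinel matters (the base-0 case is illustrative):

#include <stdio.h>

#define VDSO_UNMAPPED (~0UL)  /* never a valid page-aligned mapping base */

static unsigned long vdso_base = VDSO_UNMAPPED;

static unsigned long sigtramp(unsigned long vdso_sigtramp_off)
{
	if (vdso_sigtramp_off && vdso_base != VDSO_UNMAPPED)
		return vdso_base + vdso_sigtramp_off;
	return 0;  /* fall back to an on-stack trampoline */
}

int main(void)
{
	printf("%#lx\n", sigtramp(0x420));  /* 0: vDSO not mapped */
	vdso_base = 0;                      /* a valid-looking base under ASLR */
	printf("%#lx\n", sigtramp(0x420));  /* 0x420: still works at base 0 */
	return 0;
}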
7302diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
7303index 5eea6f3..5d10396 100644
7304--- a/arch/powerpc/lib/usercopy_64.c
7305+++ b/arch/powerpc/lib/usercopy_64.c
7306@@ -9,22 +9,6 @@
7307 #include <linux/module.h>
7308 #include <asm/uaccess.h>
7309
7310-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
7311-{
7312- if (likely(access_ok(VERIFY_READ, from, n)))
7313- n = __copy_from_user(to, from, n);
7314- else
7315- memset(to, 0, n);
7316- return n;
7317-}
7318-
7319-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
7320-{
7321- if (likely(access_ok(VERIFY_WRITE, to, n)))
7322- n = __copy_to_user(to, from, n);
7323- return n;
7324-}
7325-
7326 unsigned long copy_in_user(void __user *to, const void __user *from,
7327 unsigned long n)
7328 {
7329@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
7330 return n;
7331 }
7332
7333-EXPORT_SYMBOL(copy_from_user);
7334-EXPORT_SYMBOL(copy_to_user);
7335 EXPORT_SYMBOL(copy_in_user);
7336
7337diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
7338index 8726779..a33c512 100644
7339--- a/arch/powerpc/mm/fault.c
7340+++ b/arch/powerpc/mm/fault.c
7341@@ -33,6 +33,10 @@
7342 #include <linux/magic.h>
7343 #include <linux/ratelimit.h>
7344 #include <linux/context_tracking.h>
7345+#include <linux/slab.h>
7346+#include <linux/pagemap.h>
7347+#include <linux/compiler.h>
7348+#include <linux/unistd.h>
7349
7350 #include <asm/firmware.h>
7351 #include <asm/page.h>
7352@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
7353 }
7354 #endif
7355
7356+#ifdef CONFIG_PAX_PAGEEXEC
7357+/*
7358+ * PaX: decide what to do with offenders (regs->nip = fault address)
7359+ *
7360+ * returns 1 when task should be killed
7361+ */
7362+static int pax_handle_fetch_fault(struct pt_regs *regs)
7363+{
7364+ return 1;
7365+}
7366+
7367+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7368+{
7369+ unsigned long i;
7370+
7371+ printk(KERN_ERR "PAX: bytes at PC: ");
7372+ for (i = 0; i < 5; i++) {
7373+ unsigned int c;
7374+ if (get_user(c, (unsigned int __user *)pc+i))
7375+ printk(KERN_CONT "???????? ");
7376+ else
7377+ printk(KERN_CONT "%08x ", c);
7378+ }
7379+ printk("\n");
7380+}
7381+#endif
7382+
7383 /*
7384 * Check whether the instruction at regs->nip is a store using
7385 * an update addressing form which will update r1.
7386@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
7387 * indicate errors in DSISR but can validly be set in SRR1.
7388 */
7389 if (trap == 0x400)
7390- error_code &= 0x48200000;
7391+ error_code &= 0x58200000;
7392 else
7393 is_write = error_code & DSISR_ISSTORE;
7394 #else
7395@@ -371,7 +402,7 @@ good_area:
7396 * "undefined". Of those that can be set, this is the only
7397 * one which seems bad.
7398 */
7399- if (error_code & 0x10000000)
7400+ if (error_code & DSISR_GUARDED)
7401 /* Guarded storage error. */
7402 goto bad_area;
7403 #endif /* CONFIG_8xx */
7404@@ -386,7 +417,7 @@ good_area:
7405 * processors use the same I/D cache coherency mechanism
7406 * as embedded.
7407 */
7408- if (error_code & DSISR_PROTFAULT)
7409+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
7410 goto bad_area;
7411 #endif /* CONFIG_PPC_STD_MMU */
7412
7413@@ -471,6 +502,23 @@ bad_area:
7414 bad_area_nosemaphore:
7415 /* User mode accesses cause a SIGSEGV */
7416 if (user_mode(regs)) {
7417+
7418+#ifdef CONFIG_PAX_PAGEEXEC
7419+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
7420+#ifdef CONFIG_PPC_STD_MMU
7421+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
7422+#else
7423+ if (is_exec && regs->nip == address) {
7424+#endif
7425+ switch (pax_handle_fetch_fault(regs)) {
7426+ }
7427+
7428+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
7429+ do_group_exit(SIGKILL);
7430+ }
7431+ }
7432+#endif
7433+
7434 _exception(SIGSEGV, regs, code, address);
7435 goto bail;
7436 }
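The fault.c hunk widens the SRR1 error-code mask for instruction faults from 0x48200000 to 0x58200000, keeping the guarded-storage bit that the reg.h hunk names DSISR_GUARDED (0x10000000); PAGEEXEC marks non-executable pages guarded, so the handler must see fetches from guarded storage rather than mask them away. The mask arithmetic:

#include <stdio.h>

#define DSISR_NOHPTE    0x40000000u
#define DSISR_GUARDED   0x10000000u  /* added by this patch in asm/reg.h */
#define DSISR_PROTFAULT 0x08000000u

int main(void)
{
	unsigned int srr1 = DSISR_GUARDED | 0x00001000u;  /* plus a stray bit */
	printf("old mask: %#x\n", srr1 & 0x48200000u);    /* guarded bit lost */
	printf("new mask: %#x\n", srr1 & 0x58200000u);    /* guarded bit kept */
	return 0;
}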
7437diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
7438index 67a42ed..cd463e0 100644
7439--- a/arch/powerpc/mm/mmap_64.c
7440+++ b/arch/powerpc/mm/mmap_64.c
7441@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
7442 {
7443 unsigned long rnd = 0;
7444
7445+#ifdef CONFIG_PAX_RANDMMAP
7446+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7447+#endif
7448+
7449 if (current->flags & PF_RANDOMIZE) {
7450 /* 8MB for 32bit, 1GB for 64bit */
7451 if (is_32bit_task())
7452@@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7453 */
7454 if (mmap_is_legacy()) {
7455 mm->mmap_base = TASK_UNMAPPED_BASE;
7456+
7457+#ifdef CONFIG_PAX_RANDMMAP
7458+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7459+ mm->mmap_base += mm->delta_mmap;
7460+#endif
7461+
7462 mm->get_unmapped_area = arch_get_unmapped_area;
7463 mm->unmap_area = arch_unmap_area;
7464 } else {
7465 mm->mmap_base = mmap_base();
7466+
7467+#ifdef CONFIG_PAX_RANDMMAP
7468+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7469+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7470+#endif
7471+
7472 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7473 mm->unmap_area = arch_unmap_area_topdown;
7474 }
7475diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
7476index e779642..e5bb889 100644
7477--- a/arch/powerpc/mm/mmu_context_nohash.c
7478+++ b/arch/powerpc/mm/mmu_context_nohash.c
7479@@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
7480 return NOTIFY_OK;
7481 }
7482
7483-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
7484+static struct notifier_block mmu_context_cpu_nb = {
7485 .notifier_call = mmu_context_cpu_notify,
7486 };
7487
7488diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
7489index cafad40..9cbc0fc 100644
7490--- a/arch/powerpc/mm/numa.c
7491+++ b/arch/powerpc/mm/numa.c
7492@@ -920,7 +920,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
7493 return ret;
7494 }
7495
7496-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
7497+static struct notifier_block ppc64_numa_nb = {
7498 .notifier_call = cpu_numa_callback,
7499 .priority = 1 /* Must run before sched domains notifier. */
7500 };
7501diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
7502index 3e99c14..f00953c 100644
7503--- a/arch/powerpc/mm/slice.c
7504+++ b/arch/powerpc/mm/slice.c
7505@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
7506 if ((mm->task_size - len) < addr)
7507 return 0;
7508 vma = find_vma(mm, addr);
7509- return (!vma || (addr + len) <= vma->vm_start);
7510+ return check_heap_stack_gap(vma, addr, len, 0);
7511 }
7512
7513 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
7514@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
7515 info.align_offset = 0;
7516
7517 addr = TASK_UNMAPPED_BASE;
7518+
7519+#ifdef CONFIG_PAX_RANDMMAP
7520+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7521+ addr += mm->delta_mmap;
7522+#endif
7523+
7524 while (addr < TASK_SIZE) {
7525 info.low_limit = addr;
7526 if (!slice_scan_available(addr, available, 1, &addr))
7527@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
7528 if (fixed && addr > (mm->task_size - len))
7529 return -EINVAL;
7530
7531+#ifdef CONFIG_PAX_RANDMMAP
7532+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
7533+ addr = 0;
7534+#endif
7535+
7536 /* If hint, make sure it matches our alignment restrictions */
7537 if (!fixed && addr) {
7538 addr = _ALIGN_UP(addr, 1ul << pshift);
7539diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
7540index 9098692..3d54cd1 100644
7541--- a/arch/powerpc/platforms/cell/spufs/file.c
7542+++ b/arch/powerpc/platforms/cell/spufs/file.c
7543@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7544 return VM_FAULT_NOPAGE;
7545 }
7546
7547-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
7548+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
7549 unsigned long address,
7550- void *buf, int len, int write)
7551+ void *buf, size_t len, int write)
7552 {
7553 struct spu_context *ctx = vma->vm_file->private_data;
7554 unsigned long offset = address - vma->vm_start;
7555diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
7556index bdb738a..49c9f95 100644
7557--- a/arch/powerpc/platforms/powermac/smp.c
7558+++ b/arch/powerpc/platforms/powermac/smp.c
7559@@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
7560 return NOTIFY_OK;
7561 }
7562
7563-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
7564+static struct notifier_block smp_core99_cpu_nb = {
7565 .notifier_call = smp_core99_cpu_notify,
7566 };
7567 #endif /* CONFIG_HOTPLUG_CPU */
7568diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
7569index c797832..ce575c8 100644
7570--- a/arch/s390/include/asm/atomic.h
7571+++ b/arch/s390/include/asm/atomic.h
7572@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
7573 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
7574 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7575
7576+#define atomic64_read_unchecked(v) atomic64_read(v)
7577+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7578+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7579+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7580+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7581+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7582+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7583+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7584+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7585+
7586 #define smp_mb__before_atomic_dec() smp_mb()
7587 #define smp_mb__after_atomic_dec() smp_mb()
7588 #define smp_mb__before_atomic_inc() smp_mb()
7589diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
7590index 4d7ccac..d03d0ad 100644
7591--- a/arch/s390/include/asm/cache.h
7592+++ b/arch/s390/include/asm/cache.h
7593@@ -9,8 +9,10 @@
7594 #ifndef __ARCH_S390_CACHE_H
7595 #define __ARCH_S390_CACHE_H
7596
7597-#define L1_CACHE_BYTES 256
7598+#include <linux/const.h>
7599+
7600 #define L1_CACHE_SHIFT 8
7601+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7602 #define NET_SKB_PAD 32
7603
7604 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7605diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
7606index 78f4f87..598ce39 100644
7607--- a/arch/s390/include/asm/elf.h
7608+++ b/arch/s390/include/asm/elf.h
7609@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
7610 the loader. We need to make sure that it is out of the way of the program
7611 that it will "exec", and that there is sufficient room for the brk. */
7612
7613-extern unsigned long randomize_et_dyn(unsigned long base);
7614-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
7615+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
7616+
7617+#ifdef CONFIG_PAX_ASLR
7618+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
7619+
7620+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7621+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7622+#endif
7623
7624 /* This yields a mask that user programs can use to figure out what
7625 instruction set this CPU supports. */
7626@@ -222,9 +228,6 @@ struct linux_binprm;
7627 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
7628 int arch_setup_additional_pages(struct linux_binprm *, int);
7629
7630-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7631-#define arch_randomize_brk arch_randomize_brk
7632-
7633 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
7634
7635 #endif
7636diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
7637index c4a93d6..4d2a9b4 100644
7638--- a/arch/s390/include/asm/exec.h
7639+++ b/arch/s390/include/asm/exec.h
7640@@ -7,6 +7,6 @@
7641 #ifndef __ASM_EXEC_H
7642 #define __ASM_EXEC_H
7643
7644-extern unsigned long arch_align_stack(unsigned long sp);
7645+#define arch_align_stack(x) ((x) & ~0xfUL)
7646
7647 #endif /* __ASM_EXEC_H */
7648diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
7649index b75d7d6..6d6d92b 100644
7650--- a/arch/s390/include/asm/tlb.h
7651+++ b/arch/s390/include/asm/tlb.h
7652@@ -32,6 +32,7 @@ struct mmu_gather {
7653 struct mm_struct *mm;
7654 struct mmu_table_batch *batch;
7655 unsigned int fullmm;
7656+ unsigned long start, end;
7657 };
7658
7659 struct mmu_table_batch {
7660@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
7661
7662 static inline void tlb_gather_mmu(struct mmu_gather *tlb,
7663 struct mm_struct *mm,
7664- unsigned int full_mm_flush)
7665+ unsigned long start,
7666+ unsigned long end)
7667 {
7668 tlb->mm = mm;
7669- tlb->fullmm = full_mm_flush;
7670+ tlb->start = start;
7671+ tlb->end = end;
7672+ tlb->fullmm = !(start | (end+1));
7673 tlb->batch = NULL;
7674 if (tlb->fullmm)
7675 __tlb_flush_mm(mm);
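tlb_gather_mmu() now receives an explicit (start, end) range instead of a full_mm_flush flag; "flush the whole mm" is encoded as start == 0, end == -1, and !(start | (end + 1)) recovers that flag without branches (the sh tlb.h hunk below makes the identical change). Checking the identity:

#include <stdio.h>

static int fullmm(unsigned long start, unsigned long end)
{
	/* zero iff start == 0 and end + 1 == 0, i.e. end == ~0UL */
	return !(start | (end + 1));
}

int main(void)
{
	printf("%d\n", fullmm(0, ~0UL));       /* 1: whole address space */
	printf("%d\n", fullmm(0, 0x7fffUL));   /* 0: partial range */
	printf("%d\n", fullmm(0x1000, ~0UL));  /* 0: partial range */
	return 0;
}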
7676diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
7677index 9c33ed4..e40cbef 100644
7678--- a/arch/s390/include/asm/uaccess.h
7679+++ b/arch/s390/include/asm/uaccess.h
7680@@ -252,6 +252,10 @@ static inline unsigned long __must_check
7681 copy_to_user(void __user *to, const void *from, unsigned long n)
7682 {
7683 might_fault();
7684+
7685+ if ((long)n < 0)
7686+ return n;
7687+
7688 return __copy_to_user(to, from, n);
7689 }
7690
7691@@ -275,6 +279,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
7692 static inline unsigned long __must_check
7693 __copy_from_user(void *to, const void __user *from, unsigned long n)
7694 {
7695+ if ((long)n < 0)
7696+ return n;
7697+
7698 if (__builtin_constant_p(n) && (n <= 256))
7699 return uaccess.copy_from_user_small(n, from, to);
7700 else
7701@@ -306,10 +313,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
7702 static inline unsigned long __must_check
7703 copy_from_user(void *to, const void __user *from, unsigned long n)
7704 {
7705- unsigned int sz = __compiletime_object_size(to);
7706+ size_t sz = __compiletime_object_size(to);
7707
7708 might_fault();
7709- if (unlikely(sz != -1 && sz < n)) {
7710+
7711+ if ((long)n < 0)
7712+ return n;
7713+
7714+ if (unlikely(sz != (size_t)-1 && sz < n)) {
7715 copy_from_user_overflow();
7716 return n;
7717 }
7718diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
7719index 7845e15..59c4353 100644
7720--- a/arch/s390/kernel/module.c
7721+++ b/arch/s390/kernel/module.c
7722@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
7723
7724 /* Increase core size by size of got & plt and set start
7725 offsets for got and plt. */
7726- me->core_size = ALIGN(me->core_size, 4);
7727- me->arch.got_offset = me->core_size;
7728- me->core_size += me->arch.got_size;
7729- me->arch.plt_offset = me->core_size;
7730- me->core_size += me->arch.plt_size;
7731+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
7732+ me->arch.got_offset = me->core_size_rw;
7733+ me->core_size_rw += me->arch.got_size;
7734+ me->arch.plt_offset = me->core_size_rx;
7735+ me->core_size_rx += me->arch.plt_size;
7736 return 0;
7737 }
7738
7739@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7740 if (info->got_initialized == 0) {
7741 Elf_Addr *gotent;
7742
7743- gotent = me->module_core + me->arch.got_offset +
7744+ gotent = me->module_core_rw + me->arch.got_offset +
7745 info->got_offset;
7746 *gotent = val;
7747 info->got_initialized = 1;
7748@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7749 rc = apply_rela_bits(loc, val, 0, 64, 0);
7750 else if (r_type == R_390_GOTENT ||
7751 r_type == R_390_GOTPLTENT) {
7752- val += (Elf_Addr) me->module_core - loc;
7753+ val += (Elf_Addr) me->module_core_rw - loc;
7754 rc = apply_rela_bits(loc, val, 1, 32, 1);
7755 }
7756 break;
7757@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7758 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
7759 if (info->plt_initialized == 0) {
7760 unsigned int *ip;
7761- ip = me->module_core + me->arch.plt_offset +
7762+ ip = me->module_core_rx + me->arch.plt_offset +
7763 info->plt_offset;
7764 #ifndef CONFIG_64BIT
7765 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
7766@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7767 val - loc + 0xffffUL < 0x1ffffeUL) ||
7768 (r_type == R_390_PLT32DBL &&
7769 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
7770- val = (Elf_Addr) me->module_core +
7771+ val = (Elf_Addr) me->module_core_rx +
7772 me->arch.plt_offset +
7773 info->plt_offset;
7774 val += rela->r_addend - loc;
7775@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7776 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
7777 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
7778 val = val + rela->r_addend -
7779- ((Elf_Addr) me->module_core + me->arch.got_offset);
7780+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
7781 if (r_type == R_390_GOTOFF16)
7782 rc = apply_rela_bits(loc, val, 0, 16, 0);
7783 else if (r_type == R_390_GOTOFF32)
7784@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7785 break;
7786 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
7787 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
7788- val = (Elf_Addr) me->module_core + me->arch.got_offset +
7789+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
7790 rela->r_addend - loc;
7791 if (r_type == R_390_GOTPC)
7792 rc = apply_rela_bits(loc, val, 1, 32, 0);
7793diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
7794index 2bc3edd..ab9d598 100644
7795--- a/arch/s390/kernel/process.c
7796+++ b/arch/s390/kernel/process.c
7797@@ -236,39 +236,3 @@ unsigned long get_wchan(struct task_struct *p)
7798 }
7799 return 0;
7800 }
7801-
7802-unsigned long arch_align_stack(unsigned long sp)
7803-{
7804- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7805- sp -= get_random_int() & ~PAGE_MASK;
7806- return sp & ~0xf;
7807-}
7808-
7809-static inline unsigned long brk_rnd(void)
7810-{
7811- /* 8MB for 32bit, 1GB for 64bit */
7812- if (is_32bit_task())
7813- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
7814- else
7815- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
7816-}
7817-
7818-unsigned long arch_randomize_brk(struct mm_struct *mm)
7819-{
7820- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
7821-
7822- if (ret < mm->brk)
7823- return mm->brk;
7824- return ret;
7825-}
7826-
7827-unsigned long randomize_et_dyn(unsigned long base)
7828-{
7829- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7830-
7831- if (!(current->flags & PF_RANDOMIZE))
7832- return base;
7833- if (ret < base)
7834- return base;
7835- return ret;
7836-}
7837diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
7838index 06bafec..2bca531 100644
7839--- a/arch/s390/mm/mmap.c
7840+++ b/arch/s390/mm/mmap.c
7841@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7842 */
7843 if (mmap_is_legacy()) {
7844 mm->mmap_base = TASK_UNMAPPED_BASE;
7845+
7846+#ifdef CONFIG_PAX_RANDMMAP
7847+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7848+ mm->mmap_base += mm->delta_mmap;
7849+#endif
7850+
7851 mm->get_unmapped_area = arch_get_unmapped_area;
7852 mm->unmap_area = arch_unmap_area;
7853 } else {
7854 mm->mmap_base = mmap_base();
7855+
7856+#ifdef CONFIG_PAX_RANDMMAP
7857+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7858+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7859+#endif
7860+
7861 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7862 mm->unmap_area = arch_unmap_area_topdown;
7863 }
7864@@ -175,10 +187,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7865 */
7866 if (mmap_is_legacy()) {
7867 mm->mmap_base = TASK_UNMAPPED_BASE;
7868+
7869+#ifdef CONFIG_PAX_RANDMMAP
7870+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7871+ mm->mmap_base += mm->delta_mmap;
7872+#endif
7873+
7874 mm->get_unmapped_area = s390_get_unmapped_area;
7875 mm->unmap_area = arch_unmap_area;
7876 } else {
7877 mm->mmap_base = mmap_base();
7878+
7879+#ifdef CONFIG_PAX_RANDMMAP
7880+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7881+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7882+#endif
7883+
7884 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
7885 mm->unmap_area = arch_unmap_area_topdown;
7886 }
7887diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
7888index ae3d59f..f65f075 100644
7889--- a/arch/score/include/asm/cache.h
7890+++ b/arch/score/include/asm/cache.h
7891@@ -1,7 +1,9 @@
7892 #ifndef _ASM_SCORE_CACHE_H
7893 #define _ASM_SCORE_CACHE_H
7894
7895+#include <linux/const.h>
7896+
7897 #define L1_CACHE_SHIFT 4
7898-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7899+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7900
7901 #endif /* _ASM_SCORE_CACHE_H */
7902diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
7903index f9f3cd5..58ff438 100644
7904--- a/arch/score/include/asm/exec.h
7905+++ b/arch/score/include/asm/exec.h
7906@@ -1,6 +1,6 @@
7907 #ifndef _ASM_SCORE_EXEC_H
7908 #define _ASM_SCORE_EXEC_H
7909
7910-extern unsigned long arch_align_stack(unsigned long sp);
7911+#define arch_align_stack(x) (x)
7912
7913 #endif /* _ASM_SCORE_EXEC_H */
7914diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
7915index f4c6d02..e9355c3 100644
7916--- a/arch/score/kernel/process.c
7917+++ b/arch/score/kernel/process.c
7918@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
7919
7920 return task_pt_regs(task)->cp0_epc;
7921 }
7922-
7923-unsigned long arch_align_stack(unsigned long sp)
7924-{
7925- return sp;
7926-}
7927diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
7928index ef9e555..331bd29 100644
7929--- a/arch/sh/include/asm/cache.h
7930+++ b/arch/sh/include/asm/cache.h
7931@@ -9,10 +9,11 @@
7932 #define __ASM_SH_CACHE_H
7933 #ifdef __KERNEL__
7934
7935+#include <linux/const.h>
7936 #include <linux/init.h>
7937 #include <cpu/cache.h>
7938
7939-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7940+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7941
7942 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7943
7944diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
7945index e61d43d..362192e 100644
7946--- a/arch/sh/include/asm/tlb.h
7947+++ b/arch/sh/include/asm/tlb.h
7948@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
7949 }
7950
7951 static inline void
7952-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
7953+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
7954 {
7955 tlb->mm = mm;
7956- tlb->fullmm = full_mm_flush;
7957+ tlb->start = start;
7958+ tlb->end = end;
7959+ tlb->fullmm = !(start | (end+1));
7960
7961 init_tlb_gather(tlb);
7962 }
7963diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7964index 03f2b55..b0270327 100644
7965--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7966+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7967@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
7968 return NOTIFY_OK;
7969 }
7970
7971-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
7972+static struct notifier_block shx3_cpu_notifier = {
7973 .notifier_call = shx3_cpu_callback,
7974 };
7975
7976diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
7977index 6777177..cb5e44f 100644
7978--- a/arch/sh/mm/mmap.c
7979+++ b/arch/sh/mm/mmap.c
7980@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7981 struct mm_struct *mm = current->mm;
7982 struct vm_area_struct *vma;
7983 int do_colour_align;
7984+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7985 struct vm_unmapped_area_info info;
7986
7987 if (flags & MAP_FIXED) {
7988@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7989 if (filp || (flags & MAP_SHARED))
7990 do_colour_align = 1;
7991
7992+#ifdef CONFIG_PAX_RANDMMAP
7993+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7994+#endif
7995+
7996 if (addr) {
7997 if (do_colour_align)
7998 addr = COLOUR_ALIGN(addr, pgoff);
7999@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8000 addr = PAGE_ALIGN(addr);
8001
8002 vma = find_vma(mm, addr);
8003- if (TASK_SIZE - len >= addr &&
8004- (!vma || addr + len <= vma->vm_start))
8005+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8006 return addr;
8007 }
8008
8009 info.flags = 0;
8010 info.length = len;
8011- info.low_limit = TASK_UNMAPPED_BASE;
8012+ info.low_limit = mm->mmap_base;
8013 info.high_limit = TASK_SIZE;
8014 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
8015 info.align_offset = pgoff << PAGE_SHIFT;
8016@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8017 struct mm_struct *mm = current->mm;
8018 unsigned long addr = addr0;
8019 int do_colour_align;
8020+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8021 struct vm_unmapped_area_info info;
8022
8023 if (flags & MAP_FIXED) {
8024@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8025 if (filp || (flags & MAP_SHARED))
8026 do_colour_align = 1;
8027
8028+#ifdef CONFIG_PAX_RANDMMAP
8029+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8030+#endif
8031+
8032 /* requesting a specific address */
8033 if (addr) {
8034 if (do_colour_align)
8035@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8036 addr = PAGE_ALIGN(addr);
8037
8038 vma = find_vma(mm, addr);
8039- if (TASK_SIZE - len >= addr &&
8040- (!vma || addr + len <= vma->vm_start))
8041+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8042 return addr;
8043 }
8044
8045@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8046 VM_BUG_ON(addr != -ENOMEM);
8047 info.flags = 0;
8048 info.low_limit = TASK_UNMAPPED_BASE;
8049+
8050+#ifdef CONFIG_PAX_RANDMMAP
8051+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8052+ info.low_limit += mm->delta_mmap;
8053+#endif
8054+
8055 info.high_limit = TASK_SIZE;
8056 addr = vm_unmapped_area(&info);
8057 }
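Both sh get_unmapped_area paths (and the slice.c hunk earlier) replace the open-coded "!vma || addr + len <= vma->vm_start" test with grsecurity's check_heap_stack_gap(vma, addr, len, offset); the helper's body is not shown in these hunks. A sketch of the test being replaced, with an extra guard-gap parameter as an assumption about what the helper adds on top:

#include <stdio.h>

struct vm_area_struct { unsigned long vm_start; };

/* hypothetical stand-in for check_heap_stack_gap(); the real helper lives
 * in grsecurity code outside this hunk */
static int fits_below(const struct vm_area_struct *vma,
		      unsigned long addr, unsigned long len,
		      unsigned long gap)
{
	if (!vma)
		return 1;  /* nothing mapped above: the request fits */
	return addr + len + gap <= vma->vm_start;
}

int main(void)
{
	struct vm_area_struct vma = { .vm_start = 0x40000000UL };
	printf("%d %d\n",
	       fits_below(&vma, 0x3f000000UL, 0x1000, 0),       /* 1 */
	       fits_below(&vma, 0x3fffff00UL, 0x1000, 0x1000)); /* 0 */
	return 0;
}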
8058diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
8059index be56a24..443328f 100644
8060--- a/arch/sparc/include/asm/atomic_64.h
8061+++ b/arch/sparc/include/asm/atomic_64.h
8062@@ -14,18 +14,40 @@
8063 #define ATOMIC64_INIT(i) { (i) }
8064
8065 #define atomic_read(v) (*(volatile int *)&(v)->counter)
8066+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8067+{
8068+ return v->counter;
8069+}
8070 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
8071+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8072+{
8073+ return v->counter;
8074+}
8075
8076 #define atomic_set(v, i) (((v)->counter) = i)
8077+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8078+{
8079+ v->counter = i;
8080+}
8081 #define atomic64_set(v, i) (((v)->counter) = i)
8082+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8083+{
8084+ v->counter = i;
8085+}
8086
8087 extern void atomic_add(int, atomic_t *);
8088+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
8089 extern void atomic64_add(long, atomic64_t *);
8090+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
8091 extern void atomic_sub(int, atomic_t *);
8092+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
8093 extern void atomic64_sub(long, atomic64_t *);
8094+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
8095
8096 extern int atomic_add_ret(int, atomic_t *);
8097+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
8098 extern long atomic64_add_ret(long, atomic64_t *);
8099+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
8100 extern int atomic_sub_ret(int, atomic_t *);
8101 extern long atomic64_sub_ret(long, atomic64_t *);
8102
8103@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8104 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
8105
8106 #define atomic_inc_return(v) atomic_add_ret(1, v)
8107+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8108+{
8109+ return atomic_add_ret_unchecked(1, v);
8110+}
8111 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
8112+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8113+{
8114+ return atomic64_add_ret_unchecked(1, v);
8115+}
8116
8117 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
8118 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
8119
8120 #define atomic_add_return(i, v) atomic_add_ret(i, v)
8121+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8122+{
8123+ return atomic_add_ret_unchecked(i, v);
8124+}
8125 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
8126+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8127+{
8128+ return atomic64_add_ret_unchecked(i, v);
8129+}
8130
8131 /*
8132 * atomic_inc_and_test - increment and test
8133@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8134 * other cases.
8135 */
8136 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
8137+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8138+{
8139+ return atomic_inc_return_unchecked(v) == 0;
8140+}
8141 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8142
8143 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
8144@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8145 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
8146
8147 #define atomic_inc(v) atomic_add(1, v)
8148+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8149+{
8150+ atomic_add_unchecked(1, v);
8151+}
8152 #define atomic64_inc(v) atomic64_add(1, v)
8153+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8154+{
8155+ atomic64_add_unchecked(1, v);
8156+}
8157
8158 #define atomic_dec(v) atomic_sub(1, v)
8159+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8160+{
8161+ atomic_sub_unchecked(1, v);
8162+}
8163 #define atomic64_dec(v) atomic64_sub(1, v)
8164+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8165+{
8166+ atomic64_sub_unchecked(1, v);
8167+}
8168
8169 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
8170 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
8171
8172 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8173+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8174+{
8175+ return cmpxchg(&v->counter, old, new);
8176+}
8177 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
8178+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8179+{
8180+ return xchg(&v->counter, new);
8181+}
8182
8183 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8184 {
8185- int c, old;
8186+ int c, old, new;
8187 c = atomic_read(v);
8188 for (;;) {
8189- if (unlikely(c == (u)))
8190+ if (unlikely(c == u))
8191 break;
8192- old = atomic_cmpxchg((v), c, c + (a));
8193+
8194+ asm volatile("addcc %2, %0, %0\n"
8195+
8196+#ifdef CONFIG_PAX_REFCOUNT
8197+ "tvs %%icc, 6\n"
8198+#endif
8199+
8200+ : "=r" (new)
8201+ : "0" (c), "ir" (a)
8202+ : "cc");
8203+
8204+ old = atomic_cmpxchg(v, c, new);
8205 if (likely(old == c))
8206 break;
8207 c = old;
8208@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8209 #define atomic64_cmpxchg(v, o, n) \
8210 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
8211 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8212+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8213+{
8214+ return xchg(&v->counter, new);
8215+}
8216
8217 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
8218 {
8219- long c, old;
8220+ long c, old, new;
8221 c = atomic64_read(v);
8222 for (;;) {
8223- if (unlikely(c == (u)))
8224+ if (unlikely(c == u))
8225 break;
8226- old = atomic64_cmpxchg((v), c, c + (a));
8227+
8228+ asm volatile("addcc %2, %0, %0\n"
8229+
8230+#ifdef CONFIG_PAX_REFCOUNT
8231+ "tvs %%xcc, 6\n"
8232+#endif
8233+
8234+ : "=r" (new)
8235+ : "0" (c), "ir" (a)
8236+ : "cc");
8237+
8238+ old = atomic64_cmpxchg(v, c, new);
8239 if (likely(old == c))
8240 break;
8241 c = old;
8242 }
8243- return c != (u);
8244+ return c != u;
8245 }
8246
8247 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
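The pattern above splits every atomic primitive into a trapping variant and an _unchecked twin: reference counts keep the overflow trap, while counters that may legitimately wrap (statistics, sequence numbers) are converted to atomic_unchecked_t and bypass it. A minimal C sketch of the semantics, assuming __builtin_add_overflow as a portable stand-in for the addcc/tvs pair (the type names are illustrative and atomicity is elided):

/* Sketch only: the sparc code uses addcc + "tvs %icc, 6" to raise
 * trap 6 on signed overflow; __builtin_add_overflow models that here.
 * Locking/atomicity is elided, and the type names are illustrative. */
typedef struct { int counter; } sketch_atomic_t;            /* trapped  */
typedef struct { int counter; } sketch_atomic_unchecked_t;  /* may wrap */

static inline void sketch_inc(sketch_atomic_t *v)
{
	int n;
	if (__builtin_add_overflow(v->counter, 1, &n))
		__builtin_trap();	/* kernel: trap 6 -> pax_report_refcount_overflow() */
	v->counter = n;
}

static inline void sketch_inc_unchecked(sketch_atomic_unchecked_t *v)
{
	v->counter++;			/* wraparound tolerated by design */
}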
8248diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
8249index 5bb6991..5c2132e 100644
8250--- a/arch/sparc/include/asm/cache.h
8251+++ b/arch/sparc/include/asm/cache.h
8252@@ -7,10 +7,12 @@
8253 #ifndef _SPARC_CACHE_H
8254 #define _SPARC_CACHE_H
8255
8256+#include <linux/const.h>
8257+
8258 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
8259
8260 #define L1_CACHE_SHIFT 5
8261-#define L1_CACHE_BYTES 32
8262+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8263
8264 #ifdef CONFIG_SPARC32
8265 #define SMP_CACHE_BYTES_SHIFT 5
8266diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
8267index a24e41f..47677ff 100644
8268--- a/arch/sparc/include/asm/elf_32.h
8269+++ b/arch/sparc/include/asm/elf_32.h
8270@@ -114,6 +114,13 @@ typedef struct {
8271
8272 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
8273
8274+#ifdef CONFIG_PAX_ASLR
8275+#define PAX_ELF_ET_DYN_BASE 0x10000UL
8276+
8277+#define PAX_DELTA_MMAP_LEN 16
8278+#define PAX_DELTA_STACK_LEN 16
8279+#endif
8280+
8281 /* This yields a mask that user programs can use to figure out what
8282 instruction set this cpu supports. This can NOT be done in userspace
8283 on Sparc. */
8284diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
8285index 370ca1e..d4f4a98 100644
8286--- a/arch/sparc/include/asm/elf_64.h
8287+++ b/arch/sparc/include/asm/elf_64.h
8288@@ -189,6 +189,13 @@ typedef struct {
8289 #define ELF_ET_DYN_BASE 0x0000010000000000UL
8290 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
8291
8292+#ifdef CONFIG_PAX_ASLR
8293+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
8294+
8295+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
8296+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
8297+#endif
8298+
8299 extern unsigned long sparc64_elf_hwcap;
8300 #define ELF_HWCAP sparc64_elf_hwcap
8301
8302diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
8303index 9b1c36d..209298b 100644
8304--- a/arch/sparc/include/asm/pgalloc_32.h
8305+++ b/arch/sparc/include/asm/pgalloc_32.h
8306@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
8307 }
8308
8309 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
8310+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
8311
8312 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
8313 unsigned long address)
8314diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
8315index bcfe063..b333142 100644
8316--- a/arch/sparc/include/asm/pgalloc_64.h
8317+++ b/arch/sparc/include/asm/pgalloc_64.h
8318@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8319 }
8320
8321 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
8322+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
8323
8324 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
8325 {
8326diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
8327index 6fc1348..390c50a 100644
8328--- a/arch/sparc/include/asm/pgtable_32.h
8329+++ b/arch/sparc/include/asm/pgtable_32.h
8330@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
8331 #define PAGE_SHARED SRMMU_PAGE_SHARED
8332 #define PAGE_COPY SRMMU_PAGE_COPY
8333 #define PAGE_READONLY SRMMU_PAGE_RDONLY
8334+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
8335+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
8336+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
8337 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
8338
8339 /* Top-level page directory - dummy used by init-mm.
8340@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
8341
8342 /* xwr */
8343 #define __P000 PAGE_NONE
8344-#define __P001 PAGE_READONLY
8345-#define __P010 PAGE_COPY
8346-#define __P011 PAGE_COPY
8347+#define __P001 PAGE_READONLY_NOEXEC
8348+#define __P010 PAGE_COPY_NOEXEC
8349+#define __P011 PAGE_COPY_NOEXEC
8350 #define __P100 PAGE_READONLY
8351 #define __P101 PAGE_READONLY
8352 #define __P110 PAGE_COPY
8353 #define __P111 PAGE_COPY
8354
8355 #define __S000 PAGE_NONE
8356-#define __S001 PAGE_READONLY
8357-#define __S010 PAGE_SHARED
8358-#define __S011 PAGE_SHARED
8359+#define __S001 PAGE_READONLY_NOEXEC
8360+#define __S010 PAGE_SHARED_NOEXEC
8361+#define __S011 PAGE_SHARED_NOEXEC
8362 #define __S100 PAGE_READONLY
8363 #define __S101 PAGE_READONLY
8364 #define __S110 PAGE_SHARED
8365diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
8366index 79da178..c2eede8 100644
8367--- a/arch/sparc/include/asm/pgtsrmmu.h
8368+++ b/arch/sparc/include/asm/pgtsrmmu.h
8369@@ -115,6 +115,11 @@
8370 SRMMU_EXEC | SRMMU_REF)
8371 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
8372 SRMMU_EXEC | SRMMU_REF)
8373+
8374+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
8375+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
8376+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
8377+
8378 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
8379 SRMMU_DIRTY | SRMMU_REF)
8380
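The __P/__S tables in pgtable_32.h are indexed by the mmap protection bits in xwr order (hence the /* xwr */ comment), with __P* used for private/COW mappings and __S* for shared ones; entries 001-011 lack the exec bit, so under PAGEEXEC they now map to the _NOEXEC protections defined here. A small sketch of the indexing, with the standard PROT_* values:

#include <stdio.h>

#define PROT_READ  0x1
#define PROT_WRITE 0x2
#define PROT_EXEC  0x4

int main(void)
{
	int prot = PROT_READ | PROT_WRITE;	/* an rw- mapping */
	int idx  = ((prot & PROT_EXEC)  ? 4 : 0) |
		   ((prot & PROT_WRITE) ? 2 : 0) |
		   ((prot & PROT_READ)  ? 1 : 0);

	/* idx 3 -> __P011/__S011: no exec bit, so the NOEXEC variant */
	printf("__P%d%d%d selected, exec %s\n",
	       (idx >> 2) & 1, (idx >> 1) & 1, idx & 1,
	       (idx & 4) ? "allowed" : "denied");
	return 0;
}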
8381diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
8382index 9689176..63c18ea 100644
8383--- a/arch/sparc/include/asm/spinlock_64.h
8384+++ b/arch/sparc/include/asm/spinlock_64.h
8385@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
8386
8387 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
8388
8389-static void inline arch_read_lock(arch_rwlock_t *lock)
8390+static inline void arch_read_lock(arch_rwlock_t *lock)
8391 {
8392 unsigned long tmp1, tmp2;
8393
8394 __asm__ __volatile__ (
8395 "1: ldsw [%2], %0\n"
8396 " brlz,pn %0, 2f\n"
8397-"4: add %0, 1, %1\n"
8398+"4: addcc %0, 1, %1\n"
8399+
8400+#ifdef CONFIG_PAX_REFCOUNT
8401+" tvs %%icc, 6\n"
8402+#endif
8403+
8404 " cas [%2], %0, %1\n"
8405 " cmp %0, %1\n"
8406 " bne,pn %%icc, 1b\n"
8407@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
8408 " .previous"
8409 : "=&r" (tmp1), "=&r" (tmp2)
8410 : "r" (lock)
8411- : "memory");
8412+ : "memory", "cc");
8413 }
8414
8415-static int inline arch_read_trylock(arch_rwlock_t *lock)
8416+static inline int arch_read_trylock(arch_rwlock_t *lock)
8417 {
8418 int tmp1, tmp2;
8419
8420@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8421 "1: ldsw [%2], %0\n"
8422 " brlz,a,pn %0, 2f\n"
8423 " mov 0, %0\n"
8424-" add %0, 1, %1\n"
8425+" addcc %0, 1, %1\n"
8426+
8427+#ifdef CONFIG_PAX_REFCOUNT
8428+" tvs %%icc, 6\n"
8429+#endif
8430+
8431 " cas [%2], %0, %1\n"
8432 " cmp %0, %1\n"
8433 " bne,pn %%icc, 1b\n"
8434@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8435 return tmp1;
8436 }
8437
8438-static void inline arch_read_unlock(arch_rwlock_t *lock)
8439+static inline void arch_read_unlock(arch_rwlock_t *lock)
8440 {
8441 unsigned long tmp1, tmp2;
8442
8443 __asm__ __volatile__(
8444 "1: lduw [%2], %0\n"
8445-" sub %0, 1, %1\n"
8446+" subcc %0, 1, %1\n"
8447+
8448+#ifdef CONFIG_PAX_REFCOUNT
8449+" tvs %%icc, 6\n"
8450+#endif
8451+
8452 " cas [%2], %0, %1\n"
8453 " cmp %0, %1\n"
8454 " bne,pn %%xcc, 1b\n"
8455@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
8456 : "memory");
8457 }
8458
8459-static void inline arch_write_lock(arch_rwlock_t *lock)
8460+static inline void arch_write_lock(arch_rwlock_t *lock)
8461 {
8462 unsigned long mask, tmp1, tmp2;
8463
8464@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
8465 : "memory");
8466 }
8467
8468-static void inline arch_write_unlock(arch_rwlock_t *lock)
8469+static inline void arch_write_unlock(arch_rwlock_t *lock)
8470 {
8471 __asm__ __volatile__(
8472 " stw %%g0, [%0]"
8473@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
8474 : "memory");
8475 }
8476
8477-static int inline arch_write_trylock(arch_rwlock_t *lock)
8478+static inline int arch_write_trylock(arch_rwlock_t *lock)
8479 {
8480 unsigned long mask, tmp1, tmp2, result;
8481
8482diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
8483index dd38075..e7cac83 100644
8484--- a/arch/sparc/include/asm/thread_info_32.h
8485+++ b/arch/sparc/include/asm/thread_info_32.h
8486@@ -49,6 +49,8 @@ struct thread_info {
8487 unsigned long w_saved;
8488
8489 struct restart_block restart_block;
8490+
8491+ unsigned long lowest_stack;
8492 };
8493
8494 /*
8495diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
8496index d5e5042..9bfee76 100644
8497--- a/arch/sparc/include/asm/thread_info_64.h
8498+++ b/arch/sparc/include/asm/thread_info_64.h
8499@@ -63,6 +63,8 @@ struct thread_info {
8500 struct pt_regs *kern_una_regs;
8501 unsigned int kern_una_insn;
8502
8503+ unsigned long lowest_stack;
8504+
8505 unsigned long fpregs[0] __attribute__ ((aligned(64)));
8506 };
8507
8508@@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
8509 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
8510 /* flag bit 6 is available */
8511 #define TIF_32BIT 7 /* 32-bit binary */
8512-/* flag bit 8 is available */
8513+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
8514 #define TIF_SECCOMP 9 /* secure computing */
8515 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
8516 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
8517+
8518 /* NOTE: Thread flags >= 12 should be ones we have no interest
8519 * in using in assembly, else we can't use the mask as
8520 * an immediate value in instructions such as andcc.
8521@@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
8522 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
8523 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8524 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
8525+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8526
8527 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
8528 _TIF_DO_NOTIFY_RESUME_MASK | \
8529 _TIF_NEED_RESCHED)
8530 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
8531
8532+#define _TIF_WORK_SYSCALL \
8533+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
8534+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
8535+
8536+
8537 /*
8538 * Thread-synchronous status.
8539 *
8540diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
8541index 0167d26..767bb0c 100644
8542--- a/arch/sparc/include/asm/uaccess.h
8543+++ b/arch/sparc/include/asm/uaccess.h
8544@@ -1,5 +1,6 @@
8545 #ifndef ___ASM_SPARC_UACCESS_H
8546 #define ___ASM_SPARC_UACCESS_H
8547+
8548 #if defined(__sparc__) && defined(__arch64__)
8549 #include <asm/uaccess_64.h>
8550 #else
8551diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
8552index 53a28dd..50c38c3 100644
8553--- a/arch/sparc/include/asm/uaccess_32.h
8554+++ b/arch/sparc/include/asm/uaccess_32.h
8555@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
8556
8557 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8558 {
8559- if (n && __access_ok((unsigned long) to, n))
8560+ if ((long)n < 0)
8561+ return n;
8562+
8563+ if (n && __access_ok((unsigned long) to, n)) {
8564+ if (!__builtin_constant_p(n))
8565+ check_object_size(from, n, true);
8566 return __copy_user(to, (__force void __user *) from, n);
8567- else
8568+ } else
8569 return n;
8570 }
8571
8572 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
8573 {
8574+ if ((long)n < 0)
8575+ return n;
8576+
8577+ if (!__builtin_constant_p(n))
8578+ check_object_size(from, n, true);
8579+
8580 return __copy_user(to, (__force void __user *) from, n);
8581 }
8582
8583 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8584 {
8585- if (n && __access_ok((unsigned long) from, n))
8586+ if ((long)n < 0)
8587+ return n;
8588+
8589+ if (n && __access_ok((unsigned long) from, n)) {
8590+ if (!__builtin_constant_p(n))
8591+ check_object_size(to, n, false);
8592 return __copy_user((__force void __user *) to, from, n);
8593- else
8594+ } else
8595 return n;
8596 }
8597
8598 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
8599 {
8600+ if ((long)n < 0)
8601+ return n;
8602+
8603 return __copy_user((__force void __user *) to, from, n);
8604 }
8605
8606diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
8607index e562d3c..191f176 100644
8608--- a/arch/sparc/include/asm/uaccess_64.h
8609+++ b/arch/sparc/include/asm/uaccess_64.h
8610@@ -10,6 +10,7 @@
8611 #include <linux/compiler.h>
8612 #include <linux/string.h>
8613 #include <linux/thread_info.h>
8614+#include <linux/kernel.h>
8615 #include <asm/asi.h>
8616 #include <asm/spitfire.h>
8617 #include <asm-generic/uaccess-unaligned.h>
8618@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
8619 static inline unsigned long __must_check
8620 copy_from_user(void *to, const void __user *from, unsigned long size)
8621 {
8622- unsigned long ret = ___copy_from_user(to, from, size);
8623+ unsigned long ret;
8624
8625+ if ((long)size < 0 || size > INT_MAX)
8626+ return size;
8627+
8628+ if (!__builtin_constant_p(size))
8629+ check_object_size(to, size, false);
8630+
8631+ ret = ___copy_from_user(to, from, size);
8632 if (unlikely(ret))
8633 ret = copy_from_user_fixup(to, from, size);
8634
8635@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
8636 static inline unsigned long __must_check
8637 copy_to_user(void __user *to, const void *from, unsigned long size)
8638 {
8639- unsigned long ret = ___copy_to_user(to, from, size);
8640+ unsigned long ret;
8641
8642+ if ((long)size < 0 || size > INT_MAX)
8643+ return size;
8644+
8645+ if (!__builtin_constant_p(size))
8646+ check_object_size(from, size, true);
8647+
8648+ ret = ___copy_to_user(to, from, size);
8649 if (unlikely(ret))
8650 ret = copy_to_user_fixup(to, from, size);
8651 return ret;
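Both uaccess variants gain the same two guards: a sign check that rejects lengths whose top bit is set (the signature of an overflowed size computation), and, for non-constant sizes, a check_object_size() call that lets PAX_USERCOPY verify the kernel buffer really spans n bytes. A sketch of the size guard alone, with memcpy standing in for the real copy routine:

#include <limits.h>
#include <string.h>

/* Illustrative only: returns the number of bytes NOT copied, like the
 * kernel helpers above; a "negative" size is refused outright. */
static unsigned long sketch_copy_from_user(void *to, const void *from,
					   unsigned long n)
{
	if ((long)n < 0 || n > INT_MAX)
		return n;		/* overflowed length: copy nothing */
	memcpy(to, from, n);		/* stands in for ___copy_from_user */
	return 0;
}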
8652diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
8653index d432fb2..6056af1 100644
8654--- a/arch/sparc/kernel/Makefile
8655+++ b/arch/sparc/kernel/Makefile
8656@@ -3,7 +3,7 @@
8657 #
8658
8659 asflags-y := -ansi
8660-ccflags-y := -Werror
8661+#ccflags-y := -Werror
8662
8663 extra-y := head_$(BITS).o
8664
8665diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
8666index 5ef48da..11d460f 100644
8667--- a/arch/sparc/kernel/ds.c
8668+++ b/arch/sparc/kernel/ds.c
8669@@ -783,6 +783,16 @@ void ldom_set_var(const char *var, const char *value)
8670 char *base, *p;
8671 int msg_len, loops;
8672
8673+ if (strlen(var) + strlen(value) + 2 >
8674+ sizeof(pkt) - sizeof(pkt.header)) {
8675+ printk(KERN_ERR PFX
8676+ "contents length: %zu, which more than max: %lu,"
8677+ "so could not set (%s) variable to (%s).\n",
8678+ strlen(var) + strlen(value) + 2,
8679+ sizeof(pkt) - sizeof(pkt.header), var, value);
8680+ return;
8681+ }
8682+
8683 memset(&pkt, 0, sizeof(pkt));
8684 pkt.header.data.tag.type = DS_DATA;
8685 pkt.header.data.handle = cp->handle;
8686diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
8687index fdd819d..5af08c8 100644
8688--- a/arch/sparc/kernel/process_32.c
8689+++ b/arch/sparc/kernel/process_32.c
8690@@ -116,14 +116,14 @@ void show_regs(struct pt_regs *r)
8691
8692 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
8693 r->psr, r->pc, r->npc, r->y, print_tainted());
8694- printk("PC: <%pS>\n", (void *) r->pc);
8695+ printk("PC: <%pA>\n", (void *) r->pc);
8696 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8697 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
8698 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
8699 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8700 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
8701 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
8702- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
8703+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
8704
8705 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8706 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
8707@@ -160,7 +160,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8708 rw = (struct reg_window32 *) fp;
8709 pc = rw->ins[7];
8710 printk("[%08lx : ", pc);
8711- printk("%pS ] ", (void *) pc);
8712+ printk("%pA ] ", (void *) pc);
8713 fp = rw->ins[6];
8714 } while (++count < 16);
8715 printk("\n");
8716diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
8717index baebab2..9cd13b1 100644
8718--- a/arch/sparc/kernel/process_64.c
8719+++ b/arch/sparc/kernel/process_64.c
8720@@ -158,7 +158,7 @@ static void show_regwindow(struct pt_regs *regs)
8721 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
8722 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
8723 if (regs->tstate & TSTATE_PRIV)
8724- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
8725+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
8726 }
8727
8728 void show_regs(struct pt_regs *regs)
8729@@ -167,7 +167,7 @@ void show_regs(struct pt_regs *regs)
8730
8731 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
8732 regs->tpc, regs->tnpc, regs->y, print_tainted());
8733- printk("TPC: <%pS>\n", (void *) regs->tpc);
8734+ printk("TPC: <%pA>\n", (void *) regs->tpc);
8735 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
8736 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
8737 regs->u_regs[3]);
8738@@ -180,7 +180,7 @@ void show_regs(struct pt_regs *regs)
8739 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
8740 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
8741 regs->u_regs[15]);
8742- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
8743+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
8744 show_regwindow(regs);
8745 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
8746 }
8747@@ -269,7 +269,7 @@ void arch_trigger_all_cpu_backtrace(void)
8748 ((tp && tp->task) ? tp->task->pid : -1));
8749
8750 if (gp->tstate & TSTATE_PRIV) {
8751- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
8752+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
8753 (void *) gp->tpc,
8754 (void *) gp->o7,
8755 (void *) gp->i7,
8756diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
8757index 79cc0d1..ec62734 100644
8758--- a/arch/sparc/kernel/prom_common.c
8759+++ b/arch/sparc/kernel/prom_common.c
8760@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
8761
8762 unsigned int prom_early_allocated __initdata;
8763
8764-static struct of_pdt_ops prom_sparc_ops __initdata = {
8765+static struct of_pdt_ops prom_sparc_ops __initconst = {
8766 .nextprop = prom_common_nextprop,
8767 .getproplen = prom_getproplen,
8768 .getproperty = prom_getproperty,
8769diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
8770index 7ff45e4..a58f271 100644
8771--- a/arch/sparc/kernel/ptrace_64.c
8772+++ b/arch/sparc/kernel/ptrace_64.c
8773@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
8774 return ret;
8775 }
8776
8777+#ifdef CONFIG_GRKERNSEC_SETXID
8778+extern void gr_delayed_cred_worker(void);
8779+#endif
8780+
8781 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8782 {
8783 int ret = 0;
8784@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8785 /* do the secure computing check first */
8786 secure_computing_strict(regs->u_regs[UREG_G1]);
8787
8788+#ifdef CONFIG_GRKERNSEC_SETXID
8789+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8790+ gr_delayed_cred_worker();
8791+#endif
8792+
8793 if (test_thread_flag(TIF_SYSCALL_TRACE))
8794 ret = tracehook_report_syscall_entry(regs);
8795
8796@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8797
8798 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
8799 {
8800+#ifdef CONFIG_GRKERNSEC_SETXID
8801+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8802+ gr_delayed_cred_worker();
8803+#endif
8804+
8805 audit_syscall_exit(regs);
8806
8807 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
8808diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
8809index 3a8d184..49498a8 100644
8810--- a/arch/sparc/kernel/sys_sparc_32.c
8811+++ b/arch/sparc/kernel/sys_sparc_32.c
8812@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8813 if (len > TASK_SIZE - PAGE_SIZE)
8814 return -ENOMEM;
8815 if (!addr)
8816- addr = TASK_UNMAPPED_BASE;
8817+ addr = current->mm->mmap_base;
8818
8819 info.flags = 0;
8820 info.length = len;
8821diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
8822index 2daaaa6..4fb84dc 100644
8823--- a/arch/sparc/kernel/sys_sparc_64.c
8824+++ b/arch/sparc/kernel/sys_sparc_64.c
8825@@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8826 struct vm_area_struct * vma;
8827 unsigned long task_size = TASK_SIZE;
8828 int do_color_align;
8829+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8830 struct vm_unmapped_area_info info;
8831
8832 if (flags & MAP_FIXED) {
8833 /* We do not accept a shared mapping if it would violate
8834 * cache aliasing constraints.
8835 */
8836- if ((flags & MAP_SHARED) &&
8837+ if ((filp || (flags & MAP_SHARED)) &&
8838 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8839 return -EINVAL;
8840 return addr;
8841@@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8842 if (filp || (flags & MAP_SHARED))
8843 do_color_align = 1;
8844
8845+#ifdef CONFIG_PAX_RANDMMAP
8846+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8847+#endif
8848+
8849 if (addr) {
8850 if (do_color_align)
8851 addr = COLOR_ALIGN(addr, pgoff);
8852@@ -118,22 +123,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8853 addr = PAGE_ALIGN(addr);
8854
8855 vma = find_vma(mm, addr);
8856- if (task_size - len >= addr &&
8857- (!vma || addr + len <= vma->vm_start))
8858+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8859 return addr;
8860 }
8861
8862 info.flags = 0;
8863 info.length = len;
8864- info.low_limit = TASK_UNMAPPED_BASE;
8865+ info.low_limit = mm->mmap_base;
8866 info.high_limit = min(task_size, VA_EXCLUDE_START);
8867 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8868 info.align_offset = pgoff << PAGE_SHIFT;
8869+ info.threadstack_offset = offset;
8870 addr = vm_unmapped_area(&info);
8871
8872 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
8873 VM_BUG_ON(addr != -ENOMEM);
8874 info.low_limit = VA_EXCLUDE_END;
8875+
8876+#ifdef CONFIG_PAX_RANDMMAP
8877+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8878+ info.low_limit += mm->delta_mmap;
8879+#endif
8880+
8881 info.high_limit = task_size;
8882 addr = vm_unmapped_area(&info);
8883 }
8884@@ -151,6 +162,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8885 unsigned long task_size = STACK_TOP32;
8886 unsigned long addr = addr0;
8887 int do_color_align;
8888+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8889 struct vm_unmapped_area_info info;
8890
8891 /* This should only ever run for 32-bit processes. */
8892@@ -160,7 +172,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8893 /* We do not accept a shared mapping if it would violate
8894 * cache aliasing constraints.
8895 */
8896- if ((flags & MAP_SHARED) &&
8897+ if ((filp || (flags & MAP_SHARED)) &&
8898 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8899 return -EINVAL;
8900 return addr;
8901@@ -173,6 +185,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8902 if (filp || (flags & MAP_SHARED))
8903 do_color_align = 1;
8904
8905+#ifdef CONFIG_PAX_RANDMMAP
8906+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8907+#endif
8908+
8909 /* requesting a specific address */
8910 if (addr) {
8911 if (do_color_align)
8912@@ -181,8 +197,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8913 addr = PAGE_ALIGN(addr);
8914
8915 vma = find_vma(mm, addr);
8916- if (task_size - len >= addr &&
8917- (!vma || addr + len <= vma->vm_start))
8918+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8919 return addr;
8920 }
8921
8922@@ -192,6 +207,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8923 info.high_limit = mm->mmap_base;
8924 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8925 info.align_offset = pgoff << PAGE_SHIFT;
8926+ info.threadstack_offset = offset;
8927 addr = vm_unmapped_area(&info);
8928
8929 /*
8930@@ -204,6 +220,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8931 VM_BUG_ON(addr != -ENOMEM);
8932 info.flags = 0;
8933 info.low_limit = TASK_UNMAPPED_BASE;
8934+
8935+#ifdef CONFIG_PAX_RANDMMAP
8936+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8937+ info.low_limit += mm->delta_mmap;
8938+#endif
8939+
8940 info.high_limit = STACK_TOP32;
8941 addr = vm_unmapped_area(&info);
8942 }
8943@@ -260,10 +282,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
8944 EXPORT_SYMBOL(get_fb_unmapped_area);
8945
8946 /* Essentially the same as PowerPC. */
8947-static unsigned long mmap_rnd(void)
8948+static unsigned long mmap_rnd(struct mm_struct *mm)
8949 {
8950 unsigned long rnd = 0UL;
8951
8952+#ifdef CONFIG_PAX_RANDMMAP
8953+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8954+#endif
8955+
8956 if (current->flags & PF_RANDOMIZE) {
8957 unsigned long val = get_random_int();
8958 if (test_thread_flag(TIF_32BIT))
8959@@ -276,7 +302,7 @@ static unsigned long mmap_rnd(void)
8960
8961 void arch_pick_mmap_layout(struct mm_struct *mm)
8962 {
8963- unsigned long random_factor = mmap_rnd();
8964+ unsigned long random_factor = mmap_rnd(mm);
8965 unsigned long gap;
8966
8967 /*
8968@@ -289,6 +315,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8969 gap == RLIM_INFINITY ||
8970 sysctl_legacy_va_layout) {
8971 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
8972+
8973+#ifdef CONFIG_PAX_RANDMMAP
8974+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8975+ mm->mmap_base += mm->delta_mmap;
8976+#endif
8977+
8978 mm->get_unmapped_area = arch_get_unmapped_area;
8979 mm->unmap_area = arch_unmap_area;
8980 } else {
8981@@ -301,6 +333,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8982 gap = (task_size / 6 * 5);
8983
8984 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
8985+
8986+#ifdef CONFIG_PAX_RANDMMAP
8987+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8988+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8989+#endif
8990+
8991 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8992 mm->unmap_area = arch_unmap_area_topdown;
8993 }
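With PAX_RANDMMAP active the caller-supplied hint is ignored, the fallback low_limit is bumped by delta_mmap, and the final mmap_base gets the same shift, so every lookup path sees the randomized window. Assuming the delta is a PAX_DELTA_MMAP_LEN-bit random page count (as set up elsewhere in this patch) and sparc64's 8K pages, the base can move within roughly a 2 TiB window:

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 13;	/* assumed: sparc64 PAGE_SHIFT */
	unsigned long delta_bits = 28;	/* PAX_DELTA_MMAP_LEN, 64-bit task */
	unsigned long window = (1UL << delta_bits) << page_shift;

	printf("mmap base jitter: %lu TiB\n", window >> 40);	/* -> 2 */
	return 0;
}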
8994diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
8995index 22a1098..6255eb9 100644
8996--- a/arch/sparc/kernel/syscalls.S
8997+++ b/arch/sparc/kernel/syscalls.S
8998@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
8999 #endif
9000 .align 32
9001 1: ldx [%g6 + TI_FLAGS], %l5
9002- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9003+ andcc %l5, _TIF_WORK_SYSCALL, %g0
9004 be,pt %icc, rtrap
9005 nop
9006 call syscall_trace_leave
9007@@ -184,7 +184,7 @@ linux_sparc_syscall32:
9008
9009 srl %i5, 0, %o5 ! IEU1
9010 srl %i2, 0, %o2 ! IEU0 Group
9011- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9012+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9013 bne,pn %icc, linux_syscall_trace32 ! CTI
9014 mov %i0, %l5 ! IEU1
9015 call %l7 ! CTI Group brk forced
9016@@ -207,7 +207,7 @@ linux_sparc_syscall:
9017
9018 mov %i3, %o3 ! IEU1
9019 mov %i4, %o4 ! IEU0 Group
9020- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9021+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9022 bne,pn %icc, linux_syscall_trace ! CTI Group
9023 mov %i0, %l5 ! IEU0
9024 2: call %l7 ! CTI Group brk forced
9025@@ -223,7 +223,7 @@ ret_sys_call:
9026
9027 cmp %o0, -ERESTART_RESTARTBLOCK
9028 bgeu,pn %xcc, 1f
9029- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9030+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9031 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
9032
9033 2:
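Replacing the four-flag OR chain with _TIF_WORK_SYSCALL keeps the constant small enough for andcc's immediate field (per the NOTE in thread_info_64.h about flags >= 12) and picks up _TIF_GRSEC_SETXID at the same time. The asm test is equivalent to this C predicate (TIF_SYSCALL_TRACE's bit number is defined elsewhere; 4 is assumed here):

#define TIF_SYSCALL_TRACE	 4	/* assumed bit number */
#define TIF_GRSEC_SETXID	 8
#define TIF_SECCOMP		 9
#define TIF_SYSCALL_AUDIT	10
#define TIF_SYSCALL_TRACEPOINT	11

#define _TIF_WORK_SYSCALL \
	((1 << TIF_SYSCALL_TRACE) | (1 << TIF_SECCOMP) | \
	 (1 << TIF_SYSCALL_AUDIT) | (1 << TIF_SYSCALL_TRACEPOINT) | \
	 (1 << TIF_GRSEC_SETXID))

/* "andcc %l0, _TIF_WORK_SYSCALL, %g0 ; be,pt ..." boils down to: */
static inline int syscall_needs_trace(unsigned long ti_flags)
{
	return (ti_flags & _TIF_WORK_SYSCALL) != 0;
}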
9034diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
9035index 654e8aa..45f431b 100644
9036--- a/arch/sparc/kernel/sysfs.c
9037+++ b/arch/sparc/kernel/sysfs.c
9038@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
9039 return NOTIFY_OK;
9040 }
9041
9042-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
9043+static struct notifier_block sysfs_cpu_nb = {
9044 .notifier_call = sysfs_cpu_notify,
9045 };
9046
9047diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
9048index 6629829..036032d 100644
9049--- a/arch/sparc/kernel/traps_32.c
9050+++ b/arch/sparc/kernel/traps_32.c
9051@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
9052 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
9053 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
9054
9055+extern void gr_handle_kernel_exploit(void);
9056+
9057 void die_if_kernel(char *str, struct pt_regs *regs)
9058 {
9059 static int die_counter;
9060@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9061 count++ < 30 &&
9062 (((unsigned long) rw) >= PAGE_OFFSET) &&
9063 !(((unsigned long) rw) & 0x7)) {
9064- printk("Caller[%08lx]: %pS\n", rw->ins[7],
9065+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
9066 (void *) rw->ins[7]);
9067 rw = (struct reg_window32 *)rw->ins[6];
9068 }
9069 }
9070 printk("Instruction DUMP:");
9071 instruction_dump ((unsigned long *) regs->pc);
9072- if(regs->psr & PSR_PS)
9073+ if(regs->psr & PSR_PS) {
9074+ gr_handle_kernel_exploit();
9075 do_exit(SIGKILL);
9076+ }
9077 do_exit(SIGSEGV);
9078 }
9079
9080diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
9081index b3f833a..ac74b2d 100644
9082--- a/arch/sparc/kernel/traps_64.c
9083+++ b/arch/sparc/kernel/traps_64.c
9084@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
9085 i + 1,
9086 p->trapstack[i].tstate, p->trapstack[i].tpc,
9087 p->trapstack[i].tnpc, p->trapstack[i].tt);
9088- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
9089+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
9090 }
9091 }
9092
9093@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
9094
9095 lvl -= 0x100;
9096 if (regs->tstate & TSTATE_PRIV) {
9097+
9098+#ifdef CONFIG_PAX_REFCOUNT
9099+ if (lvl == 6)
9100+ pax_report_refcount_overflow(regs);
9101+#endif
9102+
9103 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
9104 die_if_kernel(buffer, regs);
9105 }
9106@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
9107 void bad_trap_tl1(struct pt_regs *regs, long lvl)
9108 {
9109 char buffer[32];
9110-
9111+
9112 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
9113 0, lvl, SIGTRAP) == NOTIFY_STOP)
9114 return;
9115
9116+#ifdef CONFIG_PAX_REFCOUNT
9117+ if (lvl == 6)
9118+ pax_report_refcount_overflow(regs);
9119+#endif
9120+
9121 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
9122
9123 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
9124@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
9125 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
9126 printk("%s" "ERROR(%d): ",
9127 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
9128- printk("TPC<%pS>\n", (void *) regs->tpc);
9129+ printk("TPC<%pA>\n", (void *) regs->tpc);
9130 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
9131 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
9132 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
9133@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
9134 smp_processor_id(),
9135 (type & 0x1) ? 'I' : 'D',
9136 regs->tpc);
9137- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
9138+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
9139 panic("Irrecoverable Cheetah+ parity error.");
9140 }
9141
9142@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
9143 smp_processor_id(),
9144 (type & 0x1) ? 'I' : 'D',
9145 regs->tpc);
9146- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
9147+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
9148 }
9149
9150 struct sun4v_error_entry {
9151@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
9152
9153 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
9154 regs->tpc, tl);
9155- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
9156+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
9157 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
9158- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
9159+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
9160 (void *) regs->u_regs[UREG_I7]);
9161 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
9162 "pte[%lx] error[%lx]\n",
9163@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
9164
9165 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
9166 regs->tpc, tl);
9167- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
9168+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
9169 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
9170- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
9171+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
9172 (void *) regs->u_regs[UREG_I7]);
9173 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
9174 "pte[%lx] error[%lx]\n",
9175@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
9176 fp = (unsigned long)sf->fp + STACK_BIAS;
9177 }
9178
9179- printk(" [%016lx] %pS\n", pc, (void *) pc);
9180+ printk(" [%016lx] %pA\n", pc, (void *) pc);
9181 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
9182 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
9183 int index = tsk->curr_ret_stack;
9184 if (tsk->ret_stack && index >= graph) {
9185 pc = tsk->ret_stack[index - graph].ret;
9186- printk(" [%016lx] %pS\n", pc, (void *) pc);
9187+ printk(" [%016lx] %pA\n", pc, (void *) pc);
9188 graph++;
9189 }
9190 }
9191@@ -2360,6 +2371,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
9192 return (struct reg_window *) (fp + STACK_BIAS);
9193 }
9194
9195+extern void gr_handle_kernel_exploit(void);
9196+
9197 void die_if_kernel(char *str, struct pt_regs *regs)
9198 {
9199 static int die_counter;
9200@@ -2388,7 +2401,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9201 while (rw &&
9202 count++ < 30 &&
9203 kstack_valid(tp, (unsigned long) rw)) {
9204- printk("Caller[%016lx]: %pS\n", rw->ins[7],
9205+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
9206 (void *) rw->ins[7]);
9207
9208 rw = kernel_stack_up(rw);
9209@@ -2401,8 +2414,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9210 }
9211 user_instruction_dump ((unsigned int __user *) regs->tpc);
9212 }
9213- if (regs->tstate & TSTATE_PRIV)
9214+ if (regs->tstate & TSTATE_PRIV) {
9215+ gr_handle_kernel_exploit();
9216 do_exit(SIGKILL);
9217+ }
9218 do_exit(SIGSEGV);
9219 }
9220 EXPORT_SYMBOL(die_if_kernel);
9221diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
9222index 8201c25e..072a2a7 100644
9223--- a/arch/sparc/kernel/unaligned_64.c
9224+++ b/arch/sparc/kernel/unaligned_64.c
9225@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
9226 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
9227
9228 if (__ratelimit(&ratelimit)) {
9229- printk("Kernel unaligned access at TPC[%lx] %pS\n",
9230+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
9231 regs->tpc, (void *) regs->tpc);
9232 }
9233 }
9234diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
9235index dbe119b..089c7c1 100644
9236--- a/arch/sparc/lib/Makefile
9237+++ b/arch/sparc/lib/Makefile
9238@@ -2,7 +2,7 @@
9239 #
9240
9241 asflags-y := -ansi -DST_DIV0=0x02
9242-ccflags-y := -Werror
9243+#ccflags-y := -Werror
9244
9245 lib-$(CONFIG_SPARC32) += ashrdi3.o
9246 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
9247diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
9248index 85c233d..68500e0 100644
9249--- a/arch/sparc/lib/atomic_64.S
9250+++ b/arch/sparc/lib/atomic_64.S
9251@@ -17,7 +17,12 @@
9252 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
9253 BACKOFF_SETUP(%o2)
9254 1: lduw [%o1], %g1
9255- add %g1, %o0, %g7
9256+ addcc %g1, %o0, %g7
9257+
9258+#ifdef CONFIG_PAX_REFCOUNT
9259+ tvs %icc, 6
9260+#endif
9261+
9262 cas [%o1], %g1, %g7
9263 cmp %g1, %g7
9264 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9265@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
9266 2: BACKOFF_SPIN(%o2, %o3, 1b)
9267 ENDPROC(atomic_add)
9268
9269+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9270+ BACKOFF_SETUP(%o2)
9271+1: lduw [%o1], %g1
9272+ add %g1, %o0, %g7
9273+ cas [%o1], %g1, %g7
9274+ cmp %g1, %g7
9275+ bne,pn %icc, 2f
9276+ nop
9277+ retl
9278+ nop
9279+2: BACKOFF_SPIN(%o2, %o3, 1b)
9280+ENDPROC(atomic_add_unchecked)
9281+
9282 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9283 BACKOFF_SETUP(%o2)
9284 1: lduw [%o1], %g1
9285- sub %g1, %o0, %g7
9286+ subcc %g1, %o0, %g7
9287+
9288+#ifdef CONFIG_PAX_REFCOUNT
9289+ tvs %icc, 6
9290+#endif
9291+
9292 cas [%o1], %g1, %g7
9293 cmp %g1, %g7
9294 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9295@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9296 2: BACKOFF_SPIN(%o2, %o3, 1b)
9297 ENDPROC(atomic_sub)
9298
9299+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
9300+ BACKOFF_SETUP(%o2)
9301+1: lduw [%o1], %g1
9302+ sub %g1, %o0, %g7
9303+ cas [%o1], %g1, %g7
9304+ cmp %g1, %g7
9305+ bne,pn %icc, 2f
9306+ nop
9307+ retl
9308+ nop
9309+2: BACKOFF_SPIN(%o2, %o3, 1b)
9310+ENDPROC(atomic_sub_unchecked)
9311+
9312 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9313 BACKOFF_SETUP(%o2)
9314 1: lduw [%o1], %g1
9315- add %g1, %o0, %g7
9316+ addcc %g1, %o0, %g7
9317+
9318+#ifdef CONFIG_PAX_REFCOUNT
9319+ tvs %icc, 6
9320+#endif
9321+
9322 cas [%o1], %g1, %g7
9323 cmp %g1, %g7
9324 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9325@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9326 2: BACKOFF_SPIN(%o2, %o3, 1b)
9327 ENDPROC(atomic_add_ret)
9328
9329+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9330+ BACKOFF_SETUP(%o2)
9331+1: lduw [%o1], %g1
9332+ addcc %g1, %o0, %g7
9333+ cas [%o1], %g1, %g7
9334+ cmp %g1, %g7
9335+ bne,pn %icc, 2f
9336+ add %g7, %o0, %g7
9337+ sra %g7, 0, %o0
9338+ retl
9339+ nop
9340+2: BACKOFF_SPIN(%o2, %o3, 1b)
9341+ENDPROC(atomic_add_ret_unchecked)
9342+
9343 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
9344 BACKOFF_SETUP(%o2)
9345 1: lduw [%o1], %g1
9346- sub %g1, %o0, %g7
9347+ subcc %g1, %o0, %g7
9348+
9349+#ifdef CONFIG_PAX_REFCOUNT
9350+ tvs %icc, 6
9351+#endif
9352+
9353 cas [%o1], %g1, %g7
9354 cmp %g1, %g7
9355 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9356@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
9357 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
9358 BACKOFF_SETUP(%o2)
9359 1: ldx [%o1], %g1
9360- add %g1, %o0, %g7
9361+ addcc %g1, %o0, %g7
9362+
9363+#ifdef CONFIG_PAX_REFCOUNT
9364+ tvs %xcc, 6
9365+#endif
9366+
9367 casx [%o1], %g1, %g7
9368 cmp %g1, %g7
9369 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9370@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
9371 2: BACKOFF_SPIN(%o2, %o3, 1b)
9372 ENDPROC(atomic64_add)
9373
9374+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9375+ BACKOFF_SETUP(%o2)
9376+1: ldx [%o1], %g1
9377+ addcc %g1, %o0, %g7
9378+ casx [%o1], %g1, %g7
9379+ cmp %g1, %g7
9380+ bne,pn %xcc, 2f
9381+ nop
9382+ retl
9383+ nop
9384+2: BACKOFF_SPIN(%o2, %o3, 1b)
9385+ENDPROC(atomic64_add_unchecked)
9386+
9387 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9388 BACKOFF_SETUP(%o2)
9389 1: ldx [%o1], %g1
9390- sub %g1, %o0, %g7
9391+ subcc %g1, %o0, %g7
9392+
9393+#ifdef CONFIG_PAX_REFCOUNT
9394+ tvs %xcc, 6
9395+#endif
9396+
9397 casx [%o1], %g1, %g7
9398 cmp %g1, %g7
9399 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9400@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9401 2: BACKOFF_SPIN(%o2, %o3, 1b)
9402 ENDPROC(atomic64_sub)
9403
9404+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
9405+ BACKOFF_SETUP(%o2)
9406+1: ldx [%o1], %g1
9407+ subcc %g1, %o0, %g7
9408+ casx [%o1], %g1, %g7
9409+ cmp %g1, %g7
9410+ bne,pn %xcc, 2f
9411+ nop
9412+ retl
9413+ nop
9414+2: BACKOFF_SPIN(%o2, %o3, 1b)
9415+ENDPROC(atomic64_sub_unchecked)
9416+
9417 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9418 BACKOFF_SETUP(%o2)
9419 1: ldx [%o1], %g1
9420- add %g1, %o0, %g7
9421+ addcc %g1, %o0, %g7
9422+
9423+#ifdef CONFIG_PAX_REFCOUNT
9424+ tvs %xcc, 6
9425+#endif
9426+
9427 casx [%o1], %g1, %g7
9428 cmp %g1, %g7
9429 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9430@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9431 2: BACKOFF_SPIN(%o2, %o3, 1b)
9432 ENDPROC(atomic64_add_ret)
9433
9434+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9435+ BACKOFF_SETUP(%o2)
9436+1: ldx [%o1], %g1
9437+ addcc %g1, %o0, %g7
9438+ casx [%o1], %g1, %g7
9439+ cmp %g1, %g7
9440+ bne,pn %xcc, 2f
9441+ add %g7, %o0, %g7
9442+ mov %g7, %o0
9443+ retl
9444+ nop
9445+2: BACKOFF_SPIN(%o2, %o3, 1b)
9446+ENDPROC(atomic64_add_ret_unchecked)
9447+
9448 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
9449 BACKOFF_SETUP(%o2)
9450 1: ldx [%o1], %g1
9451- sub %g1, %o0, %g7
9452+ subcc %g1, %o0, %g7
9453+
9454+#ifdef CONFIG_PAX_REFCOUNT
9455+ tvs %xcc, 6
9456+#endif
9457+
9458 casx [%o1], %g1, %g7
9459 cmp %g1, %g7
9460 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
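Each checked primitive above follows the same shape: load, addcc/subcc (setting the condition codes), an optional tvs that traps on overflow, then cas to publish and a branch back on contention; the _unchecked twins drop only the tvs. A C rendering of that loop, with __sync_val_compare_and_swap as a stand-in for cas:

/* Sketch of the lduw/addcc/tvs/cas retry loop; not the real mechanism. */
static inline int sketch_atomic_add_ret(int i, volatile int *p)
{
	int old, new;

	do {
		old = *p;				/* lduw */
		if (__builtin_add_overflow(old, i, &new))
			__builtin_trap();		/* tvs %icc, 6 */
	} while (__sync_val_compare_and_swap(p, old, new) != old);	/* cas */

	return new;
}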
9461diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
9462index 0c4e35e..745d3e4 100644
9463--- a/arch/sparc/lib/ksyms.c
9464+++ b/arch/sparc/lib/ksyms.c
9465@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
9466
9467 /* Atomic counter implementation. */
9468 EXPORT_SYMBOL(atomic_add);
9469+EXPORT_SYMBOL(atomic_add_unchecked);
9470 EXPORT_SYMBOL(atomic_add_ret);
9471+EXPORT_SYMBOL(atomic_add_ret_unchecked);
9472 EXPORT_SYMBOL(atomic_sub);
9473+EXPORT_SYMBOL(atomic_sub_unchecked);
9474 EXPORT_SYMBOL(atomic_sub_ret);
9475 EXPORT_SYMBOL(atomic64_add);
9476+EXPORT_SYMBOL(atomic64_add_unchecked);
9477 EXPORT_SYMBOL(atomic64_add_ret);
9478+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
9479 EXPORT_SYMBOL(atomic64_sub);
9480+EXPORT_SYMBOL(atomic64_sub_unchecked);
9481 EXPORT_SYMBOL(atomic64_sub_ret);
9482 EXPORT_SYMBOL(atomic64_dec_if_positive);
9483
9484diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
9485index 30c3ecc..736f015 100644
9486--- a/arch/sparc/mm/Makefile
9487+++ b/arch/sparc/mm/Makefile
9488@@ -2,7 +2,7 @@
9489 #
9490
9491 asflags-y := -ansi
9492-ccflags-y := -Werror
9493+#ccflags-y := -Werror
9494
9495 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
9496 obj-y += fault_$(BITS).o
9497diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
9498index e98bfda..ea8d221 100644
9499--- a/arch/sparc/mm/fault_32.c
9500+++ b/arch/sparc/mm/fault_32.c
9501@@ -21,6 +21,9 @@
9502 #include <linux/perf_event.h>
9503 #include <linux/interrupt.h>
9504 #include <linux/kdebug.h>
9505+#include <linux/slab.h>
9506+#include <linux/pagemap.h>
9507+#include <linux/compiler.h>
9508
9509 #include <asm/page.h>
9510 #include <asm/pgtable.h>
9511@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
9512 return safe_compute_effective_address(regs, insn);
9513 }
9514
9515+#ifdef CONFIG_PAX_PAGEEXEC
9516+#ifdef CONFIG_PAX_DLRESOLVE
9517+static void pax_emuplt_close(struct vm_area_struct *vma)
9518+{
9519+ vma->vm_mm->call_dl_resolve = 0UL;
9520+}
9521+
9522+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9523+{
9524+ unsigned int *kaddr;
9525+
9526+ vmf->page = alloc_page(GFP_HIGHUSER);
9527+ if (!vmf->page)
9528+ return VM_FAULT_OOM;
9529+
9530+ kaddr = kmap(vmf->page);
9531+ memset(kaddr, 0, PAGE_SIZE);
9532+ kaddr[0] = 0x9DE3BFA8U; /* save */
9533+ flush_dcache_page(vmf->page);
9534+ kunmap(vmf->page);
9535+ return VM_FAULT_MAJOR;
9536+}
9537+
9538+static const struct vm_operations_struct pax_vm_ops = {
9539+ .close = pax_emuplt_close,
9540+ .fault = pax_emuplt_fault
9541+};
9542+
9543+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9544+{
9545+ int ret;
9546+
9547+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9548+ vma->vm_mm = current->mm;
9549+ vma->vm_start = addr;
9550+ vma->vm_end = addr + PAGE_SIZE;
9551+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9552+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9553+ vma->vm_ops = &pax_vm_ops;
9554+
9555+ ret = insert_vm_struct(current->mm, vma);
9556+ if (ret)
9557+ return ret;
9558+
9559+ ++current->mm->total_vm;
9560+ return 0;
9561+}
9562+#endif
9563+
9564+/*
9565+ * PaX: decide what to do with offenders (regs->pc = fault address)
9566+ *
9567+ * returns 1 when task should be killed
9568+ * 2 when patched PLT trampoline was detected
9569+ * 3 when unpatched PLT trampoline was detected
9570+ */
9571+static int pax_handle_fetch_fault(struct pt_regs *regs)
9572+{
9573+
9574+#ifdef CONFIG_PAX_EMUPLT
9575+ int err;
9576+
9577+ do { /* PaX: patched PLT emulation #1 */
9578+ unsigned int sethi1, sethi2, jmpl;
9579+
9580+ err = get_user(sethi1, (unsigned int *)regs->pc);
9581+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
9582+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
9583+
9584+ if (err)
9585+ break;
9586+
9587+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9588+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9589+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9590+ {
9591+ unsigned int addr;
9592+
9593+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9594+ addr = regs->u_regs[UREG_G1];
9595+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9596+ regs->pc = addr;
9597+ regs->npc = addr+4;
9598+ return 2;
9599+ }
9600+ } while (0);
9601+
9602+ do { /* PaX: patched PLT emulation #2 */
9603+ unsigned int ba;
9604+
9605+ err = get_user(ba, (unsigned int *)regs->pc);
9606+
9607+ if (err)
9608+ break;
9609+
9610+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9611+ unsigned int addr;
9612+
9613+ if ((ba & 0xFFC00000U) == 0x30800000U)
9614+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9615+ else
9616+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9617+ regs->pc = addr;
9618+ regs->npc = addr+4;
9619+ return 2;
9620+ }
9621+ } while (0);
9622+
9623+ do { /* PaX: patched PLT emulation #3 */
9624+ unsigned int sethi, bajmpl, nop;
9625+
9626+ err = get_user(sethi, (unsigned int *)regs->pc);
9627+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
9628+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9629+
9630+ if (err)
9631+ break;
9632+
9633+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9634+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9635+ nop == 0x01000000U)
9636+ {
9637+ unsigned int addr;
9638+
9639+ addr = (sethi & 0x003FFFFFU) << 10;
9640+ regs->u_regs[UREG_G1] = addr;
9641+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9642+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9643+ else
9644+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9645+ regs->pc = addr;
9646+ regs->npc = addr+4;
9647+ return 2;
9648+ }
9649+ } while (0);
9650+
9651+ do { /* PaX: unpatched PLT emulation step 1 */
9652+ unsigned int sethi, ba, nop;
9653+
9654+ err = get_user(sethi, (unsigned int *)regs->pc);
9655+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
9656+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9657+
9658+ if (err)
9659+ break;
9660+
9661+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9662+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9663+ nop == 0x01000000U)
9664+ {
9665+ unsigned int addr, save, call;
9666+
9667+ if ((ba & 0xFFC00000U) == 0x30800000U)
9668+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9669+ else
9670+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9671+
9672+ err = get_user(save, (unsigned int *)addr);
9673+ err |= get_user(call, (unsigned int *)(addr+4));
9674+ err |= get_user(nop, (unsigned int *)(addr+8));
9675+ if (err)
9676+ break;
9677+
9678+#ifdef CONFIG_PAX_DLRESOLVE
9679+ if (save == 0x9DE3BFA8U &&
9680+ (call & 0xC0000000U) == 0x40000000U &&
9681+ nop == 0x01000000U)
9682+ {
9683+ struct vm_area_struct *vma;
9684+ unsigned long call_dl_resolve;
9685+
9686+ down_read(&current->mm->mmap_sem);
9687+ call_dl_resolve = current->mm->call_dl_resolve;
9688+ up_read(&current->mm->mmap_sem);
9689+ if (likely(call_dl_resolve))
9690+ goto emulate;
9691+
9692+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9693+
9694+ down_write(&current->mm->mmap_sem);
9695+ if (current->mm->call_dl_resolve) {
9696+ call_dl_resolve = current->mm->call_dl_resolve;
9697+ up_write(&current->mm->mmap_sem);
9698+ if (vma)
9699+ kmem_cache_free(vm_area_cachep, vma);
9700+ goto emulate;
9701+ }
9702+
9703+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9704+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9705+ up_write(&current->mm->mmap_sem);
9706+ if (vma)
9707+ kmem_cache_free(vm_area_cachep, vma);
9708+ return 1;
9709+ }
9710+
9711+ if (pax_insert_vma(vma, call_dl_resolve)) {
9712+ up_write(&current->mm->mmap_sem);
9713+ kmem_cache_free(vm_area_cachep, vma);
9714+ return 1;
9715+ }
9716+
9717+ current->mm->call_dl_resolve = call_dl_resolve;
9718+ up_write(&current->mm->mmap_sem);
9719+
9720+emulate:
9721+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9722+ regs->pc = call_dl_resolve;
9723+ regs->npc = addr+4;
9724+ return 3;
9725+ }
9726+#endif
9727+
9728+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9729+ if ((save & 0xFFC00000U) == 0x05000000U &&
9730+ (call & 0xFFFFE000U) == 0x85C0A000U &&
9731+ nop == 0x01000000U)
9732+ {
9733+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9734+ regs->u_regs[UREG_G2] = addr + 4;
9735+ addr = (save & 0x003FFFFFU) << 10;
9736+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9737+ regs->pc = addr;
9738+ regs->npc = addr+4;
9739+ return 3;
9740+ }
9741+ }
9742+ } while (0);
9743+
9744+ do { /* PaX: unpatched PLT emulation step 2 */
9745+ unsigned int save, call, nop;
9746+
9747+ err = get_user(save, (unsigned int *)(regs->pc-4));
9748+ err |= get_user(call, (unsigned int *)regs->pc);
9749+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
9750+ if (err)
9751+ break;
9752+
9753+ if (save == 0x9DE3BFA8U &&
9754+ (call & 0xC0000000U) == 0x40000000U &&
9755+ nop == 0x01000000U)
9756+ {
9757+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
9758+
9759+ regs->u_regs[UREG_RETPC] = regs->pc;
9760+ regs->pc = dl_resolve;
9761+ regs->npc = dl_resolve+4;
9762+ return 3;
9763+ }
9764+ } while (0);
9765+#endif
9766+
9767+ return 1;
9768+}
9769+
9770+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9771+{
9772+ unsigned long i;
9773+
9774+ printk(KERN_ERR "PAX: bytes at PC: ");
9775+ for (i = 0; i < 8; i++) {
9776+ unsigned int c;
9777+ if (get_user(c, (unsigned int *)pc+i))
9778+ printk(KERN_CONT "???????? ");
9779+ else
9780+ printk(KERN_CONT "%08x ", c);
9781+ }
9782+ printk("\n");
9783+}
9784+#endif
9785+
9786 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
9787 int text_fault)
9788 {
9789@@ -230,6 +504,24 @@ good_area:
9790 if (!(vma->vm_flags & VM_WRITE))
9791 goto bad_area;
9792 } else {
9793+
9794+#ifdef CONFIG_PAX_PAGEEXEC
9795+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
9796+ up_read(&mm->mmap_sem);
9797+ switch (pax_handle_fetch_fault(regs)) {
9798+
9799+#ifdef CONFIG_PAX_EMUPLT
9800+ case 2:
9801+ case 3:
9802+ return;
9803+#endif
9804+
9805+ }
9806+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
9807+ do_group_exit(SIGKILL);
9808+ }
9809+#endif
9810+
9811 /* Allow reads even for write-only mappings */
9812 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
9813 goto bad_area;
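The patched-PLT matchers above recognize instruction sequences by masking, then rebuild the branch target from the immediates: sethi contributes bits 31..10 via (insn & 0x003FFFFF) << 10, and jmpl's signed 13-bit displacement is sign-extended with the or/xor/add trick. A worked example with a hand-assembled sethi %hi(0x1000), %g1 / jmpl %g1 + 8, %g0 pair:

#include <stdio.h>

int main(void)
{
	unsigned int sethi = 0x03000004U;	/* sethi %hi(0x1000), %g1 */
	unsigned int jmpl  = 0x81C06008U;	/* jmpl  %g1 + 8, %g0     */

	unsigned int hi = (sethi & 0x003FFFFFU) << 10;		/* 0x1000 */
	unsigned int lo = (((jmpl | 0xFFFFE000U) ^ 0x00001000U)
			   + 0x00001000U);			/* +8, sign-extended */

	printf("emulated jump target: 0x%x\n", hi + lo);	/* 0x1008 */
	return 0;
}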
9814diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
9815index 5062ff3..e0b75f3 100644
9816--- a/arch/sparc/mm/fault_64.c
9817+++ b/arch/sparc/mm/fault_64.c
9818@@ -21,6 +21,9 @@
9819 #include <linux/kprobes.h>
9820 #include <linux/kdebug.h>
9821 #include <linux/percpu.h>
9822+#include <linux/slab.h>
9823+#include <linux/pagemap.h>
9824+#include <linux/compiler.h>
9825
9826 #include <asm/page.h>
9827 #include <asm/pgtable.h>
9828@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
9829 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
9830 regs->tpc);
9831 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
9832- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
9833+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
9834 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
9835 dump_stack();
9836 unhandled_fault(regs->tpc, current, regs);
9837@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
9838 show_regs(regs);
9839 }
9840
9841+#ifdef CONFIG_PAX_PAGEEXEC
9842+#ifdef CONFIG_PAX_DLRESOLVE
9843+static void pax_emuplt_close(struct vm_area_struct *vma)
9844+{
9845+ vma->vm_mm->call_dl_resolve = 0UL;
9846+}
9847+
9848+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9849+{
9850+ unsigned int *kaddr;
9851+
9852+ vmf->page = alloc_page(GFP_HIGHUSER);
9853+ if (!vmf->page)
9854+ return VM_FAULT_OOM;
9855+
9856+ kaddr = kmap(vmf->page);
9857+ memset(kaddr, 0, PAGE_SIZE);
9858+ kaddr[0] = 0x9DE3BFA8U; /* save */
9859+ flush_dcache_page(vmf->page);
9860+ kunmap(vmf->page);
9861+ return VM_FAULT_MAJOR;
9862+}
9863+
9864+static const struct vm_operations_struct pax_vm_ops = {
9865+ .close = pax_emuplt_close,
9866+ .fault = pax_emuplt_fault
9867+};
9868+
9869+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9870+{
9871+ int ret;
9872+
9873+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9874+ vma->vm_mm = current->mm;
9875+ vma->vm_start = addr;
9876+ vma->vm_end = addr + PAGE_SIZE;
9877+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9878+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9879+ vma->vm_ops = &pax_vm_ops;
9880+
9881+ ret = insert_vm_struct(current->mm, vma);
9882+ if (ret)
9883+ return ret;
9884+
9885+ ++current->mm->total_vm;
9886+ return 0;
9887+}
9888+#endif
9889+
9890+/*
9891+ * PaX: decide what to do with offenders (regs->tpc = fault address)
9892+ *
9893+ * returns 1 when task should be killed
9894+ * 2 when patched PLT trampoline was detected
9895+ * 3 when unpatched PLT trampoline was detected
9896+ */
9897+static int pax_handle_fetch_fault(struct pt_regs *regs)
9898+{
9899+
9900+#ifdef CONFIG_PAX_EMUPLT
9901+ int err;
9902+
9903+ do { /* PaX: patched PLT emulation #1 */
9904+ unsigned int sethi1, sethi2, jmpl;
9905+
9906+ err = get_user(sethi1, (unsigned int *)regs->tpc);
9907+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
9908+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
9909+
9910+ if (err)
9911+ break;
9912+
9913+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9914+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9915+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9916+ {
9917+ unsigned long addr;
9918+
9919+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9920+ addr = regs->u_regs[UREG_G1];
9921+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9922+
9923+ if (test_thread_flag(TIF_32BIT))
9924+ addr &= 0xFFFFFFFFUL;
9925+
9926+ regs->tpc = addr;
9927+ regs->tnpc = addr+4;
9928+ return 2;
9929+ }
9930+ } while (0);
9931+
9932+ do { /* PaX: patched PLT emulation #2 */
9933+ unsigned int ba;
9934+
9935+ err = get_user(ba, (unsigned int *)regs->tpc);
9936+
9937+ if (err)
9938+ break;
9939+
9940+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9941+ unsigned long addr;
9942+
9943+ if ((ba & 0xFFC00000U) == 0x30800000U)
9944+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9945+ else
9946+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9947+
9948+ if (test_thread_flag(TIF_32BIT))
9949+ addr &= 0xFFFFFFFFUL;
9950+
9951+ regs->tpc = addr;
9952+ regs->tnpc = addr+4;
9953+ return 2;
9954+ }
9955+ } while (0);
9956+
9957+ do { /* PaX: patched PLT emulation #3 */
9958+ unsigned int sethi, bajmpl, nop;
9959+
9960+ err = get_user(sethi, (unsigned int *)regs->tpc);
9961+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
9962+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9963+
9964+ if (err)
9965+ break;
9966+
9967+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9968+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9969+ nop == 0x01000000U)
9970+ {
9971+ unsigned long addr;
9972+
9973+ addr = (sethi & 0x003FFFFFU) << 10;
9974+ regs->u_regs[UREG_G1] = addr;
9975+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9976+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9977+ else
9978+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9979+
9980+ if (test_thread_flag(TIF_32BIT))
9981+ addr &= 0xFFFFFFFFUL;
9982+
9983+ regs->tpc = addr;
9984+ regs->tnpc = addr+4;
9985+ return 2;
9986+ }
9987+ } while (0);
9988+
9989+ do { /* PaX: patched PLT emulation #4 */
9990+ unsigned int sethi, mov1, call, mov2;
9991+
9992+ err = get_user(sethi, (unsigned int *)regs->tpc);
9993+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
9994+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
9995+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
9996+
9997+ if (err)
9998+ break;
9999+
10000+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10001+ mov1 == 0x8210000FU &&
10002+ (call & 0xC0000000U) == 0x40000000U &&
10003+ mov2 == 0x9E100001U)
10004+ {
10005+ unsigned long addr;
10006+
10007+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
10008+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
10009+
10010+ if (test_thread_flag(TIF_32BIT))
10011+ addr &= 0xFFFFFFFFUL;
10012+
10013+ regs->tpc = addr;
10014+ regs->tnpc = addr+4;
10015+ return 2;
10016+ }
10017+ } while (0);
10018+
10019+ do { /* PaX: patched PLT emulation #5 */
10020+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
10021+
10022+ err = get_user(sethi, (unsigned int *)regs->tpc);
10023+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
10024+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
10025+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
10026+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
10027+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
10028+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
10029+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
10030+
10031+ if (err)
10032+ break;
10033+
10034+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10035+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
10036+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10037+ (or1 & 0xFFFFE000U) == 0x82106000U &&
10038+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
10039+ sllx == 0x83287020U &&
10040+ jmpl == 0x81C04005U &&
10041+ nop == 0x01000000U)
10042+ {
10043+ unsigned long addr;
10044+
10045+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
10046+ regs->u_regs[UREG_G1] <<= 32;
10047+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
10048+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
10049+ regs->tpc = addr;
10050+ regs->tnpc = addr+4;
10051+ return 2;
10052+ }
10053+ } while (0);
10054+
10055+ do { /* PaX: patched PLT emulation #6 */
10056+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
10057+
10058+ err = get_user(sethi, (unsigned int *)regs->tpc);
10059+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
10060+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
10061+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
10062+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
10063+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
10064+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
10065+
10066+ if (err)
10067+ break;
10068+
10069+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10070+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
10071+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10072+ sllx == 0x83287020U &&
10073+ (or & 0xFFFFE000U) == 0x8A116000U &&
10074+ jmpl == 0x81C04005U &&
10075+ nop == 0x01000000U)
10076+ {
10077+ unsigned long addr;
10078+
10079+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
10080+ regs->u_regs[UREG_G1] <<= 32;
10081+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
10082+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
10083+ regs->tpc = addr;
10084+ regs->tnpc = addr+4;
10085+ return 2;
10086+ }
10087+ } while (0);
10088+
10089+ do { /* PaX: unpatched PLT emulation step 1 */
10090+ unsigned int sethi, ba, nop;
10091+
10092+ err = get_user(sethi, (unsigned int *)regs->tpc);
10093+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
10094+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10095+
10096+ if (err)
10097+ break;
10098+
10099+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10100+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
10101+ nop == 0x01000000U)
10102+ {
10103+ unsigned long addr;
10104+ unsigned int save, call;
10105+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
10106+
10107+ if ((ba & 0xFFC00000U) == 0x30800000U)
10108+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
10109+ else
10110+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10111+
10112+ if (test_thread_flag(TIF_32BIT))
10113+ addr &= 0xFFFFFFFFUL;
10114+
10115+ err = get_user(save, (unsigned int *)addr);
10116+ err |= get_user(call, (unsigned int *)(addr+4));
10117+ err |= get_user(nop, (unsigned int *)(addr+8));
10118+ if (err)
10119+ break;
10120+
10121+#ifdef CONFIG_PAX_DLRESOLVE
10122+ if (save == 0x9DE3BFA8U &&
10123+ (call & 0xC0000000U) == 0x40000000U &&
10124+ nop == 0x01000000U)
10125+ {
10126+ struct vm_area_struct *vma;
10127+ unsigned long call_dl_resolve;
10128+
10129+ down_read(&current->mm->mmap_sem);
10130+ call_dl_resolve = current->mm->call_dl_resolve;
10131+ up_read(&current->mm->mmap_sem);
10132+ if (likely(call_dl_resolve))
10133+ goto emulate;
10134+
10135+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
10136+
10137+ down_write(&current->mm->mmap_sem);
10138+ if (current->mm->call_dl_resolve) {
10139+ call_dl_resolve = current->mm->call_dl_resolve;
10140+ up_write(&current->mm->mmap_sem);
10141+ if (vma)
10142+ kmem_cache_free(vm_area_cachep, vma);
10143+ goto emulate;
10144+ }
10145+
10146+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
10147+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
10148+ up_write(&current->mm->mmap_sem);
10149+ if (vma)
10150+ kmem_cache_free(vm_area_cachep, vma);
10151+ return 1;
10152+ }
10153+
10154+ if (pax_insert_vma(vma, call_dl_resolve)) {
10155+ up_write(&current->mm->mmap_sem);
10156+ kmem_cache_free(vm_area_cachep, vma);
10157+ return 1;
10158+ }
10159+
10160+ current->mm->call_dl_resolve = call_dl_resolve;
10161+ up_write(&current->mm->mmap_sem);
10162+
10163+emulate:
10164+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10165+ regs->tpc = call_dl_resolve;
10166+ regs->tnpc = addr+4;
10167+ return 3;
10168+ }
10169+#endif
10170+
10171+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
10172+ if ((save & 0xFFC00000U) == 0x05000000U &&
10173+ (call & 0xFFFFE000U) == 0x85C0A000U &&
10174+ nop == 0x01000000U)
10175+ {
10176+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10177+ regs->u_regs[UREG_G2] = addr + 4;
10178+ addr = (save & 0x003FFFFFU) << 10;
10179+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10180+
10181+ if (test_thread_flag(TIF_32BIT))
10182+ addr &= 0xFFFFFFFFUL;
10183+
10184+ regs->tpc = addr;
10185+ regs->tnpc = addr+4;
10186+ return 3;
10187+ }
10188+
10189+ /* PaX: 64-bit PLT stub */
10190+ err = get_user(sethi1, (unsigned int *)addr);
10191+ err |= get_user(sethi2, (unsigned int *)(addr+4));
10192+ err |= get_user(or1, (unsigned int *)(addr+8));
10193+ err |= get_user(or2, (unsigned int *)(addr+12));
10194+ err |= get_user(sllx, (unsigned int *)(addr+16));
10195+ err |= get_user(add, (unsigned int *)(addr+20));
10196+ err |= get_user(jmpl, (unsigned int *)(addr+24));
10197+ err |= get_user(nop, (unsigned int *)(addr+28));
10198+ if (err)
10199+ break;
10200+
10201+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
10202+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10203+ (or1 & 0xFFFFE000U) == 0x88112000U &&
10204+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
10205+ sllx == 0x89293020U &&
10206+ add == 0x8A010005U &&
10207+ jmpl == 0x89C14000U &&
10208+ nop == 0x01000000U)
10209+ {
10210+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10211+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
10212+ regs->u_regs[UREG_G4] <<= 32;
10213+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
10214+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
10215+ regs->u_regs[UREG_G4] = addr + 24;
10216+ addr = regs->u_regs[UREG_G5];
10217+ regs->tpc = addr;
10218+ regs->tnpc = addr+4;
10219+ return 3;
10220+ }
10221+ }
10222+ } while (0);
10223+
10224+#ifdef CONFIG_PAX_DLRESOLVE
10225+ do { /* PaX: unpatched PLT emulation step 2 */
10226+ unsigned int save, call, nop;
10227+
10228+ err = get_user(save, (unsigned int *)(regs->tpc-4));
10229+ err |= get_user(call, (unsigned int *)regs->tpc);
10230+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
10231+ if (err)
10232+ break;
10233+
10234+ if (save == 0x9DE3BFA8U &&
10235+ (call & 0xC0000000U) == 0x40000000U &&
10236+ nop == 0x01000000U)
10237+ {
10238+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
10239+
10240+ if (test_thread_flag(TIF_32BIT))
10241+ dl_resolve &= 0xFFFFFFFFUL;
10242+
10243+ regs->u_regs[UREG_RETPC] = regs->tpc;
10244+ regs->tpc = dl_resolve;
10245+ regs->tnpc = dl_resolve+4;
10246+ return 3;
10247+ }
10248+ } while (0);
10249+#endif
10250+
10251+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
10252+ unsigned int sethi, ba, nop;
10253+
10254+ err = get_user(sethi, (unsigned int *)regs->tpc);
10255+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
10256+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10257+
10258+ if (err)
10259+ break;
10260+
10261+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10262+ (ba & 0xFFF00000U) == 0x30600000U &&
10263+ nop == 0x01000000U)
10264+ {
10265+ unsigned long addr;
10266+
10267+ addr = (sethi & 0x003FFFFFU) << 10;
10268+ regs->u_regs[UREG_G1] = addr;
10269+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10270+
10271+ if (test_thread_flag(TIF_32BIT))
10272+ addr &= 0xFFFFFFFFUL;
10273+
10274+ regs->tpc = addr;
10275+ regs->tnpc = addr+4;
10276+ return 2;
10277+ }
10278+ } while (0);
10279+
10280+#endif
10281+
10282+ return 1;
10283+}
10284+
10285+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
10286+{
10287+ unsigned long i;
10288+
10289+ printk(KERN_ERR "PAX: bytes at PC: ");
10290+ for (i = 0; i < 8; i++) {
10291+ unsigned int c;
10292+ if (get_user(c, (unsigned int *)pc+i))
10293+ printk(KERN_CONT "???????? ");
10294+ else
10295+ printk(KERN_CONT "%08x ", c);
10296+ }
10297+ printk("\n");
10298+}
10299+#endif
10300+
10301 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
10302 {
10303 struct mm_struct *mm = current->mm;
10304@@ -341,6 +804,29 @@ retry:
10305 if (!vma)
10306 goto bad_area;
10307
10308+#ifdef CONFIG_PAX_PAGEEXEC
10309+ /* PaX: detect ITLB misses on non-exec pages */
10310+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
10311+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
10312+ {
10313+ if (address != regs->tpc)
10314+ goto good_area;
10315+
10316+ up_read(&mm->mmap_sem);
10317+ switch (pax_handle_fetch_fault(regs)) {
10318+
10319+#ifdef CONFIG_PAX_EMUPLT
10320+ case 2:
10321+ case 3:
10322+ return;
10323+#endif
10324+
10325+ }
10326+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
10327+ do_group_exit(SIGKILL);
10328+ }
10329+#endif
10330+
10331 /* Pure DTLB misses do not tell us whether the fault causing
10332 * load/store/atomic was a write or not, it only says that there
10333 * was no match. So in such a case we (carefully) read the
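
[editor's note] Every pattern matcher above decodes a branch or call displacement with the same idiom, (((insn | ~FIELD) ^ SIGN) + SIGN) << 2: the OR saturates the bits above the immediate field, the XOR/ADD pair propagates (or carries away) the sign bit, and the shift converts a word displacement to bytes. It is exactly sign extension of the immediate. A standalone check in plain C, no kernel dependencies; the sample encoding is made up:

#include <assert.h>

/* conventional sign extension of the low `bits` bits: (x ^ s) - s */
static long sign_extend(unsigned long v, unsigned int bits)
{
	unsigned long s = 1UL << (bits - 1);

	v &= (1UL << bits) - 1;
	return (long)((v ^ s) - s);
}

int main(void)
{
	unsigned int ba = 0x30BFFFF0U;	/* a "ba" with a negative disp22 */
	unsigned long a, b;

	/* the idiom used throughout the PLT matchers above */
	a = (((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2;
	/* the conventional formulation */
	b = (unsigned long)sign_extend(ba, 22) << 2;
	assert(a == b);
	return 0;
}
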
10334diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
10335index d2b5944..d878f3c 100644
10336--- a/arch/sparc/mm/hugetlbpage.c
10337+++ b/arch/sparc/mm/hugetlbpage.c
10338@@ -28,7 +28,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
10339 unsigned long addr,
10340 unsigned long len,
10341 unsigned long pgoff,
10342- unsigned long flags)
10343+ unsigned long flags,
10344+ unsigned long offset)
10345 {
10346 unsigned long task_size = TASK_SIZE;
10347 struct vm_unmapped_area_info info;
10348@@ -38,15 +39,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
10349
10350 info.flags = 0;
10351 info.length = len;
10352- info.low_limit = TASK_UNMAPPED_BASE;
10353+ info.low_limit = mm->mmap_base;
10354 info.high_limit = min(task_size, VA_EXCLUDE_START);
10355 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
10356 info.align_offset = 0;
10357+ info.threadstack_offset = offset;
10358 addr = vm_unmapped_area(&info);
10359
10360 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10361 VM_BUG_ON(addr != -ENOMEM);
10362 info.low_limit = VA_EXCLUDE_END;
10363+
10364+#ifdef CONFIG_PAX_RANDMMAP
10365+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10366+ info.low_limit += mm->delta_mmap;
10367+#endif
10368+
10369 info.high_limit = task_size;
10370 addr = vm_unmapped_area(&info);
10371 }
10372@@ -58,7 +66,8 @@ static unsigned long
10373 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10374 const unsigned long len,
10375 const unsigned long pgoff,
10376- const unsigned long flags)
10377+ const unsigned long flags,
10378+ const unsigned long offset)
10379 {
10380 struct mm_struct *mm = current->mm;
10381 unsigned long addr = addr0;
10382@@ -73,6 +82,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10383 info.high_limit = mm->mmap_base;
10384 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
10385 info.align_offset = 0;
10386+ info.threadstack_offset = offset;
10387 addr = vm_unmapped_area(&info);
10388
10389 /*
10390@@ -85,6 +95,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10391 VM_BUG_ON(addr != -ENOMEM);
10392 info.flags = 0;
10393 info.low_limit = TASK_UNMAPPED_BASE;
10394+
10395+#ifdef CONFIG_PAX_RANDMMAP
10396+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10397+ info.low_limit += mm->delta_mmap;
10398+#endif
10399+
10400 info.high_limit = STACK_TOP32;
10401 addr = vm_unmapped_area(&info);
10402 }
10403@@ -99,6 +115,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
10404 struct mm_struct *mm = current->mm;
10405 struct vm_area_struct *vma;
10406 unsigned long task_size = TASK_SIZE;
10407+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
10408
10409 if (test_thread_flag(TIF_32BIT))
10410 task_size = STACK_TOP32;
10411@@ -114,19 +131,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
10412 return addr;
10413 }
10414
10415+#ifdef CONFIG_PAX_RANDMMAP
10416+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10417+#endif
10418+
10419 if (addr) {
10420 addr = ALIGN(addr, HPAGE_SIZE);
10421 vma = find_vma(mm, addr);
10422- if (task_size - len >= addr &&
10423- (!vma || addr + len <= vma->vm_start))
10424+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10425 return addr;
10426 }
10427 if (mm->get_unmapped_area == arch_get_unmapped_area)
10428 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
10429- pgoff, flags);
10430+ pgoff, flags, offset);
10431 else
10432 return hugetlb_get_unmapped_area_topdown(file, addr, len,
10433- pgoff, flags);
10434+ pgoff, flags, offset);
10435 }
10436
10437 pte_t *huge_pte_alloc(struct mm_struct *mm,
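
[editor's note] The interesting change above is the overlap test: the old `!vma || addr + len <= vma->vm_start` only guarantees the hint does not collide with the next VMA, while check_heap_stack_gap() (a helper this patch adds elsewhere) also enforces a gap below stack-like mappings, shifted by the per-mapping threadstack_offset randomization. A hedged sketch of the shape of that check; the real gap policy is more involved than this:

static bool fits_before(const struct vm_area_struct *vma,
			unsigned long addr, unsigned long len,
			unsigned long gap)
{
	if (!vma)
		return true;			/* nothing above the hint */
	if (addr + len > vma->vm_start)
		return false;			/* plain overlap (old test) */
	/* new: keep extra room below a downward-growing stack */
	if ((vma->vm_flags & VM_GROWSDOWN) &&
	    vma->vm_start - (addr + len) < gap)
		return false;
	return true;
}
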
10438diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
10439index f4500c6..889656c 100644
10440--- a/arch/tile/include/asm/atomic_64.h
10441+++ b/arch/tile/include/asm/atomic_64.h
10442@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10443
10444 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10445
10446+#define atomic64_read_unchecked(v) atomic64_read(v)
10447+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
10448+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
10449+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
10450+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
10451+#define atomic64_inc_unchecked(v) atomic64_inc(v)
10452+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
10453+#define atomic64_dec_unchecked(v) atomic64_dec(v)
10454+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
10455+
10456 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
10457 #define smp_mb__before_atomic_dec() smp_mb()
10458 #define smp_mb__after_atomic_dec() smp_mb()
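
[editor's note] The *_unchecked aliases exist because PaX REFCOUNT (enabled elsewhere in this patch on instrumented architectures) makes the plain atomic ops trap on overflow; counters that wrap by design are converted tree-wide to the unchecked variants. tile has no instrumented implementation, so the names simply alias the plain ops; the atomic64_unchecked_t typedef is likewise provided elsewhere in the patch. Usage shape, for illustration:

static atomic64_unchecked_t tx_bytes = ATOMIC64_INIT(0);

static void account_tx(long len)
{
	/* wraps by design, so REFCOUNT must not trap on it */
	atomic64_add_unchecked(len, &tx_bytes);
}
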
10459diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
10460index a9a5299..0fce79e 100644
10461--- a/arch/tile/include/asm/cache.h
10462+++ b/arch/tile/include/asm/cache.h
10463@@ -15,11 +15,12 @@
10464 #ifndef _ASM_TILE_CACHE_H
10465 #define _ASM_TILE_CACHE_H
10466
10467+#include <linux/const.h>
10468 #include <arch/chip.h>
10469
10470 /* bytes per L1 data cache line */
10471 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
10472-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10473+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10474
10475 /* bytes per L2 cache line */
10476 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
10477diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
10478index 8a082bc..7a6bf87 100644
10479--- a/arch/tile/include/asm/uaccess.h
10480+++ b/arch/tile/include/asm/uaccess.h
10481@@ -408,9 +408,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
10482 const void __user *from,
10483 unsigned long n)
10484 {
10485- int sz = __compiletime_object_size(to);
10486+ size_t sz = __compiletime_object_size(to);
10487
10488- if (likely(sz == -1 || sz >= n))
10489+ if (likely(sz == (size_t)-1 || sz >= n))
10490 n = _copy_from_user(to, from, n);
10491 else
10492 copy_from_user_overflow();
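
[editor's note] __compiletime_object_size() is __builtin_object_size(), which yields (size_t)-1 when the size is unknown. Storing that in an `int` narrows a 64-bit size_t, which is implementation-defined for out-of-range values and silently loses the size of any object past 2 GiB; the fix keeps the whole comparison in size_t. A standalone demonstration of the narrowing hazard (values made up; assumes the usual modulo conversion of gcc/clang):

#include <stddef.h>
#include <assert.h>

int main(void)
{
	size_t huge = ((size_t)1 << 32) + 8;	/* >4 GiB object on LP64 */
	int narrowed = (int)huge;	/* 8 under modulo conversion:
					 * the real size is gone */
	size_t kept = huge;
	size_t n = 4096;

	assert(!(narrowed >= (int)n));	/* copy wrongly rejected */
	assert(kept >= n);		/* size_t keeps it right */
	assert(kept != (size_t)-1);	/* sentinel test stays exact */
	return 0;
}
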
10493diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
10494index 650ccff..45fe2d6 100644
10495--- a/arch/tile/mm/hugetlbpage.c
10496+++ b/arch/tile/mm/hugetlbpage.c
10497@@ -239,6 +239,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
10498 info.high_limit = TASK_SIZE;
10499 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
10500 info.align_offset = 0;
10501+ info.threadstack_offset = 0;
10502 return vm_unmapped_area(&info);
10503 }
10504
10505@@ -256,6 +257,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
10506 info.high_limit = current->mm->mmap_base;
10507 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
10508 info.align_offset = 0;
10509+ info.threadstack_offset = 0;
10510 addr = vm_unmapped_area(&info);
10511
10512 /*
10513diff --git a/arch/um/Makefile b/arch/um/Makefile
10514index 133f7de..1d6f2f1 100644
10515--- a/arch/um/Makefile
10516+++ b/arch/um/Makefile
10517@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
10518 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
10519 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
10520
10521+ifdef CONSTIFY_PLUGIN
10522+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10523+endif
10524+
10525 #This will adjust *FLAGS accordingly to the platform.
10526 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
10527
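
[editor's note] CONSTIFY_PLUGIN refers to the PaX constify gcc plugin (added elsewhere in this patch), which compiles structures consisting entirely of function pointers as if they were declared const, forcing them into .rodata; UML's USER_CFLAGS objects are built as host userspace, so the plugin is switched off for them with the no-constify argument. The effect, written out by hand (names illustrative):

#include <stdio.h>

struct ops { int (*run)(void); };

static int hello(void) { return 42; }

/* what the plugin effectively produces for pointer-only structs: the
 * object lands in .rodata, so the function pointer cannot be rewritten */
static const struct ops hardened_ops = { .run = hello };

int main(void)
{
	printf("%d\n", hardened_ops.run());
	return 0;
}
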
10528diff --git a/arch/um/defconfig b/arch/um/defconfig
10529index 08107a7..ab22afe 100644
10530--- a/arch/um/defconfig
10531+++ b/arch/um/defconfig
10532@@ -51,7 +51,6 @@ CONFIG_X86_CMPXCHG=y
10533 CONFIG_X86_L1_CACHE_SHIFT=5
10534 CONFIG_X86_XADD=y
10535 CONFIG_X86_PPRO_FENCE=y
10536-CONFIG_X86_WP_WORKS_OK=y
10537 CONFIG_X86_INVLPG=y
10538 CONFIG_X86_BSWAP=y
10539 CONFIG_X86_POPAD_OK=y
10540diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
10541index 19e1bdd..3665b77 100644
10542--- a/arch/um/include/asm/cache.h
10543+++ b/arch/um/include/asm/cache.h
10544@@ -1,6 +1,7 @@
10545 #ifndef __UM_CACHE_H
10546 #define __UM_CACHE_H
10547
10548+#include <linux/const.h>
10549
10550 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
10551 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10552@@ -12,6 +13,6 @@
10553 # define L1_CACHE_SHIFT 5
10554 #endif
10555
10556-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10557+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10558
10559 #endif
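
[editor's note] _AC() comes from linux/const.h, included just above: in C it pastes the suffix onto the literal, in assembly it drops it, so the same L1_CACHE_BYTES works in both contexts while being unsigned long in C, keeping alignment arithmetic out of signed-int territory. Roughly as defined in include/uapi/linux/const.h:

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler: no C suffixes */
#else
#define __AC(X, Y)	(X##Y)		/* paste: _AC(1,UL) -> (1UL) */
#define _AC(X, Y)	__AC(X, Y)	/* indirection expands macro args */
#endif
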
10560diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
10561index 2e0a6b1..a64d0f5 100644
10562--- a/arch/um/include/asm/kmap_types.h
10563+++ b/arch/um/include/asm/kmap_types.h
10564@@ -8,6 +8,6 @@
10565
10566 /* No more #include "asm/arch/kmap_types.h" ! */
10567
10568-#define KM_TYPE_NR 14
10569+#define KM_TYPE_NR 15
10570
10571 #endif
10572diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
10573index 5ff53d9..5850cdf 100644
10574--- a/arch/um/include/asm/page.h
10575+++ b/arch/um/include/asm/page.h
10576@@ -14,6 +14,9 @@
10577 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
10578 #define PAGE_MASK (~(PAGE_SIZE-1))
10579
10580+#define ktla_ktva(addr) (addr)
10581+#define ktva_ktla(addr) (addr)
10582+
10583 #ifndef __ASSEMBLY__
10584
10585 struct page;
10586diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
10587index 0032f92..cd151e0 100644
10588--- a/arch/um/include/asm/pgtable-3level.h
10589+++ b/arch/um/include/asm/pgtable-3level.h
10590@@ -58,6 +58,7 @@
10591 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
10592 #define pud_populate(mm, pud, pmd) \
10593 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
10594+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
10595
10596 #ifdef CONFIG_64BIT
10597 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
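
[editor's note] The pud_populate_kernel() alias completes an interface this patch adds tree-wide: on architectures with PaX KERNEXEC, the _kernel variants wrap the page-table write in pax_open_kernel()/pax_close_kernel() because kernel page tables are kept read-only; UML has no such protection, so the alias just forwards. A hedged sketch of what the instrumented variant looks like on such an architecture:

static inline void pud_populate_kernel(struct mm_struct *mm,
				       pud_t *pud, pmd_t *pmd)
{
	pax_open_kernel();	/* e.g. clear CR0.WP on x86 so the
				 * write-protected tables can be written */
	pud_populate(mm, pud, pmd);
	pax_close_kernel();	/* restore write protection */
}
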
10598diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
10599index 4febacd..29b0301 100644
10600--- a/arch/um/include/asm/tlb.h
10601+++ b/arch/um/include/asm/tlb.h
10602@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
10603 }
10604
10605 static inline void
10606-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
10607+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
10608 {
10609 tlb->mm = mm;
10610- tlb->fullmm = full_mm_flush;
10611+ tlb->start = start;
10612+ tlb->end = end;
10613+ tlb->fullmm = !(start | (end+1));
10614
10615 init_tlb_gather(tlb);
10616 }
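
[editor's note] This mirrors the upstream TLB range-invalidation fix (merged for 3.11 and backported to stable): tlb_gather_mmu() now records the flush range, and !(start | (end + 1)) is a branch-free test for "flush the whole address space", true only when start == 0 and end == ~0UL (the + 1 wraps exactly then). Quick standalone check:

#include <assert.h>
#include <limits.h>

int main(void)
{
	unsigned long start = 0, end = ULONG_MAX;

	assert(!(start | (end + 1)));	/* full-mm flush */

	start = 0x1000; end = 0x1fff;
	assert(start | (end + 1));	/* ranged flush */
	return 0;
}
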
10617diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
10618index bbcef52..6a2a483 100644
10619--- a/arch/um/kernel/process.c
10620+++ b/arch/um/kernel/process.c
10621@@ -367,22 +367,6 @@ int singlestepping(void * t)
10622 return 2;
10623 }
10624
10625-/*
10626- * Only x86 and x86_64 have an arch_align_stack().
10627- * All other arches have "#define arch_align_stack(x) (x)"
10628- * in their asm/system.h
10629- * As this is included in UML from asm-um/system-generic.h,
10630- * we can use it to behave as the subarch does.
10631- */
10632-#ifndef arch_align_stack
10633-unsigned long arch_align_stack(unsigned long sp)
10634-{
10635- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
10636- sp -= get_random_int() % 8192;
10637- return sp & ~0xf;
10638-}
10639-#endif
10640-
10641 unsigned long get_wchan(struct task_struct *p)
10642 {
10643 unsigned long stack_page, sp, ip;
10644diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
10645index ad8f795..2c7eec6 100644
10646--- a/arch/unicore32/include/asm/cache.h
10647+++ b/arch/unicore32/include/asm/cache.h
10648@@ -12,8 +12,10 @@
10649 #ifndef __UNICORE_CACHE_H__
10650 #define __UNICORE_CACHE_H__
10651
10652-#define L1_CACHE_SHIFT (5)
10653-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10654+#include <linux/const.h>
10655+
10656+#define L1_CACHE_SHIFT 5
10657+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10658
10659 /*
10660 * Memory returned by kmalloc() may be used for DMA, so we must make
10661diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
10662index fe120da..24177f7 100644
10663--- a/arch/x86/Kconfig
10664+++ b/arch/x86/Kconfig
10665@@ -239,7 +239,7 @@ config X86_HT
10666
10667 config X86_32_LAZY_GS
10668 def_bool y
10669- depends on X86_32 && !CC_STACKPROTECTOR
10670+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10671
10672 config ARCH_HWEIGHT_CFLAGS
10673 string
10674@@ -1073,6 +1073,7 @@ config MICROCODE_EARLY
10675
10676 config X86_MSR
10677 tristate "/dev/cpu/*/msr - Model-specific register support"
10678+ depends on !GRKERNSEC_KMEM
10679 ---help---
10680 This device gives privileged processes access to the x86
10681 Model-Specific Registers (MSRs). It is a character device with
10682@@ -1096,7 +1097,7 @@ choice
10683
10684 config NOHIGHMEM
10685 bool "off"
10686- depends on !X86_NUMAQ
10687+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10688 ---help---
10689 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10690 However, the address space of 32-bit x86 processors is only 4
10691@@ -1133,7 +1134,7 @@ config NOHIGHMEM
10692
10693 config HIGHMEM4G
10694 bool "4GB"
10695- depends on !X86_NUMAQ
10696+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10697 ---help---
10698 Select this if you have a 32-bit processor and between 1 and 4
10699 gigabytes of physical RAM.
10700@@ -1186,7 +1187,7 @@ config PAGE_OFFSET
10701 hex
10702 default 0xB0000000 if VMSPLIT_3G_OPT
10703 default 0x80000000 if VMSPLIT_2G
10704- default 0x78000000 if VMSPLIT_2G_OPT
10705+ default 0x70000000 if VMSPLIT_2G_OPT
10706 default 0x40000000 if VMSPLIT_1G
10707 default 0xC0000000
10708 depends on X86_32
10709@@ -1584,6 +1585,7 @@ config SECCOMP
10710
10711 config CC_STACKPROTECTOR
10712 bool "Enable -fstack-protector buffer overflow detection"
10713+ depends on X86_64 || !PAX_MEMORY_UDEREF
10714 ---help---
10715 This option turns on the -fstack-protector GCC feature. This
10716 feature puts, at the beginning of functions, a canary value on
10717@@ -1703,6 +1705,8 @@ config X86_NEED_RELOCS
10718 config PHYSICAL_ALIGN
10719 hex "Alignment value to which kernel should be aligned" if X86_32
10720 default "0x1000000"
10721+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
10722+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
10723 range 0x2000 0x1000000
10724 ---help---
10725 This value puts the alignment restrictions on physical address
10726@@ -1778,9 +1782,10 @@ config DEBUG_HOTPLUG_CPU0
10727 If unsure, say N.
10728
10729 config COMPAT_VDSO
10730- def_bool y
10731+ def_bool n
10732 prompt "Compat VDSO support"
10733 depends on X86_32 || IA32_EMULATION
10734+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
10735 ---help---
10736 Map the 32-bit VDSO to the predictable old-style address too.
10737
10738diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
10739index c026cca..14657ae 100644
10740--- a/arch/x86/Kconfig.cpu
10741+++ b/arch/x86/Kconfig.cpu
10742@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
10743
10744 config X86_F00F_BUG
10745 def_bool y
10746- depends on M586MMX || M586TSC || M586 || M486
10747+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
10748
10749 config X86_INVD_BUG
10750 def_bool y
10751@@ -327,7 +327,7 @@ config X86_INVD_BUG
10752
10753 config X86_ALIGNMENT_16
10754 def_bool y
10755- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10756+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10757
10758 config X86_INTEL_USERCOPY
10759 def_bool y
10760@@ -373,7 +373,7 @@ config X86_CMPXCHG64
10761 # generates cmov.
10762 config X86_CMOV
10763 def_bool y
10764- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10765+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10766
10767 config X86_MINIMUM_CPU_FAMILY
10768 int
10769diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
10770index c198b7e..63eea60 100644
10771--- a/arch/x86/Kconfig.debug
10772+++ b/arch/x86/Kconfig.debug
10773@@ -84,7 +84,7 @@ config X86_PTDUMP
10774 config DEBUG_RODATA
10775 bool "Write protect kernel read-only data structures"
10776 default y
10777- depends on DEBUG_KERNEL
10778+ depends on DEBUG_KERNEL && BROKEN
10779 ---help---
10780 Mark the kernel read-only data as write-protected in the pagetables,
10781 in order to catch accidental (and incorrect) writes to such const
10782@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
10783
10784 config DEBUG_SET_MODULE_RONX
10785 bool "Set loadable kernel module data as NX and text as RO"
10786- depends on MODULES
10787+ depends on MODULES && BROKEN
10788 ---help---
10789 This option helps catch unintended modifications to loadable
10790 kernel module's text and read-only data. It also prevents execution
10791diff --git a/arch/x86/Makefile b/arch/x86/Makefile
10792index 5c47726..8c4fa67 100644
10793--- a/arch/x86/Makefile
10794+++ b/arch/x86/Makefile
10795@@ -54,6 +54,7 @@ else
10796 UTS_MACHINE := x86_64
10797 CHECKFLAGS += -D__x86_64__ -m64
10798
10799+ biarch := $(call cc-option,-m64)
10800 KBUILD_AFLAGS += -m64
10801 KBUILD_CFLAGS += -m64
10802
10803@@ -234,3 +235,12 @@ define archhelp
10804 echo ' FDARGS="..." arguments for the booted kernel'
10805 echo ' FDINITRD=file initrd for the booted kernel'
10806 endef
10807+
10808+define OLD_LD
10809+
10810+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
10811+*** Please upgrade your binutils to 2.18 or newer
10812+endef
10813+
10814+archprepare:
10815+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
10816diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
10817index 379814b..add62ce 100644
10818--- a/arch/x86/boot/Makefile
10819+++ b/arch/x86/boot/Makefile
10820@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
10821 $(call cc-option, -fno-stack-protector) \
10822 $(call cc-option, -mpreferred-stack-boundary=2)
10823 KBUILD_CFLAGS += $(call cc-option, -m32)
10824+ifdef CONSTIFY_PLUGIN
10825+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10826+endif
10827 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10828 GCOV_PROFILE := n
10829
10830diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
10831index 878e4b9..20537ab 100644
10832--- a/arch/x86/boot/bitops.h
10833+++ b/arch/x86/boot/bitops.h
10834@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10835 u8 v;
10836 const u32 *p = (const u32 *)addr;
10837
10838- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10839+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10840 return v;
10841 }
10842
10843@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10844
10845 static inline void set_bit(int nr, void *addr)
10846 {
10847- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10848+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10849 }
10850
10851 #endif /* BOOT_BITOPS_H */
10852diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
10853index 5b75319..331a4ca 100644
10854--- a/arch/x86/boot/boot.h
10855+++ b/arch/x86/boot/boot.h
10856@@ -85,7 +85,7 @@ static inline void io_delay(void)
10857 static inline u16 ds(void)
10858 {
10859 u16 seg;
10860- asm("movw %%ds,%0" : "=rm" (seg));
10861+ asm volatile("movw %%ds,%0" : "=rm" (seg));
10862 return seg;
10863 }
10864
10865@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
10866 static inline int memcmp(const void *s1, const void *s2, size_t len)
10867 {
10868 u8 diff;
10869- asm("repe; cmpsb; setnz %0"
10870+ asm volatile("repe; cmpsb; setnz %0"
10871 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
10872 return diff;
10873 }
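
[editor's note] The recurring pattern through bitops.h, boot.h (and cpucheck.c below) is adding `volatile` to asm statements that read mutable machine state. Without it, GCC treats an asm with outputs as a pure function of its inputs: identical statements may be merged, hoisted, or deleted if the result is unused, which is wrong for `movw %%ds`, or for `cpuid`/`rdmsr` that must observe a preceding `wrmsr`. Volatile asm statements are also not reordered with respect to other volatile asm statements. For example:

/* without volatile, two calls could be folded into a single read,
 * hiding a segment reload that happened in between */
static inline unsigned short read_ds(void)
{
	unsigned short seg;

	asm volatile("movw %%ds,%0" : "=rm" (seg));
	return seg;
}
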
10874diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
10875index 5ef205c..342191d 100644
10876--- a/arch/x86/boot/compressed/Makefile
10877+++ b/arch/x86/boot/compressed/Makefile
10878@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
10879 KBUILD_CFLAGS += $(cflags-y)
10880 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
10881 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
10882+ifdef CONSTIFY_PLUGIN
10883+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10884+endif
10885
10886 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10887 GCOV_PROFILE := n
10888diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
10889index d606463..b887794 100644
10890--- a/arch/x86/boot/compressed/eboot.c
10891+++ b/arch/x86/boot/compressed/eboot.c
10892@@ -150,7 +150,6 @@ again:
10893 *addr = max_addr;
10894 }
10895
10896-free_pool:
10897 efi_call_phys1(sys_table->boottime->free_pool, map);
10898
10899 fail:
10900@@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
10901 if (i == map_size / desc_size)
10902 status = EFI_NOT_FOUND;
10903
10904-free_pool:
10905 efi_call_phys1(sys_table->boottime->free_pool, map);
10906 fail:
10907 return status;
10908diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
10909index a53440e..c3dbf1e 100644
10910--- a/arch/x86/boot/compressed/efi_stub_32.S
10911+++ b/arch/x86/boot/compressed/efi_stub_32.S
10912@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
10913 * parameter 2, ..., param n. To make things easy, we save the return
10914 * address of efi_call_phys in a global variable.
10915 */
10916- popl %ecx
10917- movl %ecx, saved_return_addr(%edx)
10918- /* get the function pointer into ECX*/
10919- popl %ecx
10920- movl %ecx, efi_rt_function_ptr(%edx)
10921+ popl saved_return_addr(%edx)
10922+ popl efi_rt_function_ptr(%edx)
10923
10924 /*
10925 * 3. Call the physical function.
10926 */
10927- call *%ecx
10928+ call *efi_rt_function_ptr(%edx)
10929
10930 /*
10931 * 4. Balance the stack. And because EAX contain the return value,
10932@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
10933 1: popl %edx
10934 subl $1b, %edx
10935
10936- movl efi_rt_function_ptr(%edx), %ecx
10937- pushl %ecx
10938+ pushl efi_rt_function_ptr(%edx)
10939
10940 /*
10941 * 10. Push the saved return address onto the stack and return.
10942 */
10943- movl saved_return_addr(%edx), %ecx
10944- pushl %ecx
10945- ret
10946+ jmpl *saved_return_addr(%edx)
10947 ENDPROC(efi_call_phys)
10948 .previous
10949
10950diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
10951index 1e3184f..0d11e2e 100644
10952--- a/arch/x86/boot/compressed/head_32.S
10953+++ b/arch/x86/boot/compressed/head_32.S
10954@@ -118,7 +118,7 @@ preferred_addr:
10955 notl %eax
10956 andl %eax, %ebx
10957 #else
10958- movl $LOAD_PHYSICAL_ADDR, %ebx
10959+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10960 #endif
10961
10962 /* Target address to relocate to for decompression */
10963@@ -204,7 +204,7 @@ relocated:
10964 * and where it was actually loaded.
10965 */
10966 movl %ebp, %ebx
10967- subl $LOAD_PHYSICAL_ADDR, %ebx
10968+ subl $____LOAD_PHYSICAL_ADDR, %ebx
10969 jz 2f /* Nothing to be done if loaded at compiled addr. */
10970 /*
10971 * Process relocations.
10972@@ -212,8 +212,7 @@ relocated:
10973
10974 1: subl $4, %edi
10975 movl (%edi), %ecx
10976- testl %ecx, %ecx
10977- jz 2f
10978+ jecxz 2f
10979 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
10980 jmp 1b
10981 2:
10982diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
10983index 16f24e6..47491a3 100644
10984--- a/arch/x86/boot/compressed/head_64.S
10985+++ b/arch/x86/boot/compressed/head_64.S
10986@@ -97,7 +97,7 @@ ENTRY(startup_32)
10987 notl %eax
10988 andl %eax, %ebx
10989 #else
10990- movl $LOAD_PHYSICAL_ADDR, %ebx
10991+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10992 #endif
10993
10994 /* Target address to relocate to for decompression */
10995@@ -272,7 +272,7 @@ preferred_addr:
10996 notq %rax
10997 andq %rax, %rbp
10998 #else
10999- movq $LOAD_PHYSICAL_ADDR, %rbp
11000+ movq $____LOAD_PHYSICAL_ADDR, %rbp
11001 #endif
11002
11003 /* Target address to relocate to for decompression */
11004@@ -363,8 +363,8 @@ gdt:
11005 .long gdt
11006 .word 0
11007 .quad 0x0000000000000000 /* NULL descriptor */
11008- .quad 0x00af9a000000ffff /* __KERNEL_CS */
11009- .quad 0x00cf92000000ffff /* __KERNEL_DS */
11010+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
11011+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
11012 .quad 0x0080890000000000 /* TS descriptor */
11013 .quad 0x0000000000000000 /* TS continued */
11014 gdt_end:
11015diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
11016index 7cb56c6..d382d84 100644
11017--- a/arch/x86/boot/compressed/misc.c
11018+++ b/arch/x86/boot/compressed/misc.c
11019@@ -303,7 +303,7 @@ static void parse_elf(void *output)
11020 case PT_LOAD:
11021 #ifdef CONFIG_RELOCATABLE
11022 dest = output;
11023- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
11024+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
11025 #else
11026 dest = (void *)(phdr->p_paddr);
11027 #endif
11028@@ -354,7 +354,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
11029 error("Destination address too large");
11030 #endif
11031 #ifndef CONFIG_RELOCATABLE
11032- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
11033+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
11034 error("Wrong destination address");
11035 #endif
11036
11037diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
11038index 4d3ff03..e4972ff 100644
11039--- a/arch/x86/boot/cpucheck.c
11040+++ b/arch/x86/boot/cpucheck.c
11041@@ -74,7 +74,7 @@ static int has_fpu(void)
11042 u16 fcw = -1, fsw = -1;
11043 u32 cr0;
11044
11045- asm("movl %%cr0,%0" : "=r" (cr0));
11046+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
11047 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
11048 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
11049 asm volatile("movl %0,%%cr0" : : "r" (cr0));
11050@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
11051 {
11052 u32 f0, f1;
11053
11054- asm("pushfl ; "
11055+ asm volatile("pushfl ; "
11056 "pushfl ; "
11057 "popl %0 ; "
11058 "movl %0,%1 ; "
11059@@ -115,7 +115,7 @@ static void get_flags(void)
11060 set_bit(X86_FEATURE_FPU, cpu.flags);
11061
11062 if (has_eflag(X86_EFLAGS_ID)) {
11063- asm("cpuid"
11064+ asm volatile("cpuid"
11065 : "=a" (max_intel_level),
11066 "=b" (cpu_vendor[0]),
11067 "=d" (cpu_vendor[1]),
11068@@ -124,7 +124,7 @@ static void get_flags(void)
11069
11070 if (max_intel_level >= 0x00000001 &&
11071 max_intel_level <= 0x0000ffff) {
11072- asm("cpuid"
11073+ asm volatile("cpuid"
11074 : "=a" (tfms),
11075 "=c" (cpu.flags[4]),
11076 "=d" (cpu.flags[0])
11077@@ -136,7 +136,7 @@ static void get_flags(void)
11078 cpu.model += ((tfms >> 16) & 0xf) << 4;
11079 }
11080
11081- asm("cpuid"
11082+ asm volatile("cpuid"
11083 : "=a" (max_amd_level)
11084 : "a" (0x80000000)
11085 : "ebx", "ecx", "edx");
11086@@ -144,7 +144,7 @@ static void get_flags(void)
11087 if (max_amd_level >= 0x80000001 &&
11088 max_amd_level <= 0x8000ffff) {
11089 u32 eax = 0x80000001;
11090- asm("cpuid"
11091+ asm volatile("cpuid"
11092 : "+a" (eax),
11093 "=c" (cpu.flags[6]),
11094 "=d" (cpu.flags[1])
11095@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
11096 u32 ecx = MSR_K7_HWCR;
11097 u32 eax, edx;
11098
11099- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11100+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11101 eax &= ~(1 << 15);
11102- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11103+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11104
11105 get_flags(); /* Make sure it really did something */
11106 err = check_flags();
11107@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
11108 u32 ecx = MSR_VIA_FCR;
11109 u32 eax, edx;
11110
11111- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11112+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11113 eax |= (1<<1)|(1<<7);
11114- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11115+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11116
11117 set_bit(X86_FEATURE_CX8, cpu.flags);
11118 err = check_flags();
11119@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
11120 u32 eax, edx;
11121 u32 level = 1;
11122
11123- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11124- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
11125- asm("cpuid"
11126+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11127+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
11128+ asm volatile("cpuid"
11129 : "+a" (level), "=d" (cpu.flags[0])
11130 : : "ecx", "ebx");
11131- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11132+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11133
11134 err = check_flags();
11135 }
11136diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
11137index 9ec06a1..2c25e79 100644
11138--- a/arch/x86/boot/header.S
11139+++ b/arch/x86/boot/header.S
11140@@ -409,10 +409,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
11141 # single linked list of
11142 # struct setup_data
11143
11144-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
11145+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
11146
11147 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
11148+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11149+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
11150+#else
11151 #define VO_INIT_SIZE (VO__end - VO__text)
11152+#endif
11153 #if ZO_INIT_SIZE > VO_INIT_SIZE
11154 #define INIT_SIZE ZO_INIT_SIZE
11155 #else
11156diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
11157index db75d07..8e6d0af 100644
11158--- a/arch/x86/boot/memory.c
11159+++ b/arch/x86/boot/memory.c
11160@@ -19,7 +19,7 @@
11161
11162 static int detect_memory_e820(void)
11163 {
11164- int count = 0;
11165+ unsigned int count = 0;
11166 struct biosregs ireg, oreg;
11167 struct e820entry *desc = boot_params.e820_map;
11168 static struct e820entry buf; /* static so it is zeroed */
11169diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
11170index 11e8c6e..fdbb1ed 100644
11171--- a/arch/x86/boot/video-vesa.c
11172+++ b/arch/x86/boot/video-vesa.c
11173@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
11174
11175 boot_params.screen_info.vesapm_seg = oreg.es;
11176 boot_params.screen_info.vesapm_off = oreg.di;
11177+ boot_params.screen_info.vesapm_size = oreg.cx;
11178 }
11179
11180 /*
11181diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
11182index 43eda28..5ab5fdb 100644
11183--- a/arch/x86/boot/video.c
11184+++ b/arch/x86/boot/video.c
11185@@ -96,7 +96,7 @@ static void store_mode_params(void)
11186 static unsigned int get_entry(void)
11187 {
11188 char entry_buf[4];
11189- int i, len = 0;
11190+ unsigned int i, len = 0;
11191 int key;
11192 unsigned int v;
11193
11194diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
11195index 9105655..5e37f27 100644
11196--- a/arch/x86/crypto/aes-x86_64-asm_64.S
11197+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
11198@@ -8,6 +8,8 @@
11199 * including this sentence is retained in full.
11200 */
11201
11202+#include <asm/alternative-asm.h>
11203+
11204 .extern crypto_ft_tab
11205 .extern crypto_it_tab
11206 .extern crypto_fl_tab
11207@@ -70,6 +72,8 @@
11208 je B192; \
11209 leaq 32(r9),r9;
11210
11211+#define ret pax_force_retaddr 0, 1; ret
11212+
11213 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
11214 movq r1,r2; \
11215 movq r3,r4; \
11216diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
11217index 477e9d7..3ab339f 100644
11218--- a/arch/x86/crypto/aesni-intel_asm.S
11219+++ b/arch/x86/crypto/aesni-intel_asm.S
11220@@ -31,6 +31,7 @@
11221
11222 #include <linux/linkage.h>
11223 #include <asm/inst.h>
11224+#include <asm/alternative-asm.h>
11225
11226 #ifdef __x86_64__
11227 .data
11228@@ -1441,6 +1442,7 @@ _return_T_done_decrypt:
11229 pop %r14
11230 pop %r13
11231 pop %r12
11232+ pax_force_retaddr 0, 1
11233 ret
11234 ENDPROC(aesni_gcm_dec)
11235
11236@@ -1705,6 +1707,7 @@ _return_T_done_encrypt:
11237 pop %r14
11238 pop %r13
11239 pop %r12
11240+ pax_force_retaddr 0, 1
11241 ret
11242 ENDPROC(aesni_gcm_enc)
11243
11244@@ -1722,6 +1725,7 @@ _key_expansion_256a:
11245 pxor %xmm1, %xmm0
11246 movaps %xmm0, (TKEYP)
11247 add $0x10, TKEYP
11248+ pax_force_retaddr_bts
11249 ret
11250 ENDPROC(_key_expansion_128)
11251 ENDPROC(_key_expansion_256a)
11252@@ -1748,6 +1752,7 @@ _key_expansion_192a:
11253 shufps $0b01001110, %xmm2, %xmm1
11254 movaps %xmm1, 0x10(TKEYP)
11255 add $0x20, TKEYP
11256+ pax_force_retaddr_bts
11257 ret
11258 ENDPROC(_key_expansion_192a)
11259
11260@@ -1768,6 +1773,7 @@ _key_expansion_192b:
11261
11262 movaps %xmm0, (TKEYP)
11263 add $0x10, TKEYP
11264+ pax_force_retaddr_bts
11265 ret
11266 ENDPROC(_key_expansion_192b)
11267
11268@@ -1781,6 +1787,7 @@ _key_expansion_256b:
11269 pxor %xmm1, %xmm2
11270 movaps %xmm2, (TKEYP)
11271 add $0x10, TKEYP
11272+ pax_force_retaddr_bts
11273 ret
11274 ENDPROC(_key_expansion_256b)
11275
11276@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
11277 #ifndef __x86_64__
11278 popl KEYP
11279 #endif
11280+ pax_force_retaddr 0, 1
11281 ret
11282 ENDPROC(aesni_set_key)
11283
11284@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
11285 popl KLEN
11286 popl KEYP
11287 #endif
11288+ pax_force_retaddr 0, 1
11289 ret
11290 ENDPROC(aesni_enc)
11291
11292@@ -1974,6 +1983,7 @@ _aesni_enc1:
11293 AESENC KEY STATE
11294 movaps 0x70(TKEYP), KEY
11295 AESENCLAST KEY STATE
11296+ pax_force_retaddr_bts
11297 ret
11298 ENDPROC(_aesni_enc1)
11299
11300@@ -2083,6 +2093,7 @@ _aesni_enc4:
11301 AESENCLAST KEY STATE2
11302 AESENCLAST KEY STATE3
11303 AESENCLAST KEY STATE4
11304+ pax_force_retaddr_bts
11305 ret
11306 ENDPROC(_aesni_enc4)
11307
11308@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
11309 popl KLEN
11310 popl KEYP
11311 #endif
11312+ pax_force_retaddr 0, 1
11313 ret
11314 ENDPROC(aesni_dec)
11315
11316@@ -2164,6 +2176,7 @@ _aesni_dec1:
11317 AESDEC KEY STATE
11318 movaps 0x70(TKEYP), KEY
11319 AESDECLAST KEY STATE
11320+ pax_force_retaddr_bts
11321 ret
11322 ENDPROC(_aesni_dec1)
11323
11324@@ -2273,6 +2286,7 @@ _aesni_dec4:
11325 AESDECLAST KEY STATE2
11326 AESDECLAST KEY STATE3
11327 AESDECLAST KEY STATE4
11328+ pax_force_retaddr_bts
11329 ret
11330 ENDPROC(_aesni_dec4)
11331
11332@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
11333 popl KEYP
11334 popl LEN
11335 #endif
11336+ pax_force_retaddr 0, 1
11337 ret
11338 ENDPROC(aesni_ecb_enc)
11339
11340@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
11341 popl KEYP
11342 popl LEN
11343 #endif
11344+ pax_force_retaddr 0, 1
11345 ret
11346 ENDPROC(aesni_ecb_dec)
11347
11348@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
11349 popl LEN
11350 popl IVP
11351 #endif
11352+ pax_force_retaddr 0, 1
11353 ret
11354 ENDPROC(aesni_cbc_enc)
11355
11356@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
11357 popl LEN
11358 popl IVP
11359 #endif
11360+ pax_force_retaddr 0, 1
11361 ret
11362 ENDPROC(aesni_cbc_dec)
11363
11364@@ -2550,6 +2568,7 @@ _aesni_inc_init:
11365 mov $1, TCTR_LOW
11366 MOVQ_R64_XMM TCTR_LOW INC
11367 MOVQ_R64_XMM CTR TCTR_LOW
11368+ pax_force_retaddr_bts
11369 ret
11370 ENDPROC(_aesni_inc_init)
11371
11372@@ -2579,6 +2598,7 @@ _aesni_inc:
11373 .Linc_low:
11374 movaps CTR, IV
11375 PSHUFB_XMM BSWAP_MASK IV
11376+ pax_force_retaddr_bts
11377 ret
11378 ENDPROC(_aesni_inc)
11379
11380@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
11381 .Lctr_enc_ret:
11382 movups IV, (IVP)
11383 .Lctr_enc_just_ret:
11384+ pax_force_retaddr 0, 1
11385 ret
11386 ENDPROC(aesni_ctr_enc)
11387
11388@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
11389 pxor INC, STATE4
11390 movdqu STATE4, 0x70(OUTP)
11391
11392+ pax_force_retaddr 0, 1
11393 ret
11394 ENDPROC(aesni_xts_crypt8)
11395
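
[editor's note] pax_force_retaddr (a macro this patch adds to asm/alternative-asm.h; aes-x86_64-asm_64.S above even redefines `ret` to emit it) is the assembly half of KERNEXEC's return-address protection. Before each `ret` it sets bit 63 of the saved return address: a genuine kernel address already has that bit set and is unchanged, while an attacker-planted userland pointer becomes non-canonical, so the `ret` faults instead of transferring control. A standalone model of the arithmetic (addresses made up; assumes the usual two's-complement conversion and arithmetic right shift):

#include <assert.h>

/* 48-bit virtual addresses: canonical iff bits 63..48 replicate bit 47 */
static int canonical48(unsigned long addr)
{
	return (unsigned long)((long)(addr << 16) >> 16) == addr;
}

int main(void)
{
	unsigned long kernel_ra = 0xffffffff81000000UL;
	unsigned long user_ra   = 0x00007f0000001000UL;

	assert(canonical48(kernel_ra));
	assert(canonical48(user_ra));

	/* what the bts in pax_force_retaddr does to the saved retaddr */
	kernel_ra |= 1UL << 63;		/* already set: no change */
	user_ra   |= 1UL << 63;		/* now non-canonical */

	assert(canonical48(kernel_ra));
	assert(!canonical48(user_ra));	/* the ret would #GP here */
	return 0;
}
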
11396diff --git a/arch/x86/crypto/blowfish-avx2-asm_64.S b/arch/x86/crypto/blowfish-avx2-asm_64.S
11397index 784452e..46982c7 100644
11398--- a/arch/x86/crypto/blowfish-avx2-asm_64.S
11399+++ b/arch/x86/crypto/blowfish-avx2-asm_64.S
11400@@ -221,6 +221,7 @@ __blowfish_enc_blk32:
11401
11402 write_block(RXl, RXr);
11403
11404+ pax_force_retaddr 0, 1
11405 ret;
11406 ENDPROC(__blowfish_enc_blk32)
11407
11408@@ -250,6 +251,7 @@ __blowfish_dec_blk32:
11409
11410 write_block(RXl, RXr);
11411
11412+ pax_force_retaddr 0, 1
11413 ret;
11414 ENDPROC(__blowfish_dec_blk32)
11415
11416@@ -284,6 +286,7 @@ ENTRY(blowfish_ecb_enc_32way)
11417
11418 vzeroupper;
11419
11420+ pax_force_retaddr 0, 1
11421 ret;
11422 ENDPROC(blowfish_ecb_enc_32way)
11423
11424@@ -318,6 +321,7 @@ ENTRY(blowfish_ecb_dec_32way)
11425
11426 vzeroupper;
11427
11428+ pax_force_retaddr 0, 1
11429 ret;
11430 ENDPROC(blowfish_ecb_dec_32way)
11431
11432@@ -365,6 +369,7 @@ ENTRY(blowfish_cbc_dec_32way)
11433
11434 vzeroupper;
11435
11436+ pax_force_retaddr 0, 1
11437 ret;
11438 ENDPROC(blowfish_cbc_dec_32way)
11439
11440@@ -445,5 +450,6 @@ ENTRY(blowfish_ctr_32way)
11441
11442 vzeroupper;
11443
11444+ pax_force_retaddr 0, 1
11445 ret;
11446 ENDPROC(blowfish_ctr_32way)
11447diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
11448index 246c670..4d1ed00 100644
11449--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
11450+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
11451@@ -21,6 +21,7 @@
11452 */
11453
11454 #include <linux/linkage.h>
11455+#include <asm/alternative-asm.h>
11456
11457 .file "blowfish-x86_64-asm.S"
11458 .text
11459@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
11460 jnz .L__enc_xor;
11461
11462 write_block();
11463+ pax_force_retaddr 0, 1
11464 ret;
11465 .L__enc_xor:
11466 xor_block();
11467+ pax_force_retaddr 0, 1
11468 ret;
11469 ENDPROC(__blowfish_enc_blk)
11470
11471@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
11472
11473 movq %r11, %rbp;
11474
11475+ pax_force_retaddr 0, 1
11476 ret;
11477 ENDPROC(blowfish_dec_blk)
11478
11479@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
11480
11481 popq %rbx;
11482 popq %rbp;
11483+ pax_force_retaddr 0, 1
11484 ret;
11485
11486 .L__enc_xor4:
11487@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
11488
11489 popq %rbx;
11490 popq %rbp;
11491+ pax_force_retaddr 0, 1
11492 ret;
11493 ENDPROC(__blowfish_enc_blk_4way)
11494
11495@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
11496 popq %rbx;
11497 popq %rbp;
11498
11499+ pax_force_retaddr 0, 1
11500 ret;
11501 ENDPROC(blowfish_dec_blk_4way)
11502diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
11503index ce71f92..2dd5b1e 100644
11504--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
11505+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
11506@@ -16,6 +16,7 @@
11507 */
11508
11509 #include <linux/linkage.h>
11510+#include <asm/alternative-asm.h>
11511
11512 #define CAMELLIA_TABLE_BYTE_LEN 272
11513
11514@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
11515 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
11516 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
11517 %rcx, (%r9));
11518+ pax_force_retaddr_bts
11519 ret;
11520 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
11521
11522@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
11523 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
11524 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
11525 %rax, (%r9));
11526+ pax_force_retaddr_bts
11527 ret;
11528 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
11529
11530@@ -780,6 +783,7 @@ __camellia_enc_blk16:
11531 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
11532 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
11533
11534+ pax_force_retaddr_bts
11535 ret;
11536
11537 .align 8
11538@@ -865,6 +869,7 @@ __camellia_dec_blk16:
11539 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
11540 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
11541
11542+ pax_force_retaddr_bts
11543 ret;
11544
11545 .align 8
11546@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
11547 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
11548 %xmm8, %rsi);
11549
11550+ pax_force_retaddr 0, 1
11551 ret;
11552 ENDPROC(camellia_ecb_enc_16way)
11553
11554@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
11555 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
11556 %xmm8, %rsi);
11557
11558+ pax_force_retaddr 0, 1
11559 ret;
11560 ENDPROC(camellia_ecb_dec_16way)
11561
11562@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
11563 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
11564 %xmm8, %rsi);
11565
11566+ pax_force_retaddr 0, 1
11567 ret;
11568 ENDPROC(camellia_cbc_dec_16way)
11569
11570@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
11571 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
11572 %xmm8, %rsi);
11573
11574+ pax_force_retaddr 0, 1
11575 ret;
11576 ENDPROC(camellia_ctr_16way)
11577
11578@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
11579 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
11580 %xmm8, %rsi);
11581
11582+ pax_force_retaddr 0, 1
11583 ret;
11584 ENDPROC(camellia_xts_crypt_16way)
11585
11586diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
11587index 91a1878..bcf340a 100644
11588--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
11589+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
11590@@ -11,6 +11,7 @@
11591 */
11592
11593 #include <linux/linkage.h>
11594+#include <asm/alternative-asm.h>
11595
11596 #define CAMELLIA_TABLE_BYTE_LEN 272
11597
11598@@ -212,6 +213,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
11599 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
11600 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
11601 %rcx, (%r9));
11602+ pax_force_retaddr_bts
11603 ret;
11604 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
11605
11606@@ -220,6 +222,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
11607 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
11608 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
11609 %rax, (%r9));
11610+ pax_force_retaddr_bts
11611 ret;
11612 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
11613
11614@@ -802,6 +805,7 @@ __camellia_enc_blk32:
11615 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
11616 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
11617
11618+ pax_force_retaddr_bts
11619 ret;
11620
11621 .align 8
11622@@ -887,6 +891,7 @@ __camellia_dec_blk32:
11623 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
11624 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
11625
11626+ pax_force_retaddr_bts
11627 ret;
11628
11629 .align 8
11630@@ -930,6 +935,7 @@ ENTRY(camellia_ecb_enc_32way)
11631
11632 vzeroupper;
11633
11634+ pax_force_retaddr 0, 1
11635 ret;
11636 ENDPROC(camellia_ecb_enc_32way)
11637
11638@@ -962,6 +968,7 @@ ENTRY(camellia_ecb_dec_32way)
11639
11640 vzeroupper;
11641
11642+ pax_force_retaddr 0, 1
11643 ret;
11644 ENDPROC(camellia_ecb_dec_32way)
11645
11646@@ -1028,6 +1035,7 @@ ENTRY(camellia_cbc_dec_32way)
11647
11648 vzeroupper;
11649
11650+ pax_force_retaddr 0, 1
11651 ret;
11652 ENDPROC(camellia_cbc_dec_32way)
11653
11654@@ -1166,6 +1174,7 @@ ENTRY(camellia_ctr_32way)
11655
11656 vzeroupper;
11657
11658+ pax_force_retaddr 0, 1
11659 ret;
11660 ENDPROC(camellia_ctr_32way)
11661
11662@@ -1331,6 +1340,7 @@ camellia_xts_crypt_32way:
11663
11664 vzeroupper;
11665
11666+ pax_force_retaddr 0, 1
11667 ret;
11668 ENDPROC(camellia_xts_crypt_32way)
11669
11670diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
11671index 310319c..ce174a4 100644
11672--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
11673+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
11674@@ -21,6 +21,7 @@
11675 */
11676
11677 #include <linux/linkage.h>
11678+#include <asm/alternative-asm.h>
11679
11680 .file "camellia-x86_64-asm_64.S"
11681 .text
11682@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
11683 enc_outunpack(mov, RT1);
11684
11685 movq RRBP, %rbp;
11686+ pax_force_retaddr 0, 1
11687 ret;
11688
11689 .L__enc_xor:
11690 enc_outunpack(xor, RT1);
11691
11692 movq RRBP, %rbp;
11693+ pax_force_retaddr 0, 1
11694 ret;
11695 ENDPROC(__camellia_enc_blk)
11696
11697@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
11698 dec_outunpack();
11699
11700 movq RRBP, %rbp;
11701+ pax_force_retaddr 0, 1
11702 ret;
11703 ENDPROC(camellia_dec_blk)
11704
11705@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
11706
11707 movq RRBP, %rbp;
11708 popq %rbx;
11709+ pax_force_retaddr 0, 1
11710 ret;
11711
11712 .L__enc2_xor:
11713@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
11714
11715 movq RRBP, %rbp;
11716 popq %rbx;
11717+ pax_force_retaddr 0, 1
11718 ret;
11719 ENDPROC(__camellia_enc_blk_2way)
11720
11721@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
11722
11723 movq RRBP, %rbp;
11724 movq RXOR, %rbx;
11725+ pax_force_retaddr 0, 1
11726 ret;
11727 ENDPROC(camellia_dec_blk_2way)
11728diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11729index c35fd5d..c1ee236 100644
11730--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11731+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11732@@ -24,6 +24,7 @@
11733 */
11734
11735 #include <linux/linkage.h>
11736+#include <asm/alternative-asm.h>
11737
11738 .file "cast5-avx-x86_64-asm_64.S"
11739
11740@@ -281,6 +282,7 @@ __cast5_enc_blk16:
11741 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11742 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11743
11744+ pax_force_retaddr 0, 1
11745 ret;
11746 ENDPROC(__cast5_enc_blk16)
11747
11748@@ -352,6 +354,7 @@ __cast5_dec_blk16:
11749 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11750 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11751
11752+ pax_force_retaddr 0, 1
11753 ret;
11754
11755 .L__skip_dec:
11756@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
11757 vmovdqu RR4, (6*4*4)(%r11);
11758 vmovdqu RL4, (7*4*4)(%r11);
11759
11760+ pax_force_retaddr
11761 ret;
11762 ENDPROC(cast5_ecb_enc_16way)
11763
11764@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
11765 vmovdqu RR4, (6*4*4)(%r11);
11766 vmovdqu RL4, (7*4*4)(%r11);
11767
11768+ pax_force_retaddr
11769 ret;
11770 ENDPROC(cast5_ecb_dec_16way)
11771
11772@@ -469,6 +474,7 @@ ENTRY(cast5_cbc_dec_16way)
11773
11774 popq %r12;
11775
11776+ pax_force_retaddr
11777 ret;
11778 ENDPROC(cast5_cbc_dec_16way)
11779
11780@@ -542,5 +548,6 @@ ENTRY(cast5_ctr_16way)
11781
11782 popq %r12;
11783
11784+ pax_force_retaddr
11785 ret;
11786 ENDPROC(cast5_ctr_16way)
11787diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11788index e3531f8..18ded3a 100644
11789--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11790+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11791@@ -24,6 +24,7 @@
11792 */
11793
11794 #include <linux/linkage.h>
11795+#include <asm/alternative-asm.h>
11796 #include "glue_helper-asm-avx.S"
11797
11798 .file "cast6-avx-x86_64-asm_64.S"
11799@@ -295,6 +296,7 @@ __cast6_enc_blk8:
11800 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11801 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11802
11803+ pax_force_retaddr 0, 1
11804 ret;
11805 ENDPROC(__cast6_enc_blk8)
11806
11807@@ -340,6 +342,7 @@ __cast6_dec_blk8:
11808 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11809 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11810
11811+ pax_force_retaddr 0, 1
11812 ret;
11813 ENDPROC(__cast6_dec_blk8)
11814
11815@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
11816
11817 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11818
11819+ pax_force_retaddr
11820 ret;
11821 ENDPROC(cast6_ecb_enc_8way)
11822
11823@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
11824
11825 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11826
11827+ pax_force_retaddr
11828 ret;
11829 ENDPROC(cast6_ecb_dec_8way)
11830
11831@@ -399,6 +404,7 @@ ENTRY(cast6_cbc_dec_8way)
11832
11833 popq %r12;
11834
11835+ pax_force_retaddr
11836 ret;
11837 ENDPROC(cast6_cbc_dec_8way)
11838
11839@@ -424,6 +430,7 @@ ENTRY(cast6_ctr_8way)
11840
11841 popq %r12;
11842
11843+ pax_force_retaddr
11844 ret;
11845 ENDPROC(cast6_ctr_8way)
11846
11847@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
11848 /* dst <= regs xor IVs(in dst) */
11849 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11850
11851+ pax_force_retaddr
11852 ret;
11853 ENDPROC(cast6_xts_enc_8way)
11854
11855@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
11856 /* dst <= regs xor IVs(in dst) */
11857 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11858
11859+ pax_force_retaddr
11860 ret;
11861 ENDPROC(cast6_xts_dec_8way)
11862diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
11863index dbc4339..3d868c5 100644
11864--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
11865+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
11866@@ -45,6 +45,7 @@
11867
11868 #include <asm/inst.h>
11869 #include <linux/linkage.h>
11870+#include <asm/alternative-asm.h>
11871
11872 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
11873
11874@@ -312,6 +313,7 @@ do_return:
11875 popq %rsi
11876 popq %rdi
11877 popq %rbx
11878+ pax_force_retaddr 0, 1
11879 ret
11880
11881 ################################################################
11882diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
11883index 586f41a..d02851e 100644
11884--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
11885+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
11886@@ -18,6 +18,7 @@
11887
11888 #include <linux/linkage.h>
11889 #include <asm/inst.h>
11890+#include <asm/alternative-asm.h>
11891
11892 .data
11893
11894@@ -93,6 +94,7 @@ __clmul_gf128mul_ble:
11895 psrlq $1, T2
11896 pxor T2, T1
11897 pxor T1, DATA
11898+ pax_force_retaddr
11899 ret
11900 ENDPROC(__clmul_gf128mul_ble)
11901
11902@@ -105,6 +107,7 @@ ENTRY(clmul_ghash_mul)
11903 call __clmul_gf128mul_ble
11904 PSHUFB_XMM BSWAP DATA
11905 movups DATA, (%rdi)
11906+ pax_force_retaddr
11907 ret
11908 ENDPROC(clmul_ghash_mul)
11909
11910@@ -132,6 +135,7 @@ ENTRY(clmul_ghash_update)
11911 PSHUFB_XMM BSWAP DATA
11912 movups DATA, (%rdi)
11913 .Lupdate_just_ret:
11914+ pax_force_retaddr
11915 ret
11916 ENDPROC(clmul_ghash_update)
11917
11918@@ -157,5 +161,6 @@ ENTRY(clmul_ghash_setkey)
11919 pand .Lpoly, %xmm1
11920 pxor %xmm1, %xmm0
11921 movups %xmm0, (%rdi)
11922+ pax_force_retaddr
11923 ret
11924 ENDPROC(clmul_ghash_setkey)
11925diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11926index 9279e0b..9270820 100644
11927--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
11928+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11929@@ -1,4 +1,5 @@
11930 #include <linux/linkage.h>
11931+#include <asm/alternative-asm.h>
11932
11933 # enter salsa20_encrypt_bytes
11934 ENTRY(salsa20_encrypt_bytes)
11935@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
11936 add %r11,%rsp
11937 mov %rdi,%rax
11938 mov %rsi,%rdx
11939+ pax_force_retaddr 0, 1
11940 ret
11941 # bytesatleast65:
11942 ._bytesatleast65:
11943@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
11944 add %r11,%rsp
11945 mov %rdi,%rax
11946 mov %rsi,%rdx
11947+ pax_force_retaddr
11948 ret
11949 ENDPROC(salsa20_keysetup)
11950
11951@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
11952 add %r11,%rsp
11953 mov %rdi,%rax
11954 mov %rsi,%rdx
11955+ pax_force_retaddr
11956 ret
11957 ENDPROC(salsa20_ivsetup)
11958diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11959index 2f202f4..d9164d6 100644
11960--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11961+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11962@@ -24,6 +24,7 @@
11963 */
11964
11965 #include <linux/linkage.h>
11966+#include <asm/alternative-asm.h>
11967 #include "glue_helper-asm-avx.S"
11968
11969 .file "serpent-avx-x86_64-asm_64.S"
11970@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
11971 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11972 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11973
11974+ pax_force_retaddr
11975 ret;
11976 ENDPROC(__serpent_enc_blk8_avx)
11977
11978@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
11979 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
11980 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
11981
11982+ pax_force_retaddr
11983 ret;
11984 ENDPROC(__serpent_dec_blk8_avx)
11985
11986@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
11987
11988 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11989
11990+ pax_force_retaddr
11991 ret;
11992 ENDPROC(serpent_ecb_enc_8way_avx)
11993
11994@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
11995
11996 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11997
11998+ pax_force_retaddr
11999 ret;
12000 ENDPROC(serpent_ecb_dec_8way_avx)
12001
12002@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
12003
12004 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
12005
12006+ pax_force_retaddr
12007 ret;
12008 ENDPROC(serpent_cbc_dec_8way_avx)
12009
12010@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
12011
12012 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12013
12014+ pax_force_retaddr
12015 ret;
12016 ENDPROC(serpent_ctr_8way_avx)
12017
12018@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
12019 /* dst <= regs xor IVs(in dst) */
12020 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12021
12022+ pax_force_retaddr
12023 ret;
12024 ENDPROC(serpent_xts_enc_8way_avx)
12025
12026@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
12027 /* dst <= regs xor IVs(in dst) */
12028 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
12029
12030+ pax_force_retaddr
12031 ret;
12032 ENDPROC(serpent_xts_dec_8way_avx)
12033diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
12034index b222085..abd483c 100644
12035--- a/arch/x86/crypto/serpent-avx2-asm_64.S
12036+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
12037@@ -15,6 +15,7 @@
12038 */
12039
12040 #include <linux/linkage.h>
12041+#include <asm/alternative-asm.h>
12042 #include "glue_helper-asm-avx2.S"
12043
12044 .file "serpent-avx2-asm_64.S"
12045@@ -610,6 +611,7 @@ __serpent_enc_blk16:
12046 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12047 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12048
12049+ pax_force_retaddr
12050 ret;
12051 ENDPROC(__serpent_enc_blk16)
12052
12053@@ -664,6 +666,7 @@ __serpent_dec_blk16:
12054 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
12055 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
12056
12057+ pax_force_retaddr
12058 ret;
12059 ENDPROC(__serpent_dec_blk16)
12060
12061@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
12062
12063 vzeroupper;
12064
12065+ pax_force_retaddr
12066 ret;
12067 ENDPROC(serpent_ecb_enc_16way)
12068
12069@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
12070
12071 vzeroupper;
12072
12073+ pax_force_retaddr
12074 ret;
12075 ENDPROC(serpent_ecb_dec_16way)
12076
12077@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
12078
12079 vzeroupper;
12080
12081+ pax_force_retaddr
12082 ret;
12083 ENDPROC(serpent_cbc_dec_16way)
12084
12085@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
12086
12087 vzeroupper;
12088
12089+ pax_force_retaddr
12090 ret;
12091 ENDPROC(serpent_ctr_16way)
12092
12093@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
12094
12095 vzeroupper;
12096
12097+ pax_force_retaddr
12098 ret;
12099 ENDPROC(serpent_xts_enc_16way)
12100
12101@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
12102
12103 vzeroupper;
12104
12105+ pax_force_retaddr
12106 ret;
12107 ENDPROC(serpent_xts_dec_16way)
12108diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
12109index acc066c..1559cc4 100644
12110--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
12111+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
12112@@ -25,6 +25,7 @@
12113 */
12114
12115 #include <linux/linkage.h>
12116+#include <asm/alternative-asm.h>
12117
12118 .file "serpent-sse2-x86_64-asm_64.S"
12119 .text
12120@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
12121 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12122 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12123
12124+ pax_force_retaddr
12125 ret;
12126
12127 .L__enc_xor8:
12128 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12129 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12130
12131+ pax_force_retaddr
12132 ret;
12133 ENDPROC(__serpent_enc_blk_8way)
12134
12135@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
12136 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
12137 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
12138
12139+ pax_force_retaddr
12140 ret;
12141 ENDPROC(serpent_dec_blk_8way)
12142diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
12143index a410950..3356d42 100644
12144--- a/arch/x86/crypto/sha1_ssse3_asm.S
12145+++ b/arch/x86/crypto/sha1_ssse3_asm.S
12146@@ -29,6 +29,7 @@
12147 */
12148
12149 #include <linux/linkage.h>
12150+#include <asm/alternative-asm.h>
12151
12152 #define CTX %rdi // arg1
12153 #define BUF %rsi // arg2
12154@@ -104,6 +105,7 @@
12155 pop %r12
12156 pop %rbp
12157 pop %rbx
12158+ pax_force_retaddr 0, 1
12159 ret
12160
12161 ENDPROC(\name)
12162diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
12163index 642f156..4ab07b9 100644
12164--- a/arch/x86/crypto/sha256-avx-asm.S
12165+++ b/arch/x86/crypto/sha256-avx-asm.S
12166@@ -49,6 +49,7 @@
12167
12168 #ifdef CONFIG_AS_AVX
12169 #include <linux/linkage.h>
12170+#include <asm/alternative-asm.h>
12171
12172 ## assume buffers not aligned
12173 #define VMOVDQ vmovdqu
12174@@ -460,6 +461,7 @@ done_hash:
12175 popq %r13
12176 popq %rbp
12177 popq %rbx
12178+ pax_force_retaddr 0, 1
12179 ret
12180 ENDPROC(sha256_transform_avx)
12181
12182diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
12183index 9e86944..2e7f95a 100644
12184--- a/arch/x86/crypto/sha256-avx2-asm.S
12185+++ b/arch/x86/crypto/sha256-avx2-asm.S
12186@@ -50,6 +50,7 @@
12187
12188 #ifdef CONFIG_AS_AVX2
12189 #include <linux/linkage.h>
12190+#include <asm/alternative-asm.h>
12191
12192 ## assume buffers not aligned
12193 #define VMOVDQ vmovdqu
12194@@ -720,6 +721,7 @@ done_hash:
12195 popq %r12
12196 popq %rbp
12197 popq %rbx
12198+ pax_force_retaddr 0, 1
12199 ret
12200 ENDPROC(sha256_transform_rorx)
12201
12202diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
12203index f833b74..c36ed14 100644
12204--- a/arch/x86/crypto/sha256-ssse3-asm.S
12205+++ b/arch/x86/crypto/sha256-ssse3-asm.S
12206@@ -47,6 +47,7 @@
12207 ########################################################################
12208
12209 #include <linux/linkage.h>
12210+#include <asm/alternative-asm.h>
12211
12212 ## assume buffers not aligned
12213 #define MOVDQ movdqu
12214@@ -471,6 +472,7 @@ done_hash:
12215 popq %rbp
12216 popq %rbx
12217
12218+ pax_force_retaddr 0, 1
12219 ret
12220 ENDPROC(sha256_transform_ssse3)
12221
12222diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
12223index 974dde9..4533d34 100644
12224--- a/arch/x86/crypto/sha512-avx-asm.S
12225+++ b/arch/x86/crypto/sha512-avx-asm.S
12226@@ -49,6 +49,7 @@
12227
12228 #ifdef CONFIG_AS_AVX
12229 #include <linux/linkage.h>
12230+#include <asm/alternative-asm.h>
12231
12232 .text
12233
12234@@ -364,6 +365,7 @@ updateblock:
12235 mov frame_RSPSAVE(%rsp), %rsp
12236
12237 nowork:
12238+ pax_force_retaddr 0, 1
12239 ret
12240 ENDPROC(sha512_transform_avx)
12241
12242diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
12243index 568b961..061ef1d 100644
12244--- a/arch/x86/crypto/sha512-avx2-asm.S
12245+++ b/arch/x86/crypto/sha512-avx2-asm.S
12246@@ -51,6 +51,7 @@
12247
12248 #ifdef CONFIG_AS_AVX2
12249 #include <linux/linkage.h>
12250+#include <asm/alternative-asm.h>
12251
12252 .text
12253
12254@@ -678,6 +679,7 @@ done_hash:
12255
12256 # Restore Stack Pointer
12257 mov frame_RSPSAVE(%rsp), %rsp
12258+ pax_force_retaddr 0, 1
12259 ret
12260 ENDPROC(sha512_transform_rorx)
12261
12262diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
12263index fb56855..e23914f 100644
12264--- a/arch/x86/crypto/sha512-ssse3-asm.S
12265+++ b/arch/x86/crypto/sha512-ssse3-asm.S
12266@@ -48,6 +48,7 @@
12267 ########################################################################
12268
12269 #include <linux/linkage.h>
12270+#include <asm/alternative-asm.h>
12271
12272 .text
12273
12274@@ -363,6 +364,7 @@ updateblock:
12275 mov frame_RSPSAVE(%rsp), %rsp
12276
12277 nowork:
12278+ pax_force_retaddr 0, 1
12279 ret
12280 ENDPROC(sha512_transform_ssse3)
12281
12282diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
12283index 0505813..63b1d00 100644
12284--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
12285+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
12286@@ -24,6 +24,7 @@
12287 */
12288
12289 #include <linux/linkage.h>
12290+#include <asm/alternative-asm.h>
12291 #include "glue_helper-asm-avx.S"
12292
12293 .file "twofish-avx-x86_64-asm_64.S"
12294@@ -284,6 +285,7 @@ __twofish_enc_blk8:
12295 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
12296 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
12297
12298+ pax_force_retaddr 0, 1
12299 ret;
12300 ENDPROC(__twofish_enc_blk8)
12301
12302@@ -324,6 +326,7 @@ __twofish_dec_blk8:
12303 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
12304 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
12305
12306+ pax_force_retaddr 0, 1
12307 ret;
12308 ENDPROC(__twofish_dec_blk8)
12309
12310@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
12311
12312 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
12313
12314+ pax_force_retaddr 0, 1
12315 ret;
12316 ENDPROC(twofish_ecb_enc_8way)
12317
12318@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
12319
12320 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12321
12322+ pax_force_retaddr 0, 1
12323 ret;
12324 ENDPROC(twofish_ecb_dec_8way)
12325
12326@@ -383,6 +388,7 @@ ENTRY(twofish_cbc_dec_8way)
12327
12328 popq %r12;
12329
12330+ pax_force_retaddr 0, 1
12331 ret;
12332 ENDPROC(twofish_cbc_dec_8way)
12333
12334@@ -408,6 +414,7 @@ ENTRY(twofish_ctr_8way)
12335
12336 popq %r12;
12337
12338+ pax_force_retaddr 0, 1
12339 ret;
12340 ENDPROC(twofish_ctr_8way)
12341
12342@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
12343 /* dst <= regs xor IVs(in dst) */
12344 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
12345
12346+ pax_force_retaddr 0, 1
12347 ret;
12348 ENDPROC(twofish_xts_enc_8way)
12349
12350@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
12351 /* dst <= regs xor IVs(in dst) */
12352 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12353
12354+ pax_force_retaddr 0, 1
12355 ret;
12356 ENDPROC(twofish_xts_dec_8way)
12357diff --git a/arch/x86/crypto/twofish-avx2-asm_64.S b/arch/x86/crypto/twofish-avx2-asm_64.S
12358index e1a83b9..33006b9 100644
12359--- a/arch/x86/crypto/twofish-avx2-asm_64.S
12360+++ b/arch/x86/crypto/twofish-avx2-asm_64.S
12361@@ -11,6 +11,7 @@
12362 */
12363
12364 #include <linux/linkage.h>
12365+#include <asm/alternative-asm.h>
12366 #include "glue_helper-asm-avx2.S"
12367
12368 .file "twofish-avx2-asm_64.S"
12369@@ -422,6 +423,7 @@ __twofish_enc_blk16:
12370 outunpack_enc16(RA, RB, RC, RD);
12371 write_blocks16(RA, RB, RC, RD);
12372
12373+ pax_force_retaddr_bts
12374 ret;
12375 ENDPROC(__twofish_enc_blk16)
12376
12377@@ -454,6 +456,7 @@ __twofish_dec_blk16:
12378 outunpack_dec16(RA, RB, RC, RD);
12379 write_blocks16(RA, RB, RC, RD);
12380
12381+ pax_force_retaddr_bts
12382 ret;
12383 ENDPROC(__twofish_dec_blk16)
12384
12385@@ -476,6 +479,7 @@ ENTRY(twofish_ecb_enc_16way)
12386 popq %r12;
12387 vzeroupper;
12388
12389+ pax_force_retaddr 0, 1
12390 ret;
12391 ENDPROC(twofish_ecb_enc_16way)
12392
12393@@ -498,6 +502,7 @@ ENTRY(twofish_ecb_dec_16way)
12394 popq %r12;
12395 vzeroupper;
12396
12397+ pax_force_retaddr 0, 1
12398 ret;
12399 ENDPROC(twofish_ecb_dec_16way)
12400
12401@@ -521,6 +526,7 @@ ENTRY(twofish_cbc_dec_16way)
12402 popq %r12;
12403 vzeroupper;
12404
12405+ pax_force_retaddr 0, 1
12406 ret;
12407 ENDPROC(twofish_cbc_dec_16way)
12408
12409@@ -546,6 +552,7 @@ ENTRY(twofish_ctr_16way)
12410 popq %r12;
12411 vzeroupper;
12412
12413+ pax_force_retaddr 0, 1
12414 ret;
12415 ENDPROC(twofish_ctr_16way)
12416
12417@@ -574,6 +581,7 @@ twofish_xts_crypt_16way:
12418 popq %r12;
12419 vzeroupper;
12420
12421+ pax_force_retaddr 0, 1
12422 ret;
12423 ENDPROC(twofish_xts_crypt_16way)
12424
12425diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
12426index 1c3b7ce..b365c5e 100644
12427--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
12428+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
12429@@ -21,6 +21,7 @@
12430 */
12431
12432 #include <linux/linkage.h>
12433+#include <asm/alternative-asm.h>
12434
12435 .file "twofish-x86_64-asm-3way.S"
12436 .text
12437@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
12438 popq %r13;
12439 popq %r14;
12440 popq %r15;
12441+ pax_force_retaddr 0, 1
12442 ret;
12443
12444 .L__enc_xor3:
12445@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
12446 popq %r13;
12447 popq %r14;
12448 popq %r15;
12449+ pax_force_retaddr 0, 1
12450 ret;
12451 ENDPROC(__twofish_enc_blk_3way)
12452
12453@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
12454 popq %r13;
12455 popq %r14;
12456 popq %r15;
12457+ pax_force_retaddr 0, 1
12458 ret;
12459 ENDPROC(twofish_dec_blk_3way)
12460diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
12461index a039d21..29e7615 100644
12462--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
12463+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
12464@@ -22,6 +22,7 @@
12465
12466 #include <linux/linkage.h>
12467 #include <asm/asm-offsets.h>
12468+#include <asm/alternative-asm.h>
12469
12470 #define a_offset 0
12471 #define b_offset 4
12472@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
12473
12474 popq R1
12475 movq $1,%rax
12476+ pax_force_retaddr 0, 1
12477 ret
12478 ENDPROC(twofish_enc_blk)
12479
12480@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
12481
12482 popq R1
12483 movq $1,%rax
12484+ pax_force_retaddr 0, 1
12485 ret
12486 ENDPROC(twofish_dec_blk)
12487diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
12488index 52ff81c..98af645 100644
12489--- a/arch/x86/ia32/ia32_aout.c
12490+++ b/arch/x86/ia32/ia32_aout.c
12491@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
12492 unsigned long dump_start, dump_size;
12493 struct user32 dump;
12494
12495+ memset(&dump, 0, sizeof(dump));
12496+
12497 fs = get_fs();
12498 set_fs(KERNEL_DS);
12499 has_dumped = 1;
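NOTE: the added memset() zeroes the on-stack struct user32 before aout_core_dump() partially fills it and writes it to the core file; without it, compiler-inserted padding and any fields the dump path never sets would leak stale kernel stack bytes. The same pattern in miniature, with an illustrative struct layout (not the real struct user32):

    #include <string.h>

    struct dump_example {   /* stand-in for struct user32 */
        char magic;         /* 1 byte written below... */
        /* ...then 3 padding bytes the compiler inserts before "regs" */
        int  regs;
    };

    static void fill_dump(struct dump_example *d)
    {
        memset(d, 0, sizeof(*d)); /* as in the patch: padding and unset fields become 0 */
        d->magic = 1;
        d->regs  = 42;
        /* without the memset, those 3 padding bytes keep whatever was on the stack */
    }
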
12500diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
12501index cf1a471..5ba2673 100644
12502--- a/arch/x86/ia32/ia32_signal.c
12503+++ b/arch/x86/ia32/ia32_signal.c
12504@@ -340,7 +340,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
12505 sp -= frame_size;
12506 /* Align the stack pointer according to the i386 ABI,
12507 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
12508- sp = ((sp + 4) & -16ul) - 4;
12509+ sp = ((sp - 12) & -16ul) - 4;
12510 return (void __user *) sp;
12511 }
12512
12513@@ -398,7 +398,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
12514 * These are actually not used anymore, but left because some
12515 * gdb versions depend on them as a marker.
12516 */
12517- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
12518+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
12519 } put_user_catch(err);
12520
12521 if (err)
12522@@ -440,7 +440,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
12523 0xb8,
12524 __NR_ia32_rt_sigreturn,
12525 0x80cd,
12526- 0,
12527+ 0
12528 };
12529
12530 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
12531@@ -459,20 +459,22 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
12532 else
12533 put_user_ex(0, &frame->uc.uc_flags);
12534 put_user_ex(0, &frame->uc.uc_link);
12535- err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
12536+ __compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
12537
12538 if (ksig->ka.sa.sa_flags & SA_RESTORER)
12539 restorer = ksig->ka.sa.sa_restorer;
12540+ else if (current->mm->context.vdso)
12541+ /* Return stub is in 32bit vsyscall page */
12542+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
12543 else
12544- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
12545- rt_sigreturn);
12546+ restorer = &frame->retcode;
12547 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
12548
12549 /*
12550 * Not actually used anymore, but left because some gdb
12551 * versions need it.
12552 */
12553- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
12554+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
12555 } put_user_catch(err);
12556
12557 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
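NOTE: in get_sigframe() above, both the old "((sp + 4) & -16ul) - 4" and the new "((sp - 12) & -16ul) - 4" satisfy the i386 ABI comment, ((result + 4) & 15) == 0. The difference is that rounding from sp + 4 can land on sp itself (whenever sp % 16 == 12), while rounding from sp - 12 always ends up at least 16 bytes below the incoming sp. A quick check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        for (unsigned long sp = 4096; sp < 4112; sp++) {
            unsigned long r_old = ((sp + 4) & -16ul) - 4;
            unsigned long r_new = ((sp - 12) & -16ul) - 4;

            /* both forms keep the ABI invariant from the comment above */
            if (((r_old + 4) & 15) != 0 || ((r_new + 4) & 15) != 0)
                puts("ABI alignment violated");      /* never printed */

            if (r_old == sp)
                printf("sp=%lu: old form lands on sp itself\n", sp);
            if (r_new > sp - 16)
                puts("new form above sp - 16");      /* never printed */
        }
        return 0;
    }
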
12558diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
12559index 474dc1b..9297c58 100644
12560--- a/arch/x86/ia32/ia32entry.S
12561+++ b/arch/x86/ia32/ia32entry.S
12562@@ -15,8 +15,10 @@
12563 #include <asm/irqflags.h>
12564 #include <asm/asm.h>
12565 #include <asm/smap.h>
12566+#include <asm/pgtable.h>
12567 #include <linux/linkage.h>
12568 #include <linux/err.h>
12569+#include <asm/alternative-asm.h>
12570
12571 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
12572 #include <linux/elf-em.h>
12573@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
12574 ENDPROC(native_irq_enable_sysexit)
12575 #endif
12576
12577+ .macro pax_enter_kernel_user
12578+ pax_set_fptr_mask
12579+#ifdef CONFIG_PAX_MEMORY_UDEREF
12580+ call pax_enter_kernel_user
12581+#endif
12582+ .endm
12583+
12584+ .macro pax_exit_kernel_user
12585+#ifdef CONFIG_PAX_MEMORY_UDEREF
12586+ call pax_exit_kernel_user
12587+#endif
12588+#ifdef CONFIG_PAX_RANDKSTACK
12589+ pushq %rax
12590+ pushq %r11
12591+ call pax_randomize_kstack
12592+ popq %r11
12593+ popq %rax
12594+#endif
12595+ .endm
12596+
12597+ .macro pax_erase_kstack
12598+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12599+ call pax_erase_kstack
12600+#endif
12601+ .endm
12602+
12603 /*
12604 * 32bit SYSENTER instruction entry.
12605 *
12606@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
12607 CFI_REGISTER rsp,rbp
12608 SWAPGS_UNSAFE_STACK
12609 movq PER_CPU_VAR(kernel_stack), %rsp
12610- addq $(KERNEL_STACK_OFFSET),%rsp
12611- /*
12612- * No need to follow this irqs on/off section: the syscall
12613- * disabled irqs, here we enable it straight after entry:
12614- */
12615- ENABLE_INTERRUPTS(CLBR_NONE)
12616 movl %ebp,%ebp /* zero extension */
12617 pushq_cfi $__USER32_DS
12618 /*CFI_REL_OFFSET ss,0*/
12619@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
12620 CFI_REL_OFFSET rsp,0
12621 pushfq_cfi
12622 /*CFI_REL_OFFSET rflags,0*/
12623- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
12624- CFI_REGISTER rip,r10
12625+ orl $X86_EFLAGS_IF,(%rsp)
12626+ GET_THREAD_INFO(%r11)
12627+ movl TI_sysenter_return(%r11), %r11d
12628+ CFI_REGISTER rip,r11
12629 pushq_cfi $__USER32_CS
12630 /*CFI_REL_OFFSET cs,0*/
12631 movl %eax, %eax
12632- pushq_cfi %r10
12633+ pushq_cfi %r11
12634 CFI_REL_OFFSET rip,0
12635 pushq_cfi %rax
12636 cld
12637 SAVE_ARGS 0,1,0
12638+ pax_enter_kernel_user
12639+
12640+#ifdef CONFIG_PAX_RANDKSTACK
12641+ pax_erase_kstack
12642+#endif
12643+
12644+ /*
12645+ * No need to follow this irqs on/off section: the syscall
12646+ * disabled irqs, here we enable it straight after entry:
12647+ */
12648+ ENABLE_INTERRUPTS(CLBR_NONE)
12649 /* no need to do an access_ok check here because rbp has been
12650 32bit zero extended */
12651+
12652+#ifdef CONFIG_PAX_MEMORY_UDEREF
12653+ addq pax_user_shadow_base,%rbp
12654+ ASM_PAX_OPEN_USERLAND
12655+#endif
12656+
12657 ASM_STAC
12658 1: movl (%rbp),%ebp
12659 _ASM_EXTABLE(1b,ia32_badarg)
12660 ASM_CLAC
12661- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12662- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12663+
12664+#ifdef CONFIG_PAX_MEMORY_UDEREF
12665+ ASM_PAX_CLOSE_USERLAND
12666+#endif
12667+
12668+ GET_THREAD_INFO(%r11)
12669+ orl $TS_COMPAT,TI_status(%r11)
12670+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
12671 CFI_REMEMBER_STATE
12672 jnz sysenter_tracesys
12673 cmpq $(IA32_NR_syscalls-1),%rax
12674@@ -162,12 +209,15 @@ sysenter_do_call:
12675 sysenter_dispatch:
12676 call *ia32_sys_call_table(,%rax,8)
12677 movq %rax,RAX-ARGOFFSET(%rsp)
12678+ GET_THREAD_INFO(%r11)
12679 DISABLE_INTERRUPTS(CLBR_NONE)
12680 TRACE_IRQS_OFF
12681- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12682+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
12683 jnz sysexit_audit
12684 sysexit_from_sys_call:
12685- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12686+ pax_exit_kernel_user
12687+ pax_erase_kstack
12688+ andl $~TS_COMPAT,TI_status(%r11)
12689 /* clear IF, that popfq doesn't enable interrupts early */
12690 andl $~0x200,EFLAGS-R11(%rsp)
12691 movl RIP-R11(%rsp),%edx /* User %eip */
12692@@ -193,6 +243,9 @@ sysexit_from_sys_call:
12693 movl %eax,%esi /* 2nd arg: syscall number */
12694 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
12695 call __audit_syscall_entry
12696+
12697+ pax_erase_kstack
12698+
12699 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
12700 cmpq $(IA32_NR_syscalls-1),%rax
12701 ja ia32_badsys
12702@@ -204,7 +257,7 @@ sysexit_from_sys_call:
12703 .endm
12704
12705 .macro auditsys_exit exit
12706- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12707+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
12708 jnz ia32_ret_from_sys_call
12709 TRACE_IRQS_ON
12710 ENABLE_INTERRUPTS(CLBR_NONE)
12711@@ -215,11 +268,12 @@ sysexit_from_sys_call:
12712 1: setbe %al /* 1 if error, 0 if not */
12713 movzbl %al,%edi /* zero-extend that into %edi */
12714 call __audit_syscall_exit
12715+ GET_THREAD_INFO(%r11)
12716 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
12717 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
12718 DISABLE_INTERRUPTS(CLBR_NONE)
12719 TRACE_IRQS_OFF
12720- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12721+ testl %edi,TI_flags(%r11)
12722 jz \exit
12723 CLEAR_RREGS -ARGOFFSET
12724 jmp int_with_check
12725@@ -237,7 +291,7 @@ sysexit_audit:
12726
12727 sysenter_tracesys:
12728 #ifdef CONFIG_AUDITSYSCALL
12729- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12730+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
12731 jz sysenter_auditsys
12732 #endif
12733 SAVE_REST
12734@@ -249,6 +303,9 @@ sysenter_tracesys:
12735 RESTORE_REST
12736 cmpq $(IA32_NR_syscalls-1),%rax
12737 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
12738+
12739+ pax_erase_kstack
12740+
12741 jmp sysenter_do_call
12742 CFI_ENDPROC
12743 ENDPROC(ia32_sysenter_target)
12744@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
12745 ENTRY(ia32_cstar_target)
12746 CFI_STARTPROC32 simple
12747 CFI_SIGNAL_FRAME
12748- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12749+ CFI_DEF_CFA rsp,0
12750 CFI_REGISTER rip,rcx
12751 /*CFI_REGISTER rflags,r11*/
12752 SWAPGS_UNSAFE_STACK
12753 movl %esp,%r8d
12754 CFI_REGISTER rsp,r8
12755 movq PER_CPU_VAR(kernel_stack),%rsp
12756+ SAVE_ARGS 8*6,0,0
12757+ pax_enter_kernel_user
12758+
12759+#ifdef CONFIG_PAX_RANDKSTACK
12760+ pax_erase_kstack
12761+#endif
12762+
12763 /*
12764 * No need to follow this irqs on/off section: the syscall
12765 * disabled irqs and here we enable it straight after entry:
12766 */
12767 ENABLE_INTERRUPTS(CLBR_NONE)
12768- SAVE_ARGS 8,0,0
12769 movl %eax,%eax /* zero extension */
12770 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12771 movq %rcx,RIP-ARGOFFSET(%rsp)
12772@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
12773 /* no need to do an access_ok check here because r8 has been
12774 32bit zero extended */
12775 /* hardware stack frame is complete now */
12776+
12777+#ifdef CONFIG_PAX_MEMORY_UDEREF
12778+ ASM_PAX_OPEN_USERLAND
12779+ movq pax_user_shadow_base,%r8
12780+ addq RSP-ARGOFFSET(%rsp),%r8
12781+#endif
12782+
12783 ASM_STAC
12784 1: movl (%r8),%r9d
12785 _ASM_EXTABLE(1b,ia32_badarg)
12786 ASM_CLAC
12787- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12788- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12789+
12790+#ifdef CONFIG_PAX_MEMORY_UDEREF
12791+ ASM_PAX_CLOSE_USERLAND
12792+#endif
12793+
12794+ GET_THREAD_INFO(%r11)
12795+ orl $TS_COMPAT,TI_status(%r11)
12796+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
12797 CFI_REMEMBER_STATE
12798 jnz cstar_tracesys
12799 cmpq $IA32_NR_syscalls-1,%rax
12800@@ -319,12 +395,15 @@ cstar_do_call:
12801 cstar_dispatch:
12802 call *ia32_sys_call_table(,%rax,8)
12803 movq %rax,RAX-ARGOFFSET(%rsp)
12804+ GET_THREAD_INFO(%r11)
12805 DISABLE_INTERRUPTS(CLBR_NONE)
12806 TRACE_IRQS_OFF
12807- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12808+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
12809 jnz sysretl_audit
12810 sysretl_from_sys_call:
12811- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12812+ pax_exit_kernel_user
12813+ pax_erase_kstack
12814+ andl $~TS_COMPAT,TI_status(%r11)
12815 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
12816 movl RIP-ARGOFFSET(%rsp),%ecx
12817 CFI_REGISTER rip,rcx
12818@@ -352,7 +431,7 @@ sysretl_audit:
12819
12820 cstar_tracesys:
12821 #ifdef CONFIG_AUDITSYSCALL
12822- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12823+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
12824 jz cstar_auditsys
12825 #endif
12826 xchgl %r9d,%ebp
12827@@ -366,11 +445,19 @@ cstar_tracesys:
12828 xchgl %ebp,%r9d
12829 cmpq $(IA32_NR_syscalls-1),%rax
12830 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
12831+
12832+ pax_erase_kstack
12833+
12834 jmp cstar_do_call
12835 END(ia32_cstar_target)
12836
12837 ia32_badarg:
12838 ASM_CLAC
12839+
12840+#ifdef CONFIG_PAX_MEMORY_UDEREF
12841+ ASM_PAX_CLOSE_USERLAND
12842+#endif
12843+
12844 movq $-EFAULT,%rax
12845 jmp ia32_sysret
12846 CFI_ENDPROC
12847@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
12848 CFI_REL_OFFSET rip,RIP-RIP
12849 PARAVIRT_ADJUST_EXCEPTION_FRAME
12850 SWAPGS
12851- /*
12852- * No need to follow this irqs on/off section: the syscall
12853- * disabled irqs and here we enable it straight after entry:
12854- */
12855- ENABLE_INTERRUPTS(CLBR_NONE)
12856 movl %eax,%eax
12857 pushq_cfi %rax
12858 cld
12859 /* note the registers are not zero extended to the sf.
12860 this could be a problem. */
12861 SAVE_ARGS 0,1,0
12862- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12863- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12864+ pax_enter_kernel_user
12865+
12866+#ifdef CONFIG_PAX_RANDKSTACK
12867+ pax_erase_kstack
12868+#endif
12869+
12870+ /*
12871+ * No need to follow this irqs on/off section: the syscall
12872+ * disabled irqs and here we enable it straight after entry:
12873+ */
12874+ ENABLE_INTERRUPTS(CLBR_NONE)
12875+ GET_THREAD_INFO(%r11)
12876+ orl $TS_COMPAT,TI_status(%r11)
12877+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
12878 jnz ia32_tracesys
12879 cmpq $(IA32_NR_syscalls-1),%rax
12880 ja ia32_badsys
12881@@ -442,6 +536,9 @@ ia32_tracesys:
12882 RESTORE_REST
12883 cmpq $(IA32_NR_syscalls-1),%rax
12884 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
12885+
12886+ pax_erase_kstack
12887+
12888 jmp ia32_do_call
12889 END(ia32_syscall)
12890
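NOTE: the ia32 entry paths above gain four PaX hooks. pax_enter_kernel_user / pax_exit_kernel_user implement UDEREF (on amd64, userland is reached only through a shadow mapping, which is why the code adds pax_user_shadow_base to %rbp and %r8 before its deliberate userland loads); pax_randomize_kstack (RANDKSTACK) re-randomizes the kernel stack pointer on each syscall; and pax_erase_kstack (STACKLEAK) scrubs the unused part of the kernel stack so a later infoleak reads poison instead of stale data. A conceptual sketch of the erase step only; the function name and poison value are illustrative, not the patch's implementation:

    #include <stdint.h>

    #define STACK_POISON UINT64_C(0xdeadbeefdeadbeef)  /* illustrative value */

    /* Scrub [low, sp): deep call chains during the previous syscall may
       have left secrets below the current kernel stack pointer. */
    static void erase_kstack_sketch(uint64_t *low, uint64_t *sp)
    {
        for (uint64_t *p = low; p < sp; p++)
            *p = STACK_POISON;
    }
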
12891diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
12892index 8e0ceec..af13504 100644
12893--- a/arch/x86/ia32/sys_ia32.c
12894+++ b/arch/x86/ia32/sys_ia32.c
12895@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
12896 */
12897 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
12898 {
12899- typeof(ubuf->st_uid) uid = 0;
12900- typeof(ubuf->st_gid) gid = 0;
12901+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
12902+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
12903 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
12904 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
12905 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
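NOTE: the cp_stat64() change names the member types through a null pointer constant, typeof(((struct stat64 *)0)->st_uid), instead of through the __user-annotated ubuf parameter, presumably so static checkers never see even a formal dereference of a userspace pointer. typeof() is evaluated at compile time, so nothing is actually dereferenced; it is the same trick offsetof() relies on. A stand-alone illustration with a stand-in struct:

    struct stat64_like {        /* stand-in for struct stat64 */
        unsigned int st_uid;
        unsigned int st_gid;
    };

    /* yields "unsigned int" without needing any object of the struct type */
    static typeof(((struct stat64_like *)0)->st_uid) uid_example = 0;
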
12906diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
12907index 372231c..a5aa1a1 100644
12908--- a/arch/x86/include/asm/alternative-asm.h
12909+++ b/arch/x86/include/asm/alternative-asm.h
12910@@ -18,6 +18,45 @@
12911 .endm
12912 #endif
12913
12914+#ifdef KERNEXEC_PLUGIN
12915+ .macro pax_force_retaddr_bts rip=0
12916+ btsq $63,\rip(%rsp)
12917+ .endm
12918+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
12919+ .macro pax_force_retaddr rip=0, reload=0
12920+ btsq $63,\rip(%rsp)
12921+ .endm
12922+ .macro pax_force_fptr ptr
12923+ btsq $63,\ptr
12924+ .endm
12925+ .macro pax_set_fptr_mask
12926+ .endm
12927+#endif
12928+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
12929+ .macro pax_force_retaddr rip=0, reload=0
12930+ .if \reload
12931+ pax_set_fptr_mask
12932+ .endif
12933+ orq %r10,\rip(%rsp)
12934+ .endm
12935+ .macro pax_force_fptr ptr
12936+ orq %r10,\ptr
12937+ .endm
12938+ .macro pax_set_fptr_mask
12939+ movabs $0x8000000000000000,%r10
12940+ .endm
12941+#endif
12942+#else
12943+ .macro pax_force_retaddr rip=0, reload=0
12944+ .endm
12945+ .macro pax_force_fptr ptr
12946+ .endm
12947+ .macro pax_force_retaddr_bts rip=0
12948+ .endm
12949+ .macro pax_set_fptr_mask
12950+ .endm
12951+#endif
12952+
12953 .macro altinstruction_entry orig alt feature orig_len alt_len
12954 .long \orig - .
12955 .long \alt - .
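NOTE: alternative-asm.h above defines the two KERNEXEC flavors the gcc plugin can select. METHOD_BTS tags the return address in place with btsq $63, while METHOD_OR preloads the mask into %r10 (pax_set_fptr_mask) so both return addresses and indirect function pointers (pax_force_fptr) can be masked with a single orq; the "reload" argument re-runs pax_set_fptr_mask first on paths where %r10 may have been clobbered. A rough C model of the two expansions, for illustration only:

    #include <stdint.h>

    static const uint64_t fptr_mask = UINT64_C(0x8000000000000000); /* movabs ...,%r10 */

    /* METHOD_BTS: btsq $63,\rip(%rsp) - set bit 63 directly in memory */
    static inline uint64_t force_retaddr_bts(uint64_t ra) { return ra | (UINT64_C(1) << 63); }

    /* METHOD_OR: orq %r10,\rip(%rsp) - reuse the preloaded register mask */
    static inline uint64_t force_retaddr_or(uint64_t ra) { return ra | fptr_mask; }
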
12956diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
12957index 58ed6d9..f1cbe58 100644
12958--- a/arch/x86/include/asm/alternative.h
12959+++ b/arch/x86/include/asm/alternative.h
12960@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
12961 ".pushsection .discard,\"aw\",@progbits\n" \
12962 DISCARD_ENTRY(1) \
12963 ".popsection\n" \
12964- ".pushsection .altinstr_replacement, \"ax\"\n" \
12965+ ".pushsection .altinstr_replacement, \"a\"\n" \
12966 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
12967 ".popsection"
12968
12969@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
12970 DISCARD_ENTRY(1) \
12971 DISCARD_ENTRY(2) \
12972 ".popsection\n" \
12973- ".pushsection .altinstr_replacement, \"ax\"\n" \
12974+ ".pushsection .altinstr_replacement, \"a\"\n" \
12975 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
12976 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
12977 ".popsection"
12978diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
12979index 3388034..050f0b9 100644
12980--- a/arch/x86/include/asm/apic.h
12981+++ b/arch/x86/include/asm/apic.h
12982@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
12983
12984 #ifdef CONFIG_X86_LOCAL_APIC
12985
12986-extern unsigned int apic_verbosity;
12987+extern int apic_verbosity;
12988 extern int local_apic_timer_c2_ok;
12989
12990 extern int disable_apic;
12991diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
12992index 20370c6..a2eb9b0 100644
12993--- a/arch/x86/include/asm/apm.h
12994+++ b/arch/x86/include/asm/apm.h
12995@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
12996 __asm__ __volatile__(APM_DO_ZERO_SEGS
12997 "pushl %%edi\n\t"
12998 "pushl %%ebp\n\t"
12999- "lcall *%%cs:apm_bios_entry\n\t"
13000+ "lcall *%%ss:apm_bios_entry\n\t"
13001 "setc %%al\n\t"
13002 "popl %%ebp\n\t"
13003 "popl %%edi\n\t"
13004@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
13005 __asm__ __volatile__(APM_DO_ZERO_SEGS
13006 "pushl %%edi\n\t"
13007 "pushl %%ebp\n\t"
13008- "lcall *%%cs:apm_bios_entry\n\t"
13009+ "lcall *%%ss:apm_bios_entry\n\t"
13010 "setc %%bl\n\t"
13011 "popl %%ebp\n\t"
13012 "popl %%edi\n\t"
13013diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
13014index 722aa3b..3a0bb27 100644
13015--- a/arch/x86/include/asm/atomic.h
13016+++ b/arch/x86/include/asm/atomic.h
13017@@ -22,7 +22,18 @@
13018 */
13019 static inline int atomic_read(const atomic_t *v)
13020 {
13021- return (*(volatile int *)&(v)->counter);
13022+ return (*(volatile const int *)&(v)->counter);
13023+}
13024+
13025+/**
13026+ * atomic_read_unchecked - read atomic variable
13027+ * @v: pointer of type atomic_unchecked_t
13028+ *
13029+ * Atomically reads the value of @v.
13030+ */
13031+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
13032+{
13033+ return (*(volatile const int *)&(v)->counter);
13034 }
13035
13036 /**
13037@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
13038 }
13039
13040 /**
13041+ * atomic_set_unchecked - set atomic variable
13042+ * @v: pointer of type atomic_unchecked_t
13043+ * @i: required value
13044+ *
13045+ * Atomically sets the value of @v to @i.
13046+ */
13047+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
13048+{
13049+ v->counter = i;
13050+}
13051+
13052+/**
13053 * atomic_add - add integer to atomic variable
13054 * @i: integer value to add
13055 * @v: pointer of type atomic_t
13056@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
13057 */
13058 static inline void atomic_add(int i, atomic_t *v)
13059 {
13060- asm volatile(LOCK_PREFIX "addl %1,%0"
13061+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
13062+
13063+#ifdef CONFIG_PAX_REFCOUNT
13064+ "jno 0f\n"
13065+ LOCK_PREFIX "subl %1,%0\n"
13066+ "int $4\n0:\n"
13067+ _ASM_EXTABLE(0b, 0b)
13068+#endif
13069+
13070+ : "+m" (v->counter)
13071+ : "ir" (i));
13072+}
13073+
13074+/**
13075+ * atomic_add_unchecked - add integer to atomic variable
13076+ * @i: integer value to add
13077+ * @v: pointer of type atomic_unchecked_t
13078+ *
13079+ * Atomically adds @i to @v.
13080+ */
13081+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
13082+{
13083+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
13084 : "+m" (v->counter)
13085 : "ir" (i));
13086 }
13087@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
13088 */
13089 static inline void atomic_sub(int i, atomic_t *v)
13090 {
13091- asm volatile(LOCK_PREFIX "subl %1,%0"
13092+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
13093+
13094+#ifdef CONFIG_PAX_REFCOUNT
13095+ "jno 0f\n"
13096+ LOCK_PREFIX "addl %1,%0\n"
13097+ "int $4\n0:\n"
13098+ _ASM_EXTABLE(0b, 0b)
13099+#endif
13100+
13101+ : "+m" (v->counter)
13102+ : "ir" (i));
13103+}
13104+
13105+/**
13106+ * atomic_sub_unchecked - subtract integer from atomic variable
13107+ * @i: integer value to subtract
13108+ * @v: pointer of type atomic_unchecked_t
13109+ *
13110+ * Atomically subtracts @i from @v.
13111+ */
13112+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
13113+{
13114+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
13115 : "+m" (v->counter)
13116 : "ir" (i));
13117 }
13118@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
13119 {
13120 unsigned char c;
13121
13122- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
13123+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
13124+
13125+#ifdef CONFIG_PAX_REFCOUNT
13126+ "jno 0f\n"
13127+ LOCK_PREFIX "addl %2,%0\n"
13128+ "int $4\n0:\n"
13129+ _ASM_EXTABLE(0b, 0b)
13130+#endif
13131+
13132+ "sete %1\n"
13133 : "+m" (v->counter), "=qm" (c)
13134 : "ir" (i) : "memory");
13135 return c;
13136@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
13137 */
13138 static inline void atomic_inc(atomic_t *v)
13139 {
13140- asm volatile(LOCK_PREFIX "incl %0"
13141+ asm volatile(LOCK_PREFIX "incl %0\n"
13142+
13143+#ifdef CONFIG_PAX_REFCOUNT
13144+ "jno 0f\n"
13145+ LOCK_PREFIX "decl %0\n"
13146+ "int $4\n0:\n"
13147+ _ASM_EXTABLE(0b, 0b)
13148+#endif
13149+
13150+ : "+m" (v->counter));
13151+}
13152+
13153+/**
13154+ * atomic_inc_unchecked - increment atomic variable
13155+ * @v: pointer of type atomic_unchecked_t
13156+ *
13157+ * Atomically increments @v by 1.
13158+ */
13159+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
13160+{
13161+ asm volatile(LOCK_PREFIX "incl %0\n"
13162 : "+m" (v->counter));
13163 }
13164
13165@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
13166 */
13167 static inline void atomic_dec(atomic_t *v)
13168 {
13169- asm volatile(LOCK_PREFIX "decl %0"
13170+ asm volatile(LOCK_PREFIX "decl %0\n"
13171+
13172+#ifdef CONFIG_PAX_REFCOUNT
13173+ "jno 0f\n"
13174+ LOCK_PREFIX "incl %0\n"
13175+ "int $4\n0:\n"
13176+ _ASM_EXTABLE(0b, 0b)
13177+#endif
13178+
13179+ : "+m" (v->counter));
13180+}
13181+
13182+/**
13183+ * atomic_dec_unchecked - decrement atomic variable
13184+ * @v: pointer of type atomic_unchecked_t
13185+ *
13186+ * Atomically decrements @v by 1.
13187+ */
13188+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
13189+{
13190+ asm volatile(LOCK_PREFIX "decl %0\n"
13191 : "+m" (v->counter));
13192 }
13193
13194@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
13195 {
13196 unsigned char c;
13197
13198- asm volatile(LOCK_PREFIX "decl %0; sete %1"
13199+ asm volatile(LOCK_PREFIX "decl %0\n"
13200+
13201+#ifdef CONFIG_PAX_REFCOUNT
13202+ "jno 0f\n"
13203+ LOCK_PREFIX "incl %0\n"
13204+ "int $4\n0:\n"
13205+ _ASM_EXTABLE(0b, 0b)
13206+#endif
13207+
13208+ "sete %1\n"
13209 : "+m" (v->counter), "=qm" (c)
13210 : : "memory");
13211 return c != 0;
13212@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
13213 {
13214 unsigned char c;
13215
13216- asm volatile(LOCK_PREFIX "incl %0; sete %1"
13217+ asm volatile(LOCK_PREFIX "incl %0\n"
13218+
13219+#ifdef CONFIG_PAX_REFCOUNT
13220+ "jno 0f\n"
13221+ LOCK_PREFIX "decl %0\n"
13222+ "int $4\n0:\n"
13223+ _ASM_EXTABLE(0b, 0b)
13224+#endif
13225+
13226+ "sete %1\n"
13227+ : "+m" (v->counter), "=qm" (c)
13228+ : : "memory");
13229+ return c != 0;
13230+}
13231+
13232+/**
13233+ * atomic_inc_and_test_unchecked - increment and test
13234+ * @v: pointer of type atomic_unchecked_t
13235+ *
13236+ * Atomically increments @v by 1
13237+ * and returns true if the result is zero, or false for all
13238+ * other cases.
13239+ */
13240+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
13241+{
13242+ unsigned char c;
13243+
13244+ asm volatile(LOCK_PREFIX "incl %0\n"
13245+ "sete %1\n"
13246 : "+m" (v->counter), "=qm" (c)
13247 : : "memory");
13248 return c != 0;
13249@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
13250 {
13251 unsigned char c;
13252
13253- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
13254+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
13255+
13256+#ifdef CONFIG_PAX_REFCOUNT
13257+ "jno 0f\n"
13258+ LOCK_PREFIX "subl %2,%0\n"
13259+ "int $4\n0:\n"
13260+ _ASM_EXTABLE(0b, 0b)
13261+#endif
13262+
13263+ "sets %1\n"
13264 : "+m" (v->counter), "=qm" (c)
13265 : "ir" (i) : "memory");
13266 return c;
13267@@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
13268 */
13269 static inline int atomic_add_return(int i, atomic_t *v)
13270 {
13271+ return i + xadd_check_overflow(&v->counter, i);
13272+}
13273+
13274+/**
13275+ * atomic_add_return_unchecked - add integer and return
13276+ * @i: integer value to add
13277+ * @v: pointer of type atomic_unchecked_t
13278+ *
13279+ * Atomically adds @i to @v and returns @i + *@v
13280+ */
13281+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
13282+{
13283 return i + xadd(&v->counter, i);
13284 }
13285
13286@@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
13287 }
13288
13289 #define atomic_inc_return(v) (atomic_add_return(1, v))
13290+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
13291+{
13292+ return atomic_add_return_unchecked(1, v);
13293+}
13294 #define atomic_dec_return(v) (atomic_sub_return(1, v))
13295
13296 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
13297@@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
13298 return cmpxchg(&v->counter, old, new);
13299 }
13300
13301+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
13302+{
13303+ return cmpxchg(&v->counter, old, new);
13304+}
13305+
13306 static inline int atomic_xchg(atomic_t *v, int new)
13307 {
13308 return xchg(&v->counter, new);
13309 }
13310
13311+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
13312+{
13313+ return xchg(&v->counter, new);
13314+}
13315+
13316 /**
13317 * __atomic_add_unless - add unless the number is already a given value
13318 * @v: pointer of type atomic_t
13319@@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
13320 */
13321 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
13322 {
13323- int c, old;
13324+ int c, old, new;
13325 c = atomic_read(v);
13326 for (;;) {
13327- if (unlikely(c == (u)))
13328+ if (unlikely(c == u))
13329 break;
13330- old = atomic_cmpxchg((v), c, c + (a));
13331+
13332+ asm volatile("addl %2,%0\n"
13333+
13334+#ifdef CONFIG_PAX_REFCOUNT
13335+ "jno 0f\n"
13336+ "subl %2,%0\n"
13337+ "int $4\n0:\n"
13338+ _ASM_EXTABLE(0b, 0b)
13339+#endif
13340+
13341+ : "=r" (new)
13342+ : "0" (c), "ir" (a));
13343+
13344+ old = atomic_cmpxchg(v, c, new);
13345 if (likely(old == c))
13346 break;
13347 c = old;
13348@@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
13349 }
13350
13351 /**
13352+ * atomic_inc_not_zero_hint - increment if not null
13353+ * @v: pointer of type atomic_t
13354+ * @hint: probable value of the atomic before the increment
13355+ *
13356+ * This version of atomic_inc_not_zero() gives a hint of the probable
13357+ * value of the atomic. This helps the processor avoid reading the memory
13358+ * before doing the atomic read/modify/write cycle, lowering the
13359+ * number of bus transactions on some arches.
13360+ *
13361+ * Returns: 0 if increment was not done, 1 otherwise.
13362+ */
13363+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
13364+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
13365+{
13366+ int val, c = hint, new;
13367+
13368+ /* sanity test, should be removed by compiler if hint is a constant */
13369+ if (!hint)
13370+ return __atomic_add_unless(v, 1, 0);
13371+
13372+ do {
13373+ asm volatile("incl %0\n"
13374+
13375+#ifdef CONFIG_PAX_REFCOUNT
13376+ "jno 0f\n"
13377+ "decl %0\n"
13378+ "int $4\n0:\n"
13379+ _ASM_EXTABLE(0b, 0b)
13380+#endif
13381+
13382+ : "=r" (new)
13383+ : "0" (c));
13384+
13385+ val = atomic_cmpxchg(v, c, new);
13386+ if (val == c)
13387+ return 1;
13388+ c = val;
13389+ } while (c);
13390+
13391+ return 0;
13392+}
13393+
13394+/**
13395 * atomic_inc_short - increment of a short integer
13396 * @v: pointer to type int
13397 *
13398@@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
13399 #endif
13400
13401 /* These are x86-specific, used by some header files */
13402-#define atomic_clear_mask(mask, addr) \
13403- asm volatile(LOCK_PREFIX "andl %0,%1" \
13404- : : "r" (~(mask)), "m" (*(addr)) : "memory")
13405+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
13406+{
13407+ asm volatile(LOCK_PREFIX "andl %1,%0"
13408+ : "+m" (v->counter)
13409+ : "r" (~(mask))
13410+ : "memory");
13411+}
13412
13413-#define atomic_set_mask(mask, addr) \
13414- asm volatile(LOCK_PREFIX "orl %0,%1" \
13415- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
13416- : "memory")
13417+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
13418+{
13419+ asm volatile(LOCK_PREFIX "andl %1,%0"
13420+ : "+m" (v->counter)
13421+ : "r" (~(mask))
13422+ : "memory");
13423+}
13424+
13425+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
13426+{
13427+ asm volatile(LOCK_PREFIX "orl %1,%0"
13428+ : "+m" (v->counter)
13429+ : "r" (mask)
13430+ : "memory");
13431+}
13432+
13433+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
13434+{
13435+ asm volatile(LOCK_PREFIX "orl %1,%0"
13436+ : "+m" (v->counter)
13437+ : "r" (mask)
13438+ : "memory");
13439+}
13440
13441 /* Atomic operations are already serializing on x86 */
13442 #define smp_mb__before_atomic_dec() barrier()
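NOTE: the atomic.h changes implement PAX_REFCOUNT. Each checked operation appends "jno 0f; <undo>; int $4; 0:": because incl/addl have already written the wrapped value, the overflow path first undoes the update and then raises the overflow exception, which PaX handles by killing the offending task. The new atomic_unchecked_t type and *_unchecked operations opt out for counters that may legitimately wrap (statistics, sequence numbers). A portable sketch of the checked increment's semantics, atomicity aside; this is not the patch's code:

    #include <limits.h>
    #include <stdio.h>

    /* mimics LOCK incl + "jno 0f; LOCK decl; int $4": refuse to wrap, report instead */
    static int refcount_inc_checked(int *counter)
    {
        int result;
        if (__builtin_add_overflow(*counter, 1, &result))
            return -1;              /* overflow: counter left unchanged */
        *counter = result;
        return 0;
    }

    int main(void)
    {
        int c = INT_MAX;
        if (refcount_inc_checked(&c) < 0)
            puts("overflow caught, refcount not wrapped");
        return 0;
    }
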
13443diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
13444index b154de7..aadebd8 100644
13445--- a/arch/x86/include/asm/atomic64_32.h
13446+++ b/arch/x86/include/asm/atomic64_32.h
13447@@ -12,6 +12,14 @@ typedef struct {
13448 u64 __aligned(8) counter;
13449 } atomic64_t;
13450
13451+#ifdef CONFIG_PAX_REFCOUNT
13452+typedef struct {
13453+ u64 __aligned(8) counter;
13454+} atomic64_unchecked_t;
13455+#else
13456+typedef atomic64_t atomic64_unchecked_t;
13457+#endif
13458+
13459 #define ATOMIC64_INIT(val) { (val) }
13460
13461 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
13462@@ -37,21 +45,31 @@ typedef struct {
13463 ATOMIC64_DECL_ONE(sym##_386)
13464
13465 ATOMIC64_DECL_ONE(add_386);
13466+ATOMIC64_DECL_ONE(add_unchecked_386);
13467 ATOMIC64_DECL_ONE(sub_386);
13468+ATOMIC64_DECL_ONE(sub_unchecked_386);
13469 ATOMIC64_DECL_ONE(inc_386);
13470+ATOMIC64_DECL_ONE(inc_unchecked_386);
13471 ATOMIC64_DECL_ONE(dec_386);
13472+ATOMIC64_DECL_ONE(dec_unchecked_386);
13473 #endif
13474
13475 #define alternative_atomic64(f, out, in...) \
13476 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
13477
13478 ATOMIC64_DECL(read);
13479+ATOMIC64_DECL(read_unchecked);
13480 ATOMIC64_DECL(set);
13481+ATOMIC64_DECL(set_unchecked);
13482 ATOMIC64_DECL(xchg);
13483 ATOMIC64_DECL(add_return);
13484+ATOMIC64_DECL(add_return_unchecked);
13485 ATOMIC64_DECL(sub_return);
13486+ATOMIC64_DECL(sub_return_unchecked);
13487 ATOMIC64_DECL(inc_return);
13488+ATOMIC64_DECL(inc_return_unchecked);
13489 ATOMIC64_DECL(dec_return);
13490+ATOMIC64_DECL(dec_return_unchecked);
13491 ATOMIC64_DECL(dec_if_positive);
13492 ATOMIC64_DECL(inc_not_zero);
13493 ATOMIC64_DECL(add_unless);
13494@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
13495 }
13496
13497 /**
13498+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
13499+ * @v: pointer to type atomic64_unchecked_t
13500+ * @o: expected value
13501+ * @n: new value
13502+ *
13503+ * Atomically sets @v to @n if it was equal to @o and returns
13504+ * the old value.
13505+ */
13506+
13507+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
13508+{
13509+ return cmpxchg64(&v->counter, o, n);
13510+}
13511+
13512+/**
13513 * atomic64_xchg - xchg atomic64 variable
13514 * @v: pointer to type atomic64_t
13515 * @n: value to assign
13516@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
13517 }
13518
13519 /**
13520+ * atomic64_set_unchecked - set atomic64 variable
13521+ * @v: pointer to type atomic64_unchecked_t
13522+ * @i: value to assign
13523+ *
13524+ * Atomically sets the value of @v to @i.
13525+ */
13526+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
13527+{
13528+ unsigned high = (unsigned)(i >> 32);
13529+ unsigned low = (unsigned)i;
13530+ alternative_atomic64(set, /* no output */,
13531+ "S" (v), "b" (low), "c" (high)
13532+ : "eax", "edx", "memory");
13533+}
13534+
13535+/**
13536 * atomic64_read - read atomic64 variable
13537 * @v: pointer to type atomic64_t
13538 *
13539@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
13540 }
13541
13542 /**
13543+ * atomic64_read_unchecked - read atomic64 variable
13544+ * @v: pointer to type atomic64_unchecked_t
13545+ *
13546+ * Atomically reads the value of @v and returns it.
13547+ */
13548+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
13549+{
13550+ long long r;
13551+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
13552+ return r;
13553+ }
13554+
13555+/**
13556 * atomic64_add_return - add and return
13557 * @i: integer value to add
13558 * @v: pointer to type atomic64_t
13559@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
13560 return i;
13561 }
13562
13563+/**
13564+ * atomic64_add_return_unchecked - add and return
13565+ * @i: integer value to add
13566+ * @v: pointer to type atomic64_unchecked_t
13567+ *
13568+ * Atomically adds @i to @v and returns @i + *@v
13569+ */
13570+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
13571+{
13572+ alternative_atomic64(add_return_unchecked,
13573+ ASM_OUTPUT2("+A" (i), "+c" (v)),
13574+ ASM_NO_INPUT_CLOBBER("memory"));
13575+ return i;
13576+}
13577+
13578 /*
13579 * Other variants with different arithmetic operators:
13580 */
13581@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
13582 return a;
13583 }
13584
13585+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
13586+{
13587+ long long a;
13588+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
13589+ "S" (v) : "memory", "ecx");
13590+ return a;
13591+}
13592+
13593 static inline long long atomic64_dec_return(atomic64_t *v)
13594 {
13595 long long a;
13596@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
13597 }
13598
13599 /**
13600+ * atomic64_add_unchecked - add integer to atomic64 variable
13601+ * @i: integer value to add
13602+ * @v: pointer to type atomic64_unchecked_t
13603+ *
13604+ * Atomically adds @i to @v.
13605+ */
13606+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
13607+{
13608+ __alternative_atomic64(add_unchecked, add_return_unchecked,
13609+ ASM_OUTPUT2("+A" (i), "+c" (v)),
13610+ ASM_NO_INPUT_CLOBBER("memory"));
13611+ return i;
13612+}
13613+
13614+/**
13615 * atomic64_sub - subtract the atomic64 variable
13616 * @i: integer value to subtract
13617 * @v: pointer to type atomic64_t
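The atomic64_unchecked_t type introduced above exists so that counters which may legitimately wrap (statistics, sequence numbers) can opt out of the PAX_REFCOUNT overflow trap while reference counts keep it; making it a distinct struct turns any accidental mix-up into a type mismatch the compiler flags. A sketch of the same pattern outside the kernel, with PAX_REFCOUNT standing in for CONFIG_PAX_REFCOUNT:

typedef struct { long long counter; } my_atomic64_t;

#ifdef PAX_REFCOUNT
/* distinct type: passing it to a checked accessor is a type error */
typedef struct { long long counter; } my_atomic64_unchecked_t;
#else
/* feature off: both names refer to one type, zero overhead */
typedef my_atomic64_t my_atomic64_unchecked_t;
#endif

With the feature enabled, the duplicated *_unchecked accessors declared above are the only entry points that accept the second type.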
13618diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
13619index 0e1cbfc..5623683 100644
13620--- a/arch/x86/include/asm/atomic64_64.h
13621+++ b/arch/x86/include/asm/atomic64_64.h
13622@@ -18,7 +18,19 @@
13623 */
13624 static inline long atomic64_read(const atomic64_t *v)
13625 {
13626- return (*(volatile long *)&(v)->counter);
13627+ return (*(volatile const long *)&(v)->counter);
13628+}
13629+
13630+/**
13631+ * atomic64_read_unchecked - read atomic64 variable
13632+ * @v: pointer of type atomic64_unchecked_t
13633+ *
13634+ * Atomically reads the value of @v.
13635+ * Doesn't imply a read memory barrier.
13636+ */
13637+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
13638+{
13639+ return (*(volatile const long *)&(v)->counter);
13640 }
13641
13642 /**
13643@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
13644 }
13645
13646 /**
13647+ * atomic64_set_unchecked - set atomic64 variable
13648+ * @v: pointer to type atomic64_unchecked_t
13649+ * @i: required value
13650+ *
13651+ * Atomically sets the value of @v to @i.
13652+ */
13653+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
13654+{
13655+ v->counter = i;
13656+}
13657+
13658+/**
13659 * atomic64_add - add integer to atomic64 variable
13660 * @i: integer value to add
13661 * @v: pointer to type atomic64_t
13662@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
13663 */
13664 static inline void atomic64_add(long i, atomic64_t *v)
13665 {
13666+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
13667+
13668+#ifdef CONFIG_PAX_REFCOUNT
13669+ "jno 0f\n"
13670+ LOCK_PREFIX "subq %1,%0\n"
13671+ "int $4\n0:\n"
13672+ _ASM_EXTABLE(0b, 0b)
13673+#endif
13674+
13675+ : "=m" (v->counter)
13676+ : "er" (i), "m" (v->counter));
13677+}
13678+
13679+/**
13680+ * atomic64_add_unchecked - add integer to atomic64 variable
13681+ * @i: integer value to add
13682+ * @v: pointer to type atomic64_unchecked_t
13683+ *
13684+ * Atomically adds @i to @v.
13685+ */
13686+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
13687+{
13688 asm volatile(LOCK_PREFIX "addq %1,%0"
13689 : "=m" (v->counter)
13690 : "er" (i), "m" (v->counter));
13691@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
13692 */
13693 static inline void atomic64_sub(long i, atomic64_t *v)
13694 {
13695- asm volatile(LOCK_PREFIX "subq %1,%0"
13696+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
13697+
13698+#ifdef CONFIG_PAX_REFCOUNT
13699+ "jno 0f\n"
13700+ LOCK_PREFIX "addq %1,%0\n"
13701+ "int $4\n0:\n"
13702+ _ASM_EXTABLE(0b, 0b)
13703+#endif
13704+
13705+ : "=m" (v->counter)
13706+ : "er" (i), "m" (v->counter));
13707+}
13708+
13709+/**
13710+ * atomic64_sub_unchecked - subtract the atomic64 variable
13711+ * @i: integer value to subtract
13712+ * @v: pointer to type atomic64_unchecked_t
13713+ *
13714+ * Atomically subtracts @i from @v.
13715+ */
13716+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
13717+{
13718+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
13719 : "=m" (v->counter)
13720 : "er" (i), "m" (v->counter));
13721 }
13722@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
13723 {
13724 unsigned char c;
13725
13726- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
13727+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
13728+
13729+#ifdef CONFIG_PAX_REFCOUNT
13730+ "jno 0f\n"
13731+ LOCK_PREFIX "addq %2,%0\n"
13732+ "int $4\n0:\n"
13733+ _ASM_EXTABLE(0b, 0b)
13734+#endif
13735+
13736+ "sete %1\n"
13737 : "=m" (v->counter), "=qm" (c)
13738 : "er" (i), "m" (v->counter) : "memory");
13739 return c;
13740@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
13741 */
13742 static inline void atomic64_inc(atomic64_t *v)
13743 {
13744+ asm volatile(LOCK_PREFIX "incq %0\n"
13745+
13746+#ifdef CONFIG_PAX_REFCOUNT
13747+ "jno 0f\n"
13748+ LOCK_PREFIX "decq %0\n"
13749+ "int $4\n0:\n"
13750+ _ASM_EXTABLE(0b, 0b)
13751+#endif
13752+
13753+ : "=m" (v->counter)
13754+ : "m" (v->counter));
13755+}
13756+
13757+/**
13758+ * atomic64_inc_unchecked - increment atomic64 variable
13759+ * @v: pointer to type atomic64_unchecked_t
13760+ *
13761+ * Atomically increments @v by 1.
13762+ */
13763+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
13764+{
13765 asm volatile(LOCK_PREFIX "incq %0"
13766 : "=m" (v->counter)
13767 : "m" (v->counter));
13768@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
13769 */
13770 static inline void atomic64_dec(atomic64_t *v)
13771 {
13772- asm volatile(LOCK_PREFIX "decq %0"
13773+ asm volatile(LOCK_PREFIX "decq %0\n"
13774+
13775+#ifdef CONFIG_PAX_REFCOUNT
13776+ "jno 0f\n"
13777+ LOCK_PREFIX "incq %0\n"
13778+ "int $4\n0:\n"
13779+ _ASM_EXTABLE(0b, 0b)
13780+#endif
13781+
13782+ : "=m" (v->counter)
13783+ : "m" (v->counter));
13784+}
13785+
13786+/**
13787+ * atomic64_dec_unchecked - decrement atomic64 variable
13788+ * @v: pointer to type atomic64_unchecked_t
13789+ *
13790+ * Atomically decrements @v by 1.
13791+ */
13792+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
13793+{
13794+ asm volatile(LOCK_PREFIX "decq %0\n"
13795 : "=m" (v->counter)
13796 : "m" (v->counter));
13797 }
13798@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
13799 {
13800 unsigned char c;
13801
13802- asm volatile(LOCK_PREFIX "decq %0; sete %1"
13803+ asm volatile(LOCK_PREFIX "decq %0\n"
13804+
13805+#ifdef CONFIG_PAX_REFCOUNT
13806+ "jno 0f\n"
13807+ LOCK_PREFIX "incq %0\n"
13808+ "int $4\n0:\n"
13809+ _ASM_EXTABLE(0b, 0b)
13810+#endif
13811+
13812+ "sete %1\n"
13813 : "=m" (v->counter), "=qm" (c)
13814 : "m" (v->counter) : "memory");
13815 return c != 0;
13816@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
13817 {
13818 unsigned char c;
13819
13820- asm volatile(LOCK_PREFIX "incq %0; sete %1"
13821+ asm volatile(LOCK_PREFIX "incq %0\n"
13822+
13823+#ifdef CONFIG_PAX_REFCOUNT
13824+ "jno 0f\n"
13825+ LOCK_PREFIX "decq %0\n"
13826+ "int $4\n0:\n"
13827+ _ASM_EXTABLE(0b, 0b)
13828+#endif
13829+
13830+ "sete %1\n"
13831 : "=m" (v->counter), "=qm" (c)
13832 : "m" (v->counter) : "memory");
13833 return c != 0;
13834@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
13835 {
13836 unsigned char c;
13837
13838- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
13839+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
13840+
13841+#ifdef CONFIG_PAX_REFCOUNT
13842+ "jno 0f\n"
13843+ LOCK_PREFIX "subq %2,%0\n"
13844+ "int $4\n0:\n"
13845+ _ASM_EXTABLE(0b, 0b)
13846+#endif
13847+
13848+ "sets %1\n"
13849 : "=m" (v->counter), "=qm" (c)
13850 : "er" (i), "m" (v->counter) : "memory");
13851 return c;
13852@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
13853 */
13854 static inline long atomic64_add_return(long i, atomic64_t *v)
13855 {
13856+ return i + xadd_check_overflow(&v->counter, i);
13857+}
13858+
13859+/**
13860+ * atomic64_add_return_unchecked - add and return
13861+ * @i: integer value to add
13862+ * @v: pointer to type atomic64_unchecked_t
13863+ *
13864+ * Atomically adds @i to @v and returns @i + @v
13865+ */
13866+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
13867+{
13868 return i + xadd(&v->counter, i);
13869 }
13870
13871@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
13872 }
13873
13874 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
13875+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
13876+{
13877+ return atomic64_add_return_unchecked(1, v);
13878+}
13879 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
13880
13881 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
13882@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
13883 return cmpxchg(&v->counter, old, new);
13884 }
13885
13886+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
13887+{
13888+ return cmpxchg(&v->counter, old, new);
13889+}
13890+
13891 static inline long atomic64_xchg(atomic64_t *v, long new)
13892 {
13893 return xchg(&v->counter, new);
13894@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
13895 */
13896 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
13897 {
13898- long c, old;
13899+ long c, old, new;
13900 c = atomic64_read(v);
13901 for (;;) {
13902- if (unlikely(c == (u)))
13903+ if (unlikely(c == u))
13904 break;
13905- old = atomic64_cmpxchg((v), c, c + (a));
13906+
13907+ asm volatile("add %2,%0\n"
13908+
13909+#ifdef CONFIG_PAX_REFCOUNT
13910+ "jno 0f\n"
13911+ "sub %2,%0\n"
13912+ "int $4\n0:\n"
13913+ _ASM_EXTABLE(0b, 0b)
13914+#endif
13915+
13916+ : "=r" (new)
13917+ : "0" (c), "ir" (a));
13918+
13919+ old = atomic64_cmpxchg(v, c, new);
13920 if (likely(old == c))
13921 break;
13922 c = old;
13923 }
13924- return c != (u);
13925+ return c != u;
13926 }
13927
13928 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
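The recurring instrumentation above is PAX_REFCOUNT's overflow scheme: perform the arithmetic, and if the CPU's overflow flag is set (so the jno falls through), undo the operation, raise #OF via int $4, and let the _ASM_EXTABLE entry resume execution past the trap. In atomic64_add_unless the check runs on the plain add that computes the candidate value before cmpxchg publishes it. A portable sketch of that loop, assuming C11 atomics and using GCC's __builtin_saddll_overflow in place of the jno/int $4 sequence:

#include <stdatomic.h>
#include <stdlib.h>

/* returns nonzero iff it performed the add (i.e. *v was not u) */
static int add_unless_checked(_Atomic long long *v, long long a, long long u)
{
	long long c = atomic_load(v);

	for (;;) {
		long long new;

		if (c == u)
			break;
		if (__builtin_saddll_overflow(c, a, &new))
			abort();	/* kernel: int $4 overflow trap */
		/* publish; on failure c is reloaded and we retry */
		if (atomic_compare_exchange_weak(v, &c, new))
			break;
	}
	return c != u;
}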
13929diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
13930index 6dfd019..28e188d 100644
13931--- a/arch/x86/include/asm/bitops.h
13932+++ b/arch/x86/include/asm/bitops.h
13933@@ -40,7 +40,7 @@
13934 * a mask operation on a byte.
13935 */
13936 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
13937-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
13938+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
13939 #define CONST_MASK(nr) (1 << ((nr) & 7))
13940
13941 /**
13942@@ -486,7 +486,7 @@ static inline int fls(int x)
13943 * at position 64.
13944 */
13945 #ifdef CONFIG_X86_64
13946-static __always_inline int fls64(__u64 x)
13947+static __always_inline long fls64(__u64 x)
13948 {
13949 int bitpos = -1;
13950 /*
13951diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
13952index 4fa687a..60f2d39 100644
13953--- a/arch/x86/include/asm/boot.h
13954+++ b/arch/x86/include/asm/boot.h
13955@@ -6,10 +6,15 @@
13956 #include <uapi/asm/boot.h>
13957
13958 /* Physical address where kernel should be loaded. */
13959-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
13960+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
13961 + (CONFIG_PHYSICAL_ALIGN - 1)) \
13962 & ~(CONFIG_PHYSICAL_ALIGN - 1))
13963
13964+#ifndef __ASSEMBLY__
13965+extern unsigned char __LOAD_PHYSICAL_ADDR[];
13966+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
13967+#endif
13968+
13969 /* Minimum kernel alignment, as a power of two */
13970 #ifdef CONFIG_X86_64
13971 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
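The renamed ____LOAD_PHYSICAL_ADDR above is plain round-up arithmetic; the new LOAD_PHYSICAL_ADDR becomes a linker-provided symbol so the load address is no longer a compile-time constant baked into code that KERNEXEC relocates. The underlying computation, for reference (config values here are illustrative):

/* round x up to the next multiple of the power-of-two a */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* e.g. CONFIG_PHYSICAL_START 0x100000, CONFIG_PHYSICAL_ALIGN 0x200000:
 * ALIGN_UP(0x100000, 0x200000) == 0x200000 */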
13972diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
13973index 48f99f1..d78ebf9 100644
13974--- a/arch/x86/include/asm/cache.h
13975+++ b/arch/x86/include/asm/cache.h
13976@@ -5,12 +5,13 @@
13977
13978 /* L1 cache line size */
13979 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
13980-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
13981+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
13982
13983 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
13984+#define __read_only __attribute__((__section__(".data..read_only")))
13985
13986 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
13987-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
13988+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
13989
13990 #ifdef CONFIG_X86_VSMP
13991 #ifdef CONFIG_SMP
13992diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
13993index 9863ee3..4a1f8e1 100644
13994--- a/arch/x86/include/asm/cacheflush.h
13995+++ b/arch/x86/include/asm/cacheflush.h
13996@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
13997 unsigned long pg_flags = pg->flags & _PGMT_MASK;
13998
13999 if (pg_flags == _PGMT_DEFAULT)
14000- return -1;
14001+ return ~0UL;
14002 else if (pg_flags == _PGMT_WC)
14003 return _PAGE_CACHE_WC;
14004 else if (pg_flags == _PGMT_UC_MINUS)
14005diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
14006index 46fc474..b02b0f9 100644
14007--- a/arch/x86/include/asm/checksum_32.h
14008+++ b/arch/x86/include/asm/checksum_32.h
14009@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
14010 int len, __wsum sum,
14011 int *src_err_ptr, int *dst_err_ptr);
14012
14013+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
14014+ int len, __wsum sum,
14015+ int *src_err_ptr, int *dst_err_ptr);
14016+
14017+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
14018+ int len, __wsum sum,
14019+ int *src_err_ptr, int *dst_err_ptr);
14020+
14021 /*
14022 * Note: when you get a NULL pointer exception here this means someone
14023 * passed in an incorrect kernel address to one of these functions.
14024@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
14025 int *err_ptr)
14026 {
14027 might_sleep();
14028- return csum_partial_copy_generic((__force void *)src, dst,
14029+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
14030 len, sum, err_ptr, NULL);
14031 }
14032
14033@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
14034 {
14035 might_sleep();
14036 if (access_ok(VERIFY_WRITE, dst, len))
14037- return csum_partial_copy_generic(src, (__force void *)dst,
14038+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
14039 len, sum, NULL, err_ptr);
14040
14041 if (len)
14042diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
14043index d47786a..ce1b05d 100644
14044--- a/arch/x86/include/asm/cmpxchg.h
14045+++ b/arch/x86/include/asm/cmpxchg.h
14046@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
14047 __compiletime_error("Bad argument size for cmpxchg");
14048 extern void __xadd_wrong_size(void)
14049 __compiletime_error("Bad argument size for xadd");
14050+extern void __xadd_check_overflow_wrong_size(void)
14051+ __compiletime_error("Bad argument size for xadd_check_overflow");
14052 extern void __add_wrong_size(void)
14053 __compiletime_error("Bad argument size for add");
14054+extern void __add_check_overflow_wrong_size(void)
14055+ __compiletime_error("Bad argument size for add_check_overflow");
14056
14057 /*
14058 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
14059@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
14060 __ret; \
14061 })
14062
14063+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
14064+ ({ \
14065+ __typeof__ (*(ptr)) __ret = (arg); \
14066+ switch (sizeof(*(ptr))) { \
14067+ case __X86_CASE_L: \
14068+ asm volatile (lock #op "l %0, %1\n" \
14069+ "jno 0f\n" \
14070+ "mov %0,%1\n" \
14071+ "int $4\n0:\n" \
14072+ _ASM_EXTABLE(0b, 0b) \
14073+ : "+r" (__ret), "+m" (*(ptr)) \
14074+ : : "memory", "cc"); \
14075+ break; \
14076+ case __X86_CASE_Q: \
14077+ asm volatile (lock #op "q %q0, %1\n" \
14078+ "jno 0f\n" \
14079+ "mov %0,%1\n" \
14080+ "int $4\n0:\n" \
14081+ _ASM_EXTABLE(0b, 0b) \
14082+ : "+r" (__ret), "+m" (*(ptr)) \
14083+ : : "memory", "cc"); \
14084+ break; \
14085+ default: \
14086+ __ ## op ## _check_overflow_wrong_size(); \
14087+ } \
14088+ __ret; \
14089+ })
14090+
14091 /*
14092 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
14093 * Since this is generally used to protect other memory information, we
14094@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
14095 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
14096 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
14097
14098+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
14099+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
14100+
14101 #define __add(ptr, inc, lock) \
14102 ({ \
14103 __typeof__ (*(ptr)) __ret = (inc); \
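xadd_check_overflow() above applies the same recipe at the exchange level: lock xadd returns the old value while storing old+inc, and on overflow the replacement mov writes the old value back before trapping, leaving the counter untouched. A user-space sketch of the underlying xadd semantics (without the trap), assuming x86-64 and GCC-style asm:

static inline long fake_xadd(volatile long *ptr, long inc)
{
	/* atomically: tmp = *ptr; *ptr += inc; return tmp */
	asm volatile("lock xaddq %0, %1"
		     : "+r" (inc), "+m" (*ptr)
		     : : "memory");
	return inc;	/* now holds the pre-add value */
}

static inline long fake_add_return(long i, volatile long *ptr)
{
	return i + fake_xadd(ptr, i);	/* mirrors atomic64_add_return() */
}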
14104diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
14105index 59c6c40..5e0b22c 100644
14106--- a/arch/x86/include/asm/compat.h
14107+++ b/arch/x86/include/asm/compat.h
14108@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
14109 typedef u32 compat_uint_t;
14110 typedef u32 compat_ulong_t;
14111 typedef u64 __attribute__((aligned(4))) compat_u64;
14112-typedef u32 compat_uptr_t;
14113+typedef u32 __user compat_uptr_t;
14114
14115 struct compat_timespec {
14116 compat_time_t tv_sec;
14117diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
14118index e99ac27..10d834e 100644
14119--- a/arch/x86/include/asm/cpufeature.h
14120+++ b/arch/x86/include/asm/cpufeature.h
14121@@ -203,7 +203,7 @@
14122 #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
14123 #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
14124 #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
14125-
14126+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
14127
14128 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
14129 #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
14130@@ -211,7 +211,7 @@
14131 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
14132 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
14133 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
14134-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
14135+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
14136 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
14137 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
14138 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
14139@@ -353,6 +353,7 @@ extern const char * const x86_power_flags[32];
14140 #undef cpu_has_centaur_mcr
14141 #define cpu_has_centaur_mcr 0
14142
14143+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
14144 #endif /* CONFIG_X86_64 */
14145
14146 #if __GNUC__ >= 4
14147@@ -394,7 +395,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
14148 ".section .discard,\"aw\",@progbits\n"
14149 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
14150 ".previous\n"
14151- ".section .altinstr_replacement,\"ax\"\n"
14152+ ".section .altinstr_replacement,\"a\"\n"
14153 "3: movb $1,%0\n"
14154 "4:\n"
14155 ".previous\n"
14156diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
14157index 8bf1c06..b6ae785 100644
14158--- a/arch/x86/include/asm/desc.h
14159+++ b/arch/x86/include/asm/desc.h
14160@@ -4,6 +4,7 @@
14161 #include <asm/desc_defs.h>
14162 #include <asm/ldt.h>
14163 #include <asm/mmu.h>
14164+#include <asm/pgtable.h>
14165
14166 #include <linux/smp.h>
14167 #include <linux/percpu.h>
14168@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
14169
14170 desc->type = (info->read_exec_only ^ 1) << 1;
14171 desc->type |= info->contents << 2;
14172+ desc->type |= info->seg_not_present ^ 1;
14173
14174 desc->s = 1;
14175 desc->dpl = 0x3;
14176@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
14177 }
14178
14179 extern struct desc_ptr idt_descr;
14180-extern gate_desc idt_table[];
14181 extern struct desc_ptr nmi_idt_descr;
14182-extern gate_desc nmi_idt_table[];
14183-
14184-struct gdt_page {
14185- struct desc_struct gdt[GDT_ENTRIES];
14186-} __attribute__((aligned(PAGE_SIZE)));
14187-
14188-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
14189+extern gate_desc idt_table[256];
14190+extern gate_desc nmi_idt_table[256];
14191
14192+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
14193 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
14194 {
14195- return per_cpu(gdt_page, cpu).gdt;
14196+ return cpu_gdt_table[cpu];
14197 }
14198
14199 #ifdef CONFIG_X86_64
14200@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
14201 unsigned long base, unsigned dpl, unsigned flags,
14202 unsigned short seg)
14203 {
14204- gate->a = (seg << 16) | (base & 0xffff);
14205- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
14206+ gate->gate.offset_low = base;
14207+ gate->gate.seg = seg;
14208+ gate->gate.reserved = 0;
14209+ gate->gate.type = type;
14210+ gate->gate.s = 0;
14211+ gate->gate.dpl = dpl;
14212+ gate->gate.p = 1;
14213+ gate->gate.offset_high = base >> 16;
14214 }
14215
14216 #endif
14217@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
14218
14219 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
14220 {
14221+ pax_open_kernel();
14222 memcpy(&idt[entry], gate, sizeof(*gate));
14223+ pax_close_kernel();
14224 }
14225
14226 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
14227 {
14228+ pax_open_kernel();
14229 memcpy(&ldt[entry], desc, 8);
14230+ pax_close_kernel();
14231 }
14232
14233 static inline void
14234@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
14235 default: size = sizeof(*gdt); break;
14236 }
14237
14238+ pax_open_kernel();
14239 memcpy(&gdt[entry], desc, size);
14240+ pax_close_kernel();
14241 }
14242
14243 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
14244@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
14245
14246 static inline void native_load_tr_desc(void)
14247 {
14248+ pax_open_kernel();
14249 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
14250+ pax_close_kernel();
14251 }
14252
14253 static inline void native_load_gdt(const struct desc_ptr *dtr)
14254@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
14255 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
14256 unsigned int i;
14257
14258+ pax_open_kernel();
14259 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
14260 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
14261+ pax_close_kernel();
14262 }
14263
14264 #define _LDT_empty(info) \
14265@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
14266 preempt_enable();
14267 }
14268
14269-static inline unsigned long get_desc_base(const struct desc_struct *desc)
14270+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
14271 {
14272 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
14273 }
14274@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
14275 }
14276
14277 #ifdef CONFIG_X86_64
14278-static inline void set_nmi_gate(int gate, void *addr)
14279+static inline void set_nmi_gate(int gate, const void *addr)
14280 {
14281 gate_desc s;
14282
14283@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
14284 }
14285 #endif
14286
14287-static inline void _set_gate(int gate, unsigned type, void *addr,
14288+static inline void _set_gate(int gate, unsigned type, const void *addr,
14289 unsigned dpl, unsigned ist, unsigned seg)
14290 {
14291 gate_desc s;
14292@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
14293 * Pentium F0 0F bugfix can have resulted in the mapped
14294 * IDT being write-protected.
14295 */
14296-static inline void set_intr_gate(unsigned int n, void *addr)
14297+static inline void set_intr_gate(unsigned int n, const void *addr)
14298 {
14299 BUG_ON((unsigned)n > 0xFF);
14300 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
14301@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
14302 /*
14303 * This routine sets up an interrupt gate at directory privilege level 3.
14304 */
14305-static inline void set_system_intr_gate(unsigned int n, void *addr)
14306+static inline void set_system_intr_gate(unsigned int n, const void *addr)
14307 {
14308 BUG_ON((unsigned)n > 0xFF);
14309 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
14310 }
14311
14312-static inline void set_system_trap_gate(unsigned int n, void *addr)
14313+static inline void set_system_trap_gate(unsigned int n, const void *addr)
14314 {
14315 BUG_ON((unsigned)n > 0xFF);
14316 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
14317 }
14318
14319-static inline void set_trap_gate(unsigned int n, void *addr)
14320+static inline void set_trap_gate(unsigned int n, const void *addr)
14321 {
14322 BUG_ON((unsigned)n > 0xFF);
14323 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
14324@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
14325 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
14326 {
14327 BUG_ON((unsigned)n > 0xFF);
14328- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
14329+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
14330 }
14331
14332-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
14333+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
14334 {
14335 BUG_ON((unsigned)n > 0xFF);
14336 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
14337 }
14338
14339-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
14340+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
14341 {
14342 BUG_ON((unsigned)n > 0xFF);
14343 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
14344 }
14345
14346+#ifdef CONFIG_X86_32
14347+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
14348+{
14349+ struct desc_struct d;
14350+
14351+ if (likely(limit))
14352+ limit = (limit - 1UL) >> PAGE_SHIFT;
14353+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
14354+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
14355+}
14356+#endif
14357+
14358 #endif /* _ASM_X86_DESC_H */
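Every descriptor-table write above is bracketed by pax_open_kernel()/pax_close_kernel() because under KERNEXEC the IDT, GDT, and LDT live in read-only memory; the pair temporarily lifts write protection (e.g. by toggling CR0.WP) around the memcpy. A hedged sketch of the bracketing pattern with stand-in names — the real primitives are PaX's and are not modeled here:

#include <string.h>

static inline void fake_open_kernel(void)  { /* e.g. clear CR0.WP */ }
static inline void fake_close_kernel(void) { /* restore CR0.WP */ }

static void write_ro_table(void *dst, const void *src, size_t len)
{
	fake_open_kernel();
	memcpy(dst, src, len);	/* window where the table is writable */
	fake_close_kernel();
}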
14359diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
14360index 278441f..b95a174 100644
14361--- a/arch/x86/include/asm/desc_defs.h
14362+++ b/arch/x86/include/asm/desc_defs.h
14363@@ -31,6 +31,12 @@ struct desc_struct {
14364 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
14365 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
14366 };
14367+ struct {
14368+ u16 offset_low;
14369+ u16 seg;
14370+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
14371+ unsigned offset_high: 16;
14372+ } gate;
14373 };
14374 } __attribute__((packed));
14375
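The new "gate" view added to desc_defs.h lets pack_gate() above fill named bitfields instead of hand-assembling the two 32-bit words. A quick self-contained check that the field-wise encoding matches the shift/mask encoding it replaces, assuming GCC bitfield layout on little-endian x86:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct gate_bits {
	uint16_t offset_low;
	uint16_t seg;
	unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
	unsigned offset_high : 16;
} __attribute__((packed));

union gate {
	struct { uint32_t a, b; };	/* the old two-word view */
	struct gate_bits gate;
};

int main(void)
{
	uint32_t base = 0xc0123456, seg = 0x60, type = 14, dpl = 0;
	union gate old = { 0 }, new = { 0 };

	old.a = (seg << 16) | (base & 0xffff);
	old.b = (base & 0xffff0000) |
		(((0x80 | type | (dpl << 5)) & 0xff) << 8);

	new.gate.offset_low  = base;
	new.gate.seg         = seg;
	new.gate.reserved    = 0;
	new.gate.type        = type;
	new.gate.s           = 0;
	new.gate.dpl         = dpl;
	new.gate.p           = 1;	/* the 0x80 in the old encoding */
	new.gate.offset_high = base >> 16;

	assert(old.a == new.a && old.b == new.b);
	puts("encodings match");
	return 0;
}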
14376diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
14377index ced283a..ffe04cc 100644
14378--- a/arch/x86/include/asm/div64.h
14379+++ b/arch/x86/include/asm/div64.h
14380@@ -39,7 +39,7 @@
14381 __mod; \
14382 })
14383
14384-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
14385+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
14386 {
14387 union {
14388 u64 v64;
14389diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
14390index 9c999c1..3860cb8 100644
14391--- a/arch/x86/include/asm/elf.h
14392+++ b/arch/x86/include/asm/elf.h
14393@@ -243,7 +243,25 @@ extern int force_personality32;
14394 the loader. We need to make sure that it is out of the way of the program
14395 that it will "exec", and that there is sufficient room for the brk. */
14396
14397+#ifdef CONFIG_PAX_SEGMEXEC
14398+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
14399+#else
14400 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
14401+#endif
14402+
14403+#ifdef CONFIG_PAX_ASLR
14404+#ifdef CONFIG_X86_32
14405+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
14406+
14407+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
14408+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
14409+#else
14410+#define PAX_ELF_ET_DYN_BASE 0x400000UL
14411+
14412+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
14413+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
14414+#endif
14415+#endif
14416
14417 /* This yields a mask that user programs can use to figure out what
14418 instruction set this CPU supports. This could be done in user space,
14419@@ -296,16 +314,12 @@ do { \
14420
14421 #define ARCH_DLINFO \
14422 do { \
14423- if (vdso_enabled) \
14424- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
14425- (unsigned long)current->mm->context.vdso); \
14426+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
14427 } while (0)
14428
14429 #define ARCH_DLINFO_X32 \
14430 do { \
14431- if (vdso_enabled) \
14432- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
14433- (unsigned long)current->mm->context.vdso); \
14434+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
14435 } while (0)
14436
14437 #define AT_SYSINFO 32
14438@@ -320,7 +334,7 @@ else \
14439
14440 #endif /* !CONFIG_X86_32 */
14441
14442-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
14443+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
14444
14445 #define VDSO_ENTRY \
14446 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
14447@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
14448 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
14449 #define compat_arch_setup_additional_pages syscall32_setup_pages
14450
14451-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
14452-#define arch_randomize_brk arch_randomize_brk
14453-
14454 /*
14455 * True on X86_32 or when emulating IA32 on X86_64
14456 */
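For scale, the PAX_DELTA_* macros above give: on i386, 16 bits of mmap/stack randomization (15 under SEGMEXEC, whose user address space is halved); on x86_64, with the usual TASK_SIZE_MAX_SHIFT of 47 and PAGE_SHIFT of 12, a native task gets 47 - 12 - 3 = 32 bits, while a 32-bit (TIF_ADDR32) task falls back to 16.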
14457diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
14458index 75ce3f4..882e801 100644
14459--- a/arch/x86/include/asm/emergency-restart.h
14460+++ b/arch/x86/include/asm/emergency-restart.h
14461@@ -13,6 +13,6 @@ enum reboot_type {
14462
14463 extern enum reboot_type reboot_type;
14464
14465-extern void machine_emergency_restart(void);
14466+extern void machine_emergency_restart(void) __noreturn;
14467
14468 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
14469diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
14470index e25cc33..7d3ec01 100644
14471--- a/arch/x86/include/asm/fpu-internal.h
14472+++ b/arch/x86/include/asm/fpu-internal.h
14473@@ -126,8 +126,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
14474 #define user_insn(insn, output, input...) \
14475 ({ \
14476 int err; \
14477+ pax_open_userland(); \
14478 asm volatile(ASM_STAC "\n" \
14479- "1:" #insn "\n\t" \
14480+ "1:" \
14481+ __copyuser_seg \
14482+ #insn "\n\t" \
14483 "2: " ASM_CLAC "\n" \
14484 ".section .fixup,\"ax\"\n" \
14485 "3: movl $-1,%[err]\n" \
14486@@ -136,6 +139,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
14487 _ASM_EXTABLE(1b, 3b) \
14488 : [err] "=r" (err), output \
14489 : "0"(0), input); \
14490+ pax_close_userland(); \
14491 err; \
14492 })
14493
14494@@ -300,7 +304,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
14495 "emms\n\t" /* clear stack tags */
14496 "fildl %P[addr]", /* set F?P to defined value */
14497 X86_FEATURE_FXSAVE_LEAK,
14498- [addr] "m" (tsk->thread.fpu.has_fpu));
14499+ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
14500
14501 return fpu_restore_checking(&tsk->thread.fpu);
14502 }
14503diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
14504index be27ba1..04a8801 100644
14505--- a/arch/x86/include/asm/futex.h
14506+++ b/arch/x86/include/asm/futex.h
14507@@ -12,6 +12,7 @@
14508 #include <asm/smap.h>
14509
14510 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
14511+ typecheck(u32 __user *, uaddr); \
14512 asm volatile("\t" ASM_STAC "\n" \
14513 "1:\t" insn "\n" \
14514 "2:\t" ASM_CLAC "\n" \
14515@@ -20,15 +21,16 @@
14516 "\tjmp\t2b\n" \
14517 "\t.previous\n" \
14518 _ASM_EXTABLE(1b, 3b) \
14519- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
14520+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
14521 : "i" (-EFAULT), "0" (oparg), "1" (0))
14522
14523 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
14524+ typecheck(u32 __user *, uaddr); \
14525 asm volatile("\t" ASM_STAC "\n" \
14526 "1:\tmovl %2, %0\n" \
14527 "\tmovl\t%0, %3\n" \
14528 "\t" insn "\n" \
14529- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
14530+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
14531 "\tjnz\t1b\n" \
14532 "3:\t" ASM_CLAC "\n" \
14533 "\t.section .fixup,\"ax\"\n" \
14534@@ -38,7 +40,7 @@
14535 _ASM_EXTABLE(1b, 4b) \
14536 _ASM_EXTABLE(2b, 4b) \
14537 : "=&a" (oldval), "=&r" (ret), \
14538- "+m" (*uaddr), "=&r" (tem) \
14539+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
14540 : "r" (oparg), "i" (-EFAULT), "1" (0))
14541
14542 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
14543@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
14544
14545 pagefault_disable();
14546
14547+ pax_open_userland();
14548 switch (op) {
14549 case FUTEX_OP_SET:
14550- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
14551+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
14552 break;
14553 case FUTEX_OP_ADD:
14554- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
14555+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
14556 uaddr, oparg);
14557 break;
14558 case FUTEX_OP_OR:
14559@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
14560 default:
14561 ret = -ENOSYS;
14562 }
14563+ pax_close_userland();
14564
14565 pagefault_enable();
14566
14567@@ -115,18 +119,20 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
14568 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
14569 return -EFAULT;
14570
14571+ pax_open_userland();
14572 asm volatile("\t" ASM_STAC "\n"
14573- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
14574+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
14575 "2:\t" ASM_CLAC "\n"
14576 "\t.section .fixup, \"ax\"\n"
14577 "3:\tmov %3, %0\n"
14578 "\tjmp 2b\n"
14579 "\t.previous\n"
14580 _ASM_EXTABLE(1b, 3b)
14581- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
14582+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
14583 : "i" (-EFAULT), "r" (newval), "1" (oldval)
14584 : "memory"
14585 );
14586+ pax_close_userland();
14587
14588 *uval = oldval;
14589 return ret;
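__futex_atomic_op2 above is a load/modify/cmpxchg retry loop; the patch's contribution is the __copyuser_seg prefix (so UDEREF's segment-based userland access is used) and the pax_open_userland()/pax_close_userland() bracketing. The loop itself, sketched portably with C11 atomics (names here are illustrative):

#include <stdatomic.h>
#include <stdint.h>

static int futex_op_loop(_Atomic uint32_t *uaddr, uint32_t oparg,
			 uint32_t (*op)(uint32_t, uint32_t),
			 uint32_t *oldval)
{
	uint32_t old = atomic_load(uaddr);

	/* on cmpxchg failure, old is refreshed with the current value */
	while (!atomic_compare_exchange_weak(uaddr, &old,
					     op(old, oparg)))
		;
	*oldval = old;
	return 0;
}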
14590diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
14591index 1da97ef..9c2ebff 100644
14592--- a/arch/x86/include/asm/hw_irq.h
14593+++ b/arch/x86/include/asm/hw_irq.h
14594@@ -148,8 +148,8 @@ extern void setup_ioapic_dest(void);
14595 extern void enable_IO_APIC(void);
14596
14597 /* Statistics */
14598-extern atomic_t irq_err_count;
14599-extern atomic_t irq_mis_count;
14600+extern atomic_unchecked_t irq_err_count;
14601+extern atomic_unchecked_t irq_mis_count;
14602
14603 /* EISA */
14604 extern void eisa_set_level_irq(unsigned int irq);
14605diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
14606index a203659..9889f1c 100644
14607--- a/arch/x86/include/asm/i8259.h
14608+++ b/arch/x86/include/asm/i8259.h
14609@@ -62,7 +62,7 @@ struct legacy_pic {
14610 void (*init)(int auto_eoi);
14611 int (*irq_pending)(unsigned int irq);
14612 void (*make_irq)(unsigned int irq);
14613-};
14614+} __do_const;
14615
14616 extern struct legacy_pic *legacy_pic;
14617 extern struct legacy_pic null_legacy_pic;
14618diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
14619index d8e8eef..1765f78 100644
14620--- a/arch/x86/include/asm/io.h
14621+++ b/arch/x86/include/asm/io.h
14622@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
14623 "m" (*(volatile type __force *)addr) barrier); }
14624
14625 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
14626-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
14627-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
14628+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
14629+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
14630
14631 build_mmio_read(__readb, "b", unsigned char, "=q", )
14632-build_mmio_read(__readw, "w", unsigned short, "=r", )
14633-build_mmio_read(__readl, "l", unsigned int, "=r", )
14634+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
14635+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
14636
14637 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
14638 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
14639@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
14640 return ioremap_nocache(offset, size);
14641 }
14642
14643-extern void iounmap(volatile void __iomem *addr);
14644+extern void iounmap(const volatile void __iomem *addr);
14645
14646 extern void set_iounmap_nonlazy(void);
14647
14648@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
14649
14650 #include <linux/vmalloc.h>
14651
14652+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
14653+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
14654+{
14655+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
14656+}
14657+
14658+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
14659+{
14660+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
14661+}
14662+
14663 /*
14664 * Convert a virtual cached pointer to an uncached pointer
14665 */
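The two helpers added above bound /dev/mem-style accesses to the CPU's reported physical address width: the last page frame touched must fall below 2^(x86_phys_bits - PAGE_SHIFT). A self-contained sketch of the same arithmetic, assuming a 64-bit host (phys_bits stands in for boot_cpu_data.x86_phys_bits):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int valid_range(unsigned long addr, unsigned long count,
		       unsigned phys_bits)
{
	return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
	       (1ULL << (phys_bits - PAGE_SHIFT));
}

int main(void)
{
	/* with 36 physical address bits the limit is 64 GiB */
	printf("%d\n", valid_range(0x1000000000UL, 4096, 36));	/* 0 */
	printf("%d\n", valid_range(0x100000UL, 4096, 36));	/* 1 */
	return 0;
}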
14666diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
14667index bba3cf8..06bc8da 100644
14668--- a/arch/x86/include/asm/irqflags.h
14669+++ b/arch/x86/include/asm/irqflags.h
14670@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
14671 sti; \
14672 sysexit
14673
14674+#define GET_CR0_INTO_RDI mov %cr0, %rdi
14675+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
14676+#define GET_CR3_INTO_RDI mov %cr3, %rdi
14677+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
14678+
14679 #else
14680 #define INTERRUPT_RETURN iret
14681 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
14682diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
14683index 5a6d287..f815789 100644
14684--- a/arch/x86/include/asm/kprobes.h
14685+++ b/arch/x86/include/asm/kprobes.h
14686@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
14687 #define RELATIVEJUMP_SIZE 5
14688 #define RELATIVECALL_OPCODE 0xe8
14689 #define RELATIVE_ADDR_SIZE 4
14690-#define MAX_STACK_SIZE 64
14691-#define MIN_STACK_SIZE(ADDR) \
14692- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
14693- THREAD_SIZE - (unsigned long)(ADDR))) \
14694- ? (MAX_STACK_SIZE) \
14695- : (((unsigned long)current_thread_info()) + \
14696- THREAD_SIZE - (unsigned long)(ADDR)))
14697+#define MAX_STACK_SIZE 64UL
14698+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
14699
14700 #define flush_insn_slot(p) do { } while (0)
14701
14702diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
14703index 2d89e39..baee879 100644
14704--- a/arch/x86/include/asm/local.h
14705+++ b/arch/x86/include/asm/local.h
14706@@ -10,33 +10,97 @@ typedef struct {
14707 atomic_long_t a;
14708 } local_t;
14709
14710+typedef struct {
14711+ atomic_long_unchecked_t a;
14712+} local_unchecked_t;
14713+
14714 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
14715
14716 #define local_read(l) atomic_long_read(&(l)->a)
14717+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
14718 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
14719+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
14720
14721 static inline void local_inc(local_t *l)
14722 {
14723- asm volatile(_ASM_INC "%0"
14724+ asm volatile(_ASM_INC "%0\n"
14725+
14726+#ifdef CONFIG_PAX_REFCOUNT
14727+ "jno 0f\n"
14728+ _ASM_DEC "%0\n"
14729+ "int $4\n0:\n"
14730+ _ASM_EXTABLE(0b, 0b)
14731+#endif
14732+
14733+ : "+m" (l->a.counter));
14734+}
14735+
14736+static inline void local_inc_unchecked(local_unchecked_t *l)
14737+{
14738+ asm volatile(_ASM_INC "%0\n"
14739 : "+m" (l->a.counter));
14740 }
14741
14742 static inline void local_dec(local_t *l)
14743 {
14744- asm volatile(_ASM_DEC "%0"
14745+ asm volatile(_ASM_DEC "%0\n"
14746+
14747+#ifdef CONFIG_PAX_REFCOUNT
14748+ "jno 0f\n"
14749+ _ASM_INC "%0\n"
14750+ "int $4\n0:\n"
14751+ _ASM_EXTABLE(0b, 0b)
14752+#endif
14753+
14754+ : "+m" (l->a.counter));
14755+}
14756+
14757+static inline void local_dec_unchecked(local_unchecked_t *l)
14758+{
14759+ asm volatile(_ASM_DEC "%0\n"
14760 : "+m" (l->a.counter));
14761 }
14762
14763 static inline void local_add(long i, local_t *l)
14764 {
14765- asm volatile(_ASM_ADD "%1,%0"
14766+ asm volatile(_ASM_ADD "%1,%0\n"
14767+
14768+#ifdef CONFIG_PAX_REFCOUNT
14769+ "jno 0f\n"
14770+ _ASM_SUB "%1,%0\n"
14771+ "int $4\n0:\n"
14772+ _ASM_EXTABLE(0b, 0b)
14773+#endif
14774+
14775+ : "+m" (l->a.counter)
14776+ : "ir" (i));
14777+}
14778+
14779+static inline void local_add_unchecked(long i, local_unchecked_t *l)
14780+{
14781+ asm volatile(_ASM_ADD "%1,%0\n"
14782 : "+m" (l->a.counter)
14783 : "ir" (i));
14784 }
14785
14786 static inline void local_sub(long i, local_t *l)
14787 {
14788- asm volatile(_ASM_SUB "%1,%0"
14789+ asm volatile(_ASM_SUB "%1,%0\n"
14790+
14791+#ifdef CONFIG_PAX_REFCOUNT
14792+ "jno 0f\n"
14793+ _ASM_ADD "%1,%0\n"
14794+ "int $4\n0:\n"
14795+ _ASM_EXTABLE(0b, 0b)
14796+#endif
14797+
14798+ : "+m" (l->a.counter)
14799+ : "ir" (i));
14800+}
14801+
14802+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
14803+{
14804+ asm volatile(_ASM_SUB "%1,%0\n"
14805 : "+m" (l->a.counter)
14806 : "ir" (i));
14807 }
14808@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
14809 {
14810 unsigned char c;
14811
14812- asm volatile(_ASM_SUB "%2,%0; sete %1"
14813+ asm volatile(_ASM_SUB "%2,%0\n"
14814+
14815+#ifdef CONFIG_PAX_REFCOUNT
14816+ "jno 0f\n"
14817+ _ASM_ADD "%2,%0\n"
14818+ "int $4\n0:\n"
14819+ _ASM_EXTABLE(0b, 0b)
14820+#endif
14821+
14822+ "sete %1\n"
14823 : "+m" (l->a.counter), "=qm" (c)
14824 : "ir" (i) : "memory");
14825 return c;
14826@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
14827 {
14828 unsigned char c;
14829
14830- asm volatile(_ASM_DEC "%0; sete %1"
14831+ asm volatile(_ASM_DEC "%0\n"
14832+
14833+#ifdef CONFIG_PAX_REFCOUNT
14834+ "jno 0f\n"
14835+ _ASM_INC "%0\n"
14836+ "int $4\n0:\n"
14837+ _ASM_EXTABLE(0b, 0b)
14838+#endif
14839+
14840+ "sete %1\n"
14841 : "+m" (l->a.counter), "=qm" (c)
14842 : : "memory");
14843 return c != 0;
14844@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
14845 {
14846 unsigned char c;
14847
14848- asm volatile(_ASM_INC "%0; sete %1"
14849+ asm volatile(_ASM_INC "%0\n"
14850+
14851+#ifdef CONFIG_PAX_REFCOUNT
14852+ "jno 0f\n"
14853+ _ASM_DEC "%0\n"
14854+ "int $4\n0:\n"
14855+ _ASM_EXTABLE(0b, 0b)
14856+#endif
14857+
14858+ "sete %1\n"
14859 : "+m" (l->a.counter), "=qm" (c)
14860 : : "memory");
14861 return c != 0;
14862@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
14863 {
14864 unsigned char c;
14865
14866- asm volatile(_ASM_ADD "%2,%0; sets %1"
14867+ asm volatile(_ASM_ADD "%2,%0\n"
14868+
14869+#ifdef CONFIG_PAX_REFCOUNT
14870+ "jno 0f\n"
14871+ _ASM_SUB "%2,%0\n"
14872+ "int $4\n0:\n"
14873+ _ASM_EXTABLE(0b, 0b)
14874+#endif
14875+
14876+ "sets %1\n"
14877 : "+m" (l->a.counter), "=qm" (c)
14878 : "ir" (i) : "memory");
14879 return c;
14880@@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
14881 static inline long local_add_return(long i, local_t *l)
14882 {
14883 long __i = i;
14884+ asm volatile(_ASM_XADD "%0, %1\n"
14885+
14886+#ifdef CONFIG_PAX_REFCOUNT
14887+ "jno 0f\n"
14888+ _ASM_MOV "%0,%1\n"
14889+ "int $4\n0:\n"
14890+ _ASM_EXTABLE(0b, 0b)
14891+#endif
14892+
14893+ : "+r" (i), "+m" (l->a.counter)
14894+ : : "memory");
14895+ return i + __i;
14896+}
14897+
14898+/**
14899+ * local_add_return_unchecked - add and return
14900+ * @i: integer value to add
14901+ * @l: pointer to type local_unchecked_t
14902+ *
14903+ * Atomically adds @i to @l and returns @i + @l
14904+ */
14905+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
14906+{
14907+ long __i = i;
14908 asm volatile(_ASM_XADD "%0, %1;"
14909 : "+r" (i), "+m" (l->a.counter)
14910 : : "memory");
14911@@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
14912
14913 #define local_cmpxchg(l, o, n) \
14914 (cmpxchg_local(&((l)->a.counter), (o), (n)))
14915+#define local_cmpxchg_unchecked(l, o, n) \
14916+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
14917 /* Always has a lock prefix */
14918 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
14919
14920diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
14921new file mode 100644
14922index 0000000..2bfd3ba
14923--- /dev/null
14924+++ b/arch/x86/include/asm/mman.h
14925@@ -0,0 +1,15 @@
14926+#ifndef _X86_MMAN_H
14927+#define _X86_MMAN_H
14928+
14929+#include <uapi/asm/mman.h>
14930+
14931+#ifdef __KERNEL__
14932+#ifndef __ASSEMBLY__
14933+#ifdef CONFIG_X86_32
14934+#define arch_mmap_check i386_mmap_check
14935+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
14936+#endif
14937+#endif
14938+#endif
14939+
14940+#endif /* X86_MMAN_H */
14941diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
14942index 5f55e69..e20bfb1 100644
14943--- a/arch/x86/include/asm/mmu.h
14944+++ b/arch/x86/include/asm/mmu.h
14945@@ -9,7 +9,7 @@
14946 * we put the segment information here.
14947 */
14948 typedef struct {
14949- void *ldt;
14950+ struct desc_struct *ldt;
14951 int size;
14952
14953 #ifdef CONFIG_X86_64
14954@@ -18,7 +18,19 @@ typedef struct {
14955 #endif
14956
14957 struct mutex lock;
14958- void *vdso;
14959+ unsigned long vdso;
14960+
14961+#ifdef CONFIG_X86_32
14962+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
14963+ unsigned long user_cs_base;
14964+ unsigned long user_cs_limit;
14965+
14966+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14967+ cpumask_t cpu_user_cs_mask;
14968+#endif
14969+
14970+#endif
14971+#endif
14972 } mm_context_t;
14973
14974 #ifdef CONFIG_SMP
14975diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
14976index cdbf367..4c73c9e 100644
14977--- a/arch/x86/include/asm/mmu_context.h
14978+++ b/arch/x86/include/asm/mmu_context.h
14979@@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *mm);
14980
14981 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
14982 {
14983+
14984+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14985+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
14986+ unsigned int i;
14987+ pgd_t *pgd;
14988+
14989+ pax_open_kernel();
14990+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
14991+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
14992+ set_pgd_batched(pgd+i, native_make_pgd(0));
14993+ pax_close_kernel();
14994+ }
14995+#endif
14996+
14997 #ifdef CONFIG_SMP
14998 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
14999 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
15000@@ -34,16 +48,55 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
15001 struct task_struct *tsk)
15002 {
15003 unsigned cpu = smp_processor_id();
15004+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15005+ int tlbstate = TLBSTATE_OK;
15006+#endif
15007
15008 if (likely(prev != next)) {
15009 #ifdef CONFIG_SMP
15010+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15011+ tlbstate = this_cpu_read(cpu_tlbstate.state);
15012+#endif
15013 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
15014 this_cpu_write(cpu_tlbstate.active_mm, next);
15015 #endif
15016 cpumask_set_cpu(cpu, mm_cpumask(next));
15017
15018 /* Re-load page tables */
15019+#ifdef CONFIG_PAX_PER_CPU_PGD
15020+ pax_open_kernel();
15021+
15022+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15023+ if (static_cpu_has(X86_FEATURE_PCID))
15024+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
15025+ else
15026+#endif
15027+
15028+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
15029+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
15030+ pax_close_kernel();
15031+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
15032+
15033+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15034+ if (static_cpu_has(X86_FEATURE_PCID)) {
15035+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
15036+ unsigned long descriptor[2];
15037+ descriptor[0] = PCID_USER;
15038+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
15039+ } else {
15040+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
15041+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
15042+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
15043+ else
15044+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
15045+ }
15046+ } else
15047+#endif
15048+
15049+ load_cr3(get_cpu_pgd(cpu, kernel));
15050+#else
15051 load_cr3(next->pgd);
15052+#endif
15053
15054 /* stop flush ipis for the previous mm */
15055 cpumask_clear_cpu(cpu, mm_cpumask(prev));
15056@@ -53,9 +106,63 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
15057 */
15058 if (unlikely(prev->context.ldt != next->context.ldt))
15059 load_LDT_nolock(&next->context);
15060- }
15061+
15062+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15063+ if (!(__supported_pte_mask & _PAGE_NX)) {
15064+ smp_mb__before_clear_bit();
15065+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
15066+ smp_mb__after_clear_bit();
15067+ cpu_set(cpu, next->context.cpu_user_cs_mask);
15068+ }
15069+#endif
15070+
15071+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15072+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
15073+ prev->context.user_cs_limit != next->context.user_cs_limit))
15074+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
15075 #ifdef CONFIG_SMP
15076+ else if (unlikely(tlbstate != TLBSTATE_OK))
15077+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
15078+#endif
15079+#endif
15080+
15081+ }
15082 else {
15083+
15084+#ifdef CONFIG_PAX_PER_CPU_PGD
15085+ pax_open_kernel();
15086+
15087+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15088+ if (static_cpu_has(X86_FEATURE_PCID))
15089+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
15090+ else
15091+#endif
15092+
15093+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
15094+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
15095+ pax_close_kernel();
15096+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
15097+
15098+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15099+ if (static_cpu_has(X86_FEATURE_PCID)) {
15100+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
15101+ unsigned long descriptor[2];
15102+ descriptor[0] = PCID_USER;
15103+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
15104+ } else {
15105+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
15106+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
15107+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
15108+ else
15109+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
15110+ }
15111+ } else
15112+#endif
15113+
15114+ load_cr3(get_cpu_pgd(cpu, kernel));
15115+#endif
15116+
15117+#ifdef CONFIG_SMP
15118 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
15119 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
15120
15121@@ -64,11 +171,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
15122 * tlb flush IPI delivery. We must reload CR3
15123 * to make sure to use no freed page tables.
15124 */
15125+
15126+#ifndef CONFIG_PAX_PER_CPU_PGD
15127 load_cr3(next->pgd);
15128+#endif
15129+
15130 load_LDT_nolock(&next->context);
15131+
15132+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15133+ if (!(__supported_pte_mask & _PAGE_NX))
15134+ cpu_set(cpu, next->context.cpu_user_cs_mask);
15135+#endif
15136+
15137+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15138+#ifdef CONFIG_PAX_PAGEEXEC
15139+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
15140+#endif
15141+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
15142+#endif
15143+
15144 }
15145+#endif
15146 }
15147-#endif
15148 }
15149
15150 #define activate_mm(prev, next) \
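
The switch_mm() hunks above encode an address-space identifier (PCID) into the low bits of every CR3 write and use bit 63 (PCID_NOFLUSH) to suppress the implicit TLB flush, so the kernel/user page-table split stays cheap on PCID-capable CPUs. Below is a minimal userspace sketch of just the CR3 value encoding; the constants mirror the patch, while make_cr3() and the sample addresses are hypothetical stand-ins (nothing here touches a real register):

    #include <stdint.h>
    #include <stdio.h>

    #define PCID_KERNEL   UINT64_C(0)         /* ASID 0: kernel page tables */
    #define PCID_USER     UINT64_C(1)         /* ASID 1: userland shadow PGD */
    #define PCID_NOFLUSH  (UINT64_C(1) << 63) /* keep cached TLB entries */

    /* Build the value a CR3 write would carry: page-table base | PCID. */
    static uint64_t make_cr3(uint64_t pgd_phys, uint64_t pcid, int noflush)
    {
            return pgd_phys | pcid | (noflush ? PCID_NOFLUSH : 0);
    }

    int main(void)
    {
            uint64_t pgd = UINT64_C(0x1234000); /* hypothetical PGD address */

            /* flushing switch to the user ASID, non-flushing switch back */
            printf("user:   %#llx\n", (unsigned long long)make_cr3(pgd, PCID_USER, 0));
            printf("kernel: %#llx\n", (unsigned long long)make_cr3(pgd, PCID_KERNEL, 1));
            return 0;
    }
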
15151diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
15152index e3b7819..b257c64 100644
15153--- a/arch/x86/include/asm/module.h
15154+++ b/arch/x86/include/asm/module.h
15155@@ -5,6 +5,7 @@
15156
15157 #ifdef CONFIG_X86_64
15158 /* X86_64 does not define MODULE_PROC_FAMILY */
15159+#define MODULE_PROC_FAMILY ""
15160 #elif defined CONFIG_M486
15161 #define MODULE_PROC_FAMILY "486 "
15162 #elif defined CONFIG_M586
15163@@ -57,8 +58,20 @@
15164 #error unknown processor family
15165 #endif
15166
15167-#ifdef CONFIG_X86_32
15168-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
15169+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15170+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
15171+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
15172+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
15173+#else
15174+#define MODULE_PAX_KERNEXEC ""
15175 #endif
15176
15177+#ifdef CONFIG_PAX_MEMORY_UDEREF
15178+#define MODULE_PAX_UDEREF "UDEREF "
15179+#else
15180+#define MODULE_PAX_UDEREF ""
15181+#endif
15182+
15183+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
15184+
15185 #endif /* _ASM_X86_MODULE_H */
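
The MODULE_ARCH_VERMAGIC change above works purely by string-literal concatenation: each PaX option contributes a fixed token, so a module built with different KERNEXEC/UDEREF settings carries a different vermagic string and is refused at load time. A small sketch of the same preprocessor pattern (the strcmp is a stand-in for the module loader's vermagic check):

    #include <stdio.h>
    #include <string.h>

    #define MODULE_PROC_FAMILY  ""              /* x86-64: empty, as in the hunk */
    #define MODULE_PAX_KERNEXEC "KERNEXEC_OR "  /* assume the OR method is enabled */
    #define MODULE_PAX_UDEREF   "UDEREF "

    #define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

    int main(void)
    {
            const char *kernel = MODULE_ARCH_VERMAGIC;
            const char *module = "KERNEXEC_BTS UDEREF ";  /* built with the BTS method */

            /* adjacent string literals concatenate; a mismatch rejects the module */
            printf("%s\n", strcmp(kernel, module) ? "reject: vermagic mismatch" : "load");
            return 0;
    }
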
15186diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
15187index 86f9301..b365cda 100644
15188--- a/arch/x86/include/asm/nmi.h
15189+++ b/arch/x86/include/asm/nmi.h
15190@@ -40,11 +40,11 @@ struct nmiaction {
15191 nmi_handler_t handler;
15192 unsigned long flags;
15193 const char *name;
15194-};
15195+} __do_const;
15196
15197 #define register_nmi_handler(t, fn, fg, n, init...) \
15198 ({ \
15199- static struct nmiaction init fn##_na = { \
15200+ static const struct nmiaction init fn##_na = { \
15201 .handler = (fn), \
15202 .name = (n), \
15203 .flags = (fg), \
15204@@ -52,7 +52,7 @@ struct nmiaction {
15205 __register_nmi_handler((t), &fn##_na); \
15206 })
15207
15208-int __register_nmi_handler(unsigned int, struct nmiaction *);
15209+int __register_nmi_handler(unsigned int, const struct nmiaction *);
15210
15211 void unregister_nmi_handler(unsigned int, const char *);
15212
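
Tagging struct nmiaction __do_const and constifying the __register_nmi_handler() parameter lets statically defined NMI actions live in read-only memory, so the handler pointer cannot be overwritten in place at runtime. The idiom reduces to plain C constness; a sketch (register_handler() is a hypothetical stand-in, without the kernel's per-type list plumbing):

    #include <stdio.h>

    typedef int (*nmi_handler_t)(unsigned int type, void *regs);

    struct nmiaction {
            nmi_handler_t handler;
            unsigned long flags;
            const char   *name;
    };

    static int my_nmi(unsigned int type, void *regs)
    {
            (void)type; (void)regs;
            return 1;   /* handled */
    }

    /* const object: the compiler emits it into .rodata */
    static const struct nmiaction my_na = { .handler = my_nmi, .name = "demo" };

    static int register_handler(const struct nmiaction *na)
    {
            return na->handler(0, NULL);   /* real code would link na into a list */
    }

    int main(void)
    {
            printf("handled=%d (%s)\n", register_handler(&my_na), my_na.name);
            return 0;
    }
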
15213diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
15214index c878924..21f4889 100644
15215--- a/arch/x86/include/asm/page.h
15216+++ b/arch/x86/include/asm/page.h
15217@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
15218 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
15219
15220 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
15221+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
15222
15223 #define __boot_va(x) __va(x)
15224 #define __boot_pa(x) __pa(x)
15225diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
15226index 0f1ddee..e2fc3d1 100644
15227--- a/arch/x86/include/asm/page_64.h
15228+++ b/arch/x86/include/asm/page_64.h
15229@@ -7,9 +7,9 @@
15230
15231 /* duplicate of the one in bootmem.h */
15232 extern unsigned long max_pfn;
15233-extern unsigned long phys_base;
15234+extern const unsigned long phys_base;
15235
15236-static inline unsigned long __phys_addr_nodebug(unsigned long x)
15237+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
15238 {
15239 unsigned long y = x - __START_KERNEL_map;
15240
15241diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
15242index cfdc9ee..3f7b5d6 100644
15243--- a/arch/x86/include/asm/paravirt.h
15244+++ b/arch/x86/include/asm/paravirt.h
15245@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
15246 return (pmd_t) { ret };
15247 }
15248
15249-static inline pmdval_t pmd_val(pmd_t pmd)
15250+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
15251 {
15252 pmdval_t ret;
15253
15254@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
15255 val);
15256 }
15257
15258+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
15259+{
15260+ pgdval_t val = native_pgd_val(pgd);
15261+
15262+ if (sizeof(pgdval_t) > sizeof(long))
15263+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
15264+ val, (u64)val >> 32);
15265+ else
15266+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
15267+ val);
15268+}
15269+
15270 static inline void pgd_clear(pgd_t *pgdp)
15271 {
15272 set_pgd(pgdp, __pgd(0));
15273@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
15274 pv_mmu_ops.set_fixmap(idx, phys, flags);
15275 }
15276
15277+#ifdef CONFIG_PAX_KERNEXEC
15278+static inline unsigned long pax_open_kernel(void)
15279+{
15280+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
15281+}
15282+
15283+static inline unsigned long pax_close_kernel(void)
15284+{
15285+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
15286+}
15287+#else
15288+static inline unsigned long pax_open_kernel(void) { return 0; }
15289+static inline unsigned long pax_close_kernel(void) { return 0; }
15290+#endif
15291+
15292 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
15293
15294 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
15295@@ -926,7 +953,7 @@ extern void default_banner(void);
15296
15297 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
15298 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
15299-#define PARA_INDIRECT(addr) *%cs:addr
15300+#define PARA_INDIRECT(addr) *%ss:addr
15301 #endif
15302
15303 #define INTERRUPT_RETURN \
15304@@ -1001,6 +1028,21 @@ extern void default_banner(void);
15305 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
15306 CLBR_NONE, \
15307 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
15308+
15309+#define GET_CR0_INTO_RDI \
15310+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
15311+ mov %rax,%rdi
15312+
15313+#define SET_RDI_INTO_CR0 \
15314+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
15315+
15316+#define GET_CR3_INTO_RDI \
15317+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
15318+ mov %rax,%rdi
15319+
15320+#define SET_RDI_INTO_CR3 \
15321+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
15322+
15323 #endif /* CONFIG_X86_32 */
15324
15325 #endif /* __ASSEMBLY__ */
15326diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
15327index 0db1fca..52310cc 100644
15328--- a/arch/x86/include/asm/paravirt_types.h
15329+++ b/arch/x86/include/asm/paravirt_types.h
15330@@ -84,7 +84,7 @@ struct pv_init_ops {
15331 */
15332 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
15333 unsigned long addr, unsigned len);
15334-};
15335+} __no_const;
15336
15337
15338 struct pv_lazy_ops {
15339@@ -98,7 +98,7 @@ struct pv_time_ops {
15340 unsigned long long (*sched_clock)(void);
15341 unsigned long long (*steal_clock)(int cpu);
15342 unsigned long (*get_tsc_khz)(void);
15343-};
15344+} __no_const;
15345
15346 struct pv_cpu_ops {
15347 /* hooks for various privileged instructions */
15348@@ -192,7 +192,7 @@ struct pv_cpu_ops {
15349
15350 void (*start_context_switch)(struct task_struct *prev);
15351 void (*end_context_switch)(struct task_struct *next);
15352-};
15353+} __no_const;
15354
15355 struct pv_irq_ops {
15356 /*
15357@@ -223,7 +223,7 @@ struct pv_apic_ops {
15358 unsigned long start_eip,
15359 unsigned long start_esp);
15360 #endif
15361-};
15362+} __no_const;
15363
15364 struct pv_mmu_ops {
15365 unsigned long (*read_cr2)(void);
15366@@ -313,6 +313,7 @@ struct pv_mmu_ops {
15367 struct paravirt_callee_save make_pud;
15368
15369 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
15370+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
15371 #endif /* PAGETABLE_LEVELS == 4 */
15372 #endif /* PAGETABLE_LEVELS >= 3 */
15373
15374@@ -324,6 +325,12 @@ struct pv_mmu_ops {
15375 an mfn. We can tell which is which from the index. */
15376 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
15377 phys_addr_t phys, pgprot_t flags);
15378+
15379+#ifdef CONFIG_PAX_KERNEXEC
15380+ unsigned long (*pax_open_kernel)(void);
15381+ unsigned long (*pax_close_kernel)(void);
15382+#endif
15383+
15384 };
15385
15386 struct arch_spinlock;
15387@@ -334,7 +341,7 @@ struct pv_lock_ops {
15388 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
15389 int (*spin_trylock)(struct arch_spinlock *lock);
15390 void (*spin_unlock)(struct arch_spinlock *lock);
15391-};
15392+} __no_const;
15393
15394 /* This contains all the paravirt structures: we get a convenient
15395 * number for each function using the offset which we use to indicate
15396diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
15397index b4389a4..7024269 100644
15398--- a/arch/x86/include/asm/pgalloc.h
15399+++ b/arch/x86/include/asm/pgalloc.h
15400@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
15401 pmd_t *pmd, pte_t *pte)
15402 {
15403 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
15404+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
15405+}
15406+
15407+static inline void pmd_populate_user(struct mm_struct *mm,
15408+ pmd_t *pmd, pte_t *pte)
15409+{
15410+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
15411 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
15412 }
15413
15414@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
15415
15416 #ifdef CONFIG_X86_PAE
15417 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
15418+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
15419+{
15420+ pud_populate(mm, pudp, pmd);
15421+}
15422 #else /* !CONFIG_X86_PAE */
15423 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
15424 {
15425 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
15426 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
15427 }
15428+
15429+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
15430+{
15431+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
15432+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
15433+}
15434 #endif /* CONFIG_X86_PAE */
15435
15436 #if PAGETABLE_LEVELS > 3
15437@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
15438 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
15439 }
15440
15441+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
15442+{
15443+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
15444+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
15445+}
15446+
15447 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
15448 {
15449 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
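
The new *_populate_kernel() helpers above differ from the stock populate functions in one bit only: _PAGE_TABLE includes _PAGE_USER while _KERNPG_TABLE does not, so page tables that only ever map kernel data never carry the user-accessible bit. A sketch with the standard x86 flag values (the same 0x63/0x67 pair appears again in the PDE_IDENT_ATTR hunk further down):

    #include <stdio.h>
    #include <stdint.h>

    #define _PAGE_PRESENT  UINT64_C(0x001)
    #define _PAGE_RW       UINT64_C(0x002)
    #define _PAGE_USER     UINT64_C(0x004)
    #define _PAGE_ACCESSED UINT64_C(0x020)
    #define _PAGE_DIRTY    UINT64_C(0x040)

    /* matching the kernel's pgtable_types.h definitions */
    #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
    #define _PAGE_TABLE   (_KERNPG_TABLE | _PAGE_USER)

    int main(void)
    {
            printf("_KERNPG_TABLE = %#llx\n", (unsigned long long)_KERNPG_TABLE); /* 0x63 */
            printf("_PAGE_TABLE   = %#llx\n", (unsigned long long)_PAGE_TABLE);   /* 0x67 */
            printf("delta         = %#llx (_PAGE_USER)\n",
                   (unsigned long long)(_PAGE_TABLE ^ _KERNPG_TABLE));
            return 0;
    }
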
15450diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
15451index f2b489c..4f7e2e5 100644
15452--- a/arch/x86/include/asm/pgtable-2level.h
15453+++ b/arch/x86/include/asm/pgtable-2level.h
15454@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
15455
15456 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
15457 {
15458+ pax_open_kernel();
15459 *pmdp = pmd;
15460+ pax_close_kernel();
15461 }
15462
15463 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
15464diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
15465index 4cc9f2b..5fd9226 100644
15466--- a/arch/x86/include/asm/pgtable-3level.h
15467+++ b/arch/x86/include/asm/pgtable-3level.h
15468@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
15469
15470 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
15471 {
15472+ pax_open_kernel();
15473 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
15474+ pax_close_kernel();
15475 }
15476
15477 static inline void native_set_pud(pud_t *pudp, pud_t pud)
15478 {
15479+ pax_open_kernel();
15480 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
15481+ pax_close_kernel();
15482 }
15483
15484 /*
15485diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
15486index 1e67223..92a9585 100644
15487--- a/arch/x86/include/asm/pgtable.h
15488+++ b/arch/x86/include/asm/pgtable.h
15489@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
15490
15491 #ifndef __PAGETABLE_PUD_FOLDED
15492 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
15493+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
15494 #define pgd_clear(pgd) native_pgd_clear(pgd)
15495 #endif
15496
15497@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
15498
15499 #define arch_end_context_switch(prev) do {} while(0)
15500
15501+#define pax_open_kernel() native_pax_open_kernel()
15502+#define pax_close_kernel() native_pax_close_kernel()
15503 #endif /* CONFIG_PARAVIRT */
15504
15505+#define __HAVE_ARCH_PAX_OPEN_KERNEL
15506+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
15507+
15508+#ifdef CONFIG_PAX_KERNEXEC
15509+static inline unsigned long native_pax_open_kernel(void)
15510+{
15511+ unsigned long cr0;
15512+
15513+ preempt_disable();
15514+ barrier();
15515+ cr0 = read_cr0() ^ X86_CR0_WP;
15516+ BUG_ON(cr0 & X86_CR0_WP);
15517+ write_cr0(cr0);
15518+ return cr0 ^ X86_CR0_WP;
15519+}
15520+
15521+static inline unsigned long native_pax_close_kernel(void)
15522+{
15523+ unsigned long cr0;
15524+
15525+ cr0 = read_cr0() ^ X86_CR0_WP;
15526+ BUG_ON(!(cr0 & X86_CR0_WP));
15527+ write_cr0(cr0);
15528+ barrier();
15529+ preempt_enable_no_resched();
15530+ return cr0 ^ X86_CR0_WP;
15531+}
15532+#else
15533+static inline unsigned long native_pax_open_kernel(void) { return 0; }
15534+static inline unsigned long native_pax_close_kernel(void) { return 0; }
15535+#endif
15536+
15537 /*
15538 * The following only work if pte_present() is true.
15539 * Undefined behaviour if not..
15540 */
15541+static inline int pte_user(pte_t pte)
15542+{
15543+ return pte_val(pte) & _PAGE_USER;
15544+}
15545+
15546 static inline int pte_dirty(pte_t pte)
15547 {
15548 return pte_flags(pte) & _PAGE_DIRTY;
15549@@ -147,6 +187,11 @@ static inline unsigned long pud_pfn(pud_t pud)
15550 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
15551 }
15552
15553+static inline unsigned long pgd_pfn(pgd_t pgd)
15554+{
15555+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
15556+}
15557+
15558 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
15559
15560 static inline int pmd_large(pmd_t pte)
15561@@ -200,9 +245,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
15562 return pte_clear_flags(pte, _PAGE_RW);
15563 }
15564
15565+static inline pte_t pte_mkread(pte_t pte)
15566+{
15567+ return __pte(pte_val(pte) | _PAGE_USER);
15568+}
15569+
15570 static inline pte_t pte_mkexec(pte_t pte)
15571 {
15572- return pte_clear_flags(pte, _PAGE_NX);
15573+#ifdef CONFIG_X86_PAE
15574+ if (__supported_pte_mask & _PAGE_NX)
15575+ return pte_clear_flags(pte, _PAGE_NX);
15576+ else
15577+#endif
15578+ return pte_set_flags(pte, _PAGE_USER);
15579+}
15580+
15581+static inline pte_t pte_exprotect(pte_t pte)
15582+{
15583+#ifdef CONFIG_X86_PAE
15584+ if (__supported_pte_mask & _PAGE_NX)
15585+ return pte_set_flags(pte, _PAGE_NX);
15586+ else
15587+#endif
15588+ return pte_clear_flags(pte, _PAGE_USER);
15589 }
15590
15591 static inline pte_t pte_mkdirty(pte_t pte)
15592@@ -394,6 +459,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
15593 #endif
15594
15595 #ifndef __ASSEMBLY__
15596+
15597+#ifdef CONFIG_PAX_PER_CPU_PGD
15598+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
15599+enum cpu_pgd_type {kernel = 0, user = 1};
15600+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
15601+{
15602+ return cpu_pgd[cpu][type];
15603+}
15604+#endif
15605+
15606 #include <linux/mm_types.h>
15607 #include <linux/log2.h>
15608
15609@@ -529,7 +604,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
15610 * Currently stuck as a macro due to indirect forward reference to
15611 * linux/mmzone.h's __section_mem_map_addr() definition:
15612 */
15613-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
15614+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
15615
15616 /* Find an entry in the second-level page table.. */
15617 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
15618@@ -569,7 +644,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
15619 * Currently stuck as a macro due to indirect forward reference to
15620 * linux/mmzone.h's __section_mem_map_addr() definition:
15621 */
15622-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
15623+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
15624
15625 /* to find an entry in a page-table-directory. */
15626 static inline unsigned long pud_index(unsigned long address)
15627@@ -584,7 +659,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
15628
15629 static inline int pgd_bad(pgd_t pgd)
15630 {
15631- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
15632+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
15633 }
15634
15635 static inline int pgd_none(pgd_t pgd)
15636@@ -607,7 +682,12 @@ static inline int pgd_none(pgd_t pgd)
15637 * pgd_offset() returns a (pgd_t *)
15638 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
15639 */
15640-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
15641+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
15642+
15643+#ifdef CONFIG_PAX_PER_CPU_PGD
15644+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
15645+#endif
15646+
15647 /*
15648 * a shortcut which implies the use of the kernel's pgd, instead
15649 * of a process's
15650@@ -618,6 +698,23 @@ static inline int pgd_none(pgd_t pgd)
15651 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
15652 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
15653
15654+#ifdef CONFIG_X86_32
15655+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
15656+#else
15657+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
15658+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
15659+
15660+#ifdef CONFIG_PAX_MEMORY_UDEREF
15661+#ifdef __ASSEMBLY__
15662+#define pax_user_shadow_base pax_user_shadow_base(%rip)
15663+#else
15664+extern unsigned long pax_user_shadow_base;
15665+extern pgdval_t clone_pgd_mask;
15666+#endif
15667+#endif
15668+
15669+#endif
15670+
15671 #ifndef __ASSEMBLY__
15672
15673 extern int direct_gbpages;
15674@@ -784,11 +881,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
15675 * dst and src can be on the same page, but the range must not overlap,
15676 * and must not cross a page boundary.
15677 */
15678-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
15679+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
15680 {
15681- memcpy(dst, src, count * sizeof(pgd_t));
15682+ pax_open_kernel();
15683+ while (count--)
15684+ *dst++ = *src++;
15685+ pax_close_kernel();
15686 }
15687
15688+#ifdef CONFIG_PAX_PER_CPU_PGD
15689+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
15690+#endif
15691+
15692+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15693+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
15694+#else
15695+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
15696+#endif
15697+
15698 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
15699 static inline int page_level_shift(enum pg_level level)
15700 {
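
native_pax_open_kernel()/native_pax_close_kernel() above flip CR0.WP with an XOR and BUG() if the bit was not in the expected state, which keeps open/close calls strictly paired: open may only run while WP is set, close only while it is clear. A userspace model of that invariant, with an ordinary variable standing in for CR0 (read_cr0()/write_cr0() obviously cannot run outside ring 0, and the preemption hooks are omitted):

    #include <assert.h>
    #include <stdio.h>

    #define X86_CR0_WP 0x00010000UL

    static unsigned long fake_cr0 = 0x80050033UL;  /* typical boot value, WP set */

    static void open_kernel(void)
    {
            unsigned long cr0 = fake_cr0 ^ X86_CR0_WP;
            assert(!(cr0 & X86_CR0_WP)); /* BUG_ON(): WP had to be set before */
            fake_cr0 = cr0;              /* "write_cr0": writes now bypass RO */
    }

    static void close_kernel(void)
    {
            unsigned long cr0 = fake_cr0 ^ X86_CR0_WP;
            assert(cr0 & X86_CR0_WP);    /* BUG_ON(): WP had to be clear before */
            fake_cr0 = cr0;              /* "write_cr0": protection restored */
    }

    int main(void)
    {
            open_kernel();
            /* ... a read-only page-table word would be updated here ... */
            close_kernel();
            printf("WP restored: %s\n", (fake_cr0 & X86_CR0_WP) ? "yes" : "no");
            return 0;
    }
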
15701diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
15702index 9ee3221..b979c6b 100644
15703--- a/arch/x86/include/asm/pgtable_32.h
15704+++ b/arch/x86/include/asm/pgtable_32.h
15705@@ -25,9 +25,6 @@
15706 struct mm_struct;
15707 struct vm_area_struct;
15708
15709-extern pgd_t swapper_pg_dir[1024];
15710-extern pgd_t initial_page_table[1024];
15711-
15712 static inline void pgtable_cache_init(void) { }
15713 static inline void check_pgt_cache(void) { }
15714 void paging_init(void);
15715@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
15716 # include <asm/pgtable-2level.h>
15717 #endif
15718
15719+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
15720+extern pgd_t initial_page_table[PTRS_PER_PGD];
15721+#ifdef CONFIG_X86_PAE
15722+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
15723+#endif
15724+
15725 #if defined(CONFIG_HIGHPTE)
15726 #define pte_offset_map(dir, address) \
15727 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
15728@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
15729 /* Clear a kernel PTE and flush it from the TLB */
15730 #define kpte_clear_flush(ptep, vaddr) \
15731 do { \
15732+ pax_open_kernel(); \
15733 pte_clear(&init_mm, (vaddr), (ptep)); \
15734+ pax_close_kernel(); \
15735 __flush_tlb_one((vaddr)); \
15736 } while (0)
15737
15738 #endif /* !__ASSEMBLY__ */
15739
15740+#define HAVE_ARCH_UNMAPPED_AREA
15741+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
15742+
15743 /*
15744 * kern_addr_valid() is (1) for FLATMEM and (0) for
15745 * SPARSEMEM and DISCONTIGMEM
15746diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
15747index ed5903b..c7fe163 100644
15748--- a/arch/x86/include/asm/pgtable_32_types.h
15749+++ b/arch/x86/include/asm/pgtable_32_types.h
15750@@ -8,7 +8,7 @@
15751 */
15752 #ifdef CONFIG_X86_PAE
15753 # include <asm/pgtable-3level_types.h>
15754-# define PMD_SIZE (1UL << PMD_SHIFT)
15755+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
15756 # define PMD_MASK (~(PMD_SIZE - 1))
15757 #else
15758 # include <asm/pgtable-2level_types.h>
15759@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
15760 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
15761 #endif
15762
15763+#ifdef CONFIG_PAX_KERNEXEC
15764+#ifndef __ASSEMBLY__
15765+extern unsigned char MODULES_EXEC_VADDR[];
15766+extern unsigned char MODULES_EXEC_END[];
15767+#endif
15768+#include <asm/boot.h>
15769+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
15770+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
15771+#else
15772+#define ktla_ktva(addr) (addr)
15773+#define ktva_ktla(addr) (addr)
15774+#endif
15775+
15776 #define MODULES_VADDR VMALLOC_START
15777 #define MODULES_END VMALLOC_END
15778 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
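
Under KERNEXEC on i386 the address the kernel text executes at and the virtual address its bytes are mapped at differ by a constant, LOAD_PHYSICAL_ADDR + PAGE_OFFSET, which is all the ktla_ktva()/ktva_ktla() pair above encodes. A sketch with typical i386 values (both constants are configuration-dependent and the sample address is arbitrary):

    #include <stdio.h>

    #define PAGE_OFFSET        0xC0000000UL  /* typical 3G/1G split */
    #define LOAD_PHYSICAL_ADDR 0x01000000UL  /* typical 16 MiB load address */

    #define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
    #define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

    int main(void)
    {
            unsigned long ktla = 0x00100000UL;     /* execution-view address */
            unsigned long ktva = ktla_ktva(ktla);  /* mapping-view address */

            printf("ktla %#lx -> ktva %#lx\n", ktla, ktva);
            printf("round trip ok: %s\n", ktva_ktla(ktva) == ktla ? "yes" : "no");
            return 0;
    }
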
15779diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
15780index e22c1db..23a625a 100644
15781--- a/arch/x86/include/asm/pgtable_64.h
15782+++ b/arch/x86/include/asm/pgtable_64.h
15783@@ -16,10 +16,14 @@
15784
15785 extern pud_t level3_kernel_pgt[512];
15786 extern pud_t level3_ident_pgt[512];
15787+extern pud_t level3_vmalloc_start_pgt[512];
15788+extern pud_t level3_vmalloc_end_pgt[512];
15789+extern pud_t level3_vmemmap_pgt[512];
15790+extern pud_t level2_vmemmap_pgt[512];
15791 extern pmd_t level2_kernel_pgt[512];
15792 extern pmd_t level2_fixmap_pgt[512];
15793-extern pmd_t level2_ident_pgt[512];
15794-extern pgd_t init_level4_pgt[];
15795+extern pmd_t level2_ident_pgt[512*2];
15796+extern pgd_t init_level4_pgt[512];
15797
15798 #define swapper_pg_dir init_level4_pgt
15799
15800@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
15801
15802 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
15803 {
15804+ pax_open_kernel();
15805 *pmdp = pmd;
15806+ pax_close_kernel();
15807 }
15808
15809 static inline void native_pmd_clear(pmd_t *pmd)
15810@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
15811
15812 static inline void native_set_pud(pud_t *pudp, pud_t pud)
15813 {
15814+ pax_open_kernel();
15815 *pudp = pud;
15816+ pax_close_kernel();
15817 }
15818
15819 static inline void native_pud_clear(pud_t *pud)
15820@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
15821
15822 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
15823 {
15824+ pax_open_kernel();
15825+ *pgdp = pgd;
15826+ pax_close_kernel();
15827+}
15828+
15829+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
15830+{
15831 *pgdp = pgd;
15832 }
15833
15834diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
15835index 2d88344..4679fc3 100644
15836--- a/arch/x86/include/asm/pgtable_64_types.h
15837+++ b/arch/x86/include/asm/pgtable_64_types.h
15838@@ -61,6 +61,11 @@ typedef struct { pteval_t pte; } pte_t;
15839 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
15840 #define MODULES_END _AC(0xffffffffff000000, UL)
15841 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
15842+#define MODULES_EXEC_VADDR MODULES_VADDR
15843+#define MODULES_EXEC_END MODULES_END
15844+
15845+#define ktla_ktva(addr) (addr)
15846+#define ktva_ktla(addr) (addr)
15847
15848 #define EARLY_DYNAMIC_PAGE_TABLES 64
15849
15850diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
15851index e642300..0ef8f31 100644
15852--- a/arch/x86/include/asm/pgtable_types.h
15853+++ b/arch/x86/include/asm/pgtable_types.h
15854@@ -16,13 +16,12 @@
15855 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
15856 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
15857 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
15858-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
15859+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
15860 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
15861 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
15862 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
15863-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
15864-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
15865-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
15866+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
15867+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
15868 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
15869
15870 /* If _PAGE_BIT_PRESENT is clear, we use these: */
15871@@ -40,7 +39,6 @@
15872 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
15873 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
15874 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
15875-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
15876 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
15877 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
15878 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
15879@@ -57,8 +55,10 @@
15880
15881 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
15882 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
15883-#else
15884+#elif defined(CONFIG_KMEMCHECK)
15885 #define _PAGE_NX (_AT(pteval_t, 0))
15886+#else
15887+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
15888 #endif
15889
15890 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
15891@@ -116,6 +116,9 @@
15892 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
15893 _PAGE_ACCESSED)
15894
15895+#define PAGE_READONLY_NOEXEC PAGE_READONLY
15896+#define PAGE_SHARED_NOEXEC PAGE_SHARED
15897+
15898 #define __PAGE_KERNEL_EXEC \
15899 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
15900 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
15901@@ -126,7 +129,7 @@
15902 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
15903 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
15904 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
15905-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
15906+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
15907 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
15908 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
15909 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
15910@@ -188,8 +191,8 @@
15911 * bits are combined, this will allow the user to access the high address mapped
15912 * VDSO in the presence of CONFIG_COMPAT_VDSO
15913 */
15914-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
15915-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
15916+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
15917+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
15918 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
15919 #endif
15920
15921@@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
15922 {
15923 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
15924 }
15925+#endif
15926
15927+#if PAGETABLE_LEVELS == 3
15928+#include <asm-generic/pgtable-nopud.h>
15929+#endif
15930+
15931+#if PAGETABLE_LEVELS == 2
15932+#include <asm-generic/pgtable-nopmd.h>
15933+#endif
15934+
15935+#ifndef __ASSEMBLY__
15936 #if PAGETABLE_LEVELS > 3
15937 typedef struct { pudval_t pud; } pud_t;
15938
15939@@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud)
15940 return pud.pud;
15941 }
15942 #else
15943-#include <asm-generic/pgtable-nopud.h>
15944-
15945 static inline pudval_t native_pud_val(pud_t pud)
15946 {
15947 return native_pgd_val(pud.pgd);
15948@@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
15949 return pmd.pmd;
15950 }
15951 #else
15952-#include <asm-generic/pgtable-nopmd.h>
15953-
15954 static inline pmdval_t native_pmd_val(pmd_t pmd)
15955 {
15956 return native_pgd_val(pmd.pud.pgd);
15957@@ -303,7 +312,6 @@ typedef struct page *pgtable_t;
15958
15959 extern pteval_t __supported_pte_mask;
15960 extern void set_nx(void);
15961-extern int nx_enabled;
15962
15963 #define pgprot_writecombine pgprot_writecombine
15964 extern pgprot_t pgprot_writecombine(pgprot_t prot);
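
When hardware NX is unavailable (non-PAE 32-bit without kmemcheck), the hunk above repurposes the software-only bit 11 (_PAGE_BIT_HIDDEN) as _PAGE_NX, so generic code can still set and test an execute-protection flag and PAGEEXEC can emulate it. A sketch of the three-way #if ladder and the resulting values (toggle the CONFIG_* defines to exercise each branch):

    #include <stdio.h>
    #include <stdint.h>

    #define _PAGE_BIT_HIDDEN 11
    #define _PAGE_BIT_NX     63

    #define CONFIG_X86_PAE 1          /* pick one branch of the ladder */
    /* #define CONFIG_KMEMCHECK 1 */

    #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
    #define _PAGE_NX (UINT64_C(1) << _PAGE_BIT_NX)     /* real hardware NX */
    #elif defined(CONFIG_KMEMCHECK)
    #define _PAGE_NX UINT64_C(0)                       /* bit 11 already taken */
    #else
    #define _PAGE_NX (UINT64_C(1) << _PAGE_BIT_HIDDEN) /* software NX for PAGEEXEC */
    #endif

    int main(void)
    {
            printf("_PAGE_NX = %#llx\n", (unsigned long long)_PAGE_NX);
            return 0;
    }
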
15965diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
15966index 22224b3..c5d8d7d 100644
15967--- a/arch/x86/include/asm/processor.h
15968+++ b/arch/x86/include/asm/processor.h
15969@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
15970 : "memory");
15971 }
15972
15973+/* invpcid (%rdx),%rax */
15974+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
15975+
15976+#define INVPCID_SINGLE_ADDRESS 0UL
15977+#define INVPCID_SINGLE_CONTEXT 1UL
15978+#define INVPCID_ALL_GLOBAL 2UL
15979+#define INVPCID_ALL_MONGLOBAL 3UL
15980+
15981+#define PCID_KERNEL 0UL
15982+#define PCID_USER 1UL
15983+#define PCID_NOFLUSH (1UL << 63)
15984+
15985 static inline void load_cr3(pgd_t *pgdir)
15986 {
15987- write_cr3(__pa(pgdir));
15988+ write_cr3(__pa(pgdir) | PCID_KERNEL);
15989 }
15990
15991 #ifdef CONFIG_X86_32
15992@@ -282,7 +294,7 @@ struct tss_struct {
15993
15994 } ____cacheline_aligned;
15995
15996-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
15997+extern struct tss_struct init_tss[NR_CPUS];
15998
15999 /*
16000 * Save the original ist values for checking stack pointers during debugging
16001@@ -452,6 +464,7 @@ struct thread_struct {
16002 unsigned short ds;
16003 unsigned short fsindex;
16004 unsigned short gsindex;
16005+ unsigned short ss;
16006 #endif
16007 #ifdef CONFIG_X86_32
16008 unsigned long ip;
16009@@ -823,11 +836,18 @@ static inline void spin_lock_prefetch(const void *x)
16010 */
16011 #define TASK_SIZE PAGE_OFFSET
16012 #define TASK_SIZE_MAX TASK_SIZE
16013+
16014+#ifdef CONFIG_PAX_SEGMEXEC
16015+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
16016+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
16017+#else
16018 #define STACK_TOP TASK_SIZE
16019-#define STACK_TOP_MAX STACK_TOP
16020+#endif
16021+
16022+#define STACK_TOP_MAX TASK_SIZE
16023
16024 #define INIT_THREAD { \
16025- .sp0 = sizeof(init_stack) + (long)&init_stack, \
16026+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
16027 .vm86_info = NULL, \
16028 .sysenter_cs = __KERNEL_CS, \
16029 .io_bitmap_ptr = NULL, \
16030@@ -841,7 +861,7 @@ static inline void spin_lock_prefetch(const void *x)
16031 */
16032 #define INIT_TSS { \
16033 .x86_tss = { \
16034- .sp0 = sizeof(init_stack) + (long)&init_stack, \
16035+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
16036 .ss0 = __KERNEL_DS, \
16037 .ss1 = __KERNEL_CS, \
16038 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
16039@@ -852,11 +872,7 @@ static inline void spin_lock_prefetch(const void *x)
16040 extern unsigned long thread_saved_pc(struct task_struct *tsk);
16041
16042 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
16043-#define KSTK_TOP(info) \
16044-({ \
16045- unsigned long *__ptr = (unsigned long *)(info); \
16046- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
16047-})
16048+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
16049
16050 /*
16051 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
16052@@ -871,7 +887,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
16053 #define task_pt_regs(task) \
16054 ({ \
16055 struct pt_regs *__regs__; \
16056- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
16057+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
16058 __regs__ - 1; \
16059 })
16060
16061@@ -881,13 +897,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
16062 /*
16063 * User space process size. 47bits minus one guard page.
16064 */
16065-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
16066+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
16067
16068 /* This decides where the kernel will search for a free chunk of vm
16069 * space during mmap's.
16070 */
16071 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
16072- 0xc0000000 : 0xFFFFe000)
16073+ 0xc0000000 : 0xFFFFf000)
16074
16075 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
16076 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
16077@@ -898,11 +914,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
16078 #define STACK_TOP_MAX TASK_SIZE_MAX
16079
16080 #define INIT_THREAD { \
16081- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
16082+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
16083 }
16084
16085 #define INIT_TSS { \
16086- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
16087+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
16088 }
16089
16090 /*
16091@@ -930,6 +946,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
16092 */
16093 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
16094
16095+#ifdef CONFIG_PAX_SEGMEXEC
16096+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
16097+#endif
16098+
16099 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
16100
16101 /* Get/set a process' ability to use the timestamp counter instruction */
16102@@ -942,7 +962,8 @@ extern int set_tsc_mode(unsigned int val);
16103 extern u16 amd_get_nb_id(int cpu);
16104
16105 struct aperfmperf {
16106- u64 aperf, mperf;
16107+ u64 aperf __intentional_overflow(0);
16108+ u64 mperf __intentional_overflow(0);
16109 };
16110
16111 static inline void get_aperfmperf(struct aperfmperf *am)
16112@@ -970,7 +991,7 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
16113 return ratio;
16114 }
16115
16116-extern unsigned long arch_align_stack(unsigned long sp);
16117+#define arch_align_stack(x) ((x) & ~0xfUL)
16118 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
16119
16120 void default_idle(void);
16121@@ -980,6 +1001,6 @@ bool xen_set_default_idle(void);
16122 #define xen_set_default_idle 0
16123 #endif
16124
16125-void stop_this_cpu(void *dummy);
16126+void stop_this_cpu(void *dummy) __noreturn;
16127
16128 #endif /* _ASM_X86_PROCESSOR_H */
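
The arch_align_stack() change above replaces a function with a pure mask: the stock implementation subtracts a random 0-8191 byte amount before aligning, so the macro both forces 16-byte alignment and drops that per-exec jitter (PaX supplies its own, larger stack randomization elsewhere). The two behaviors side by side, with rand() as a simplified stand-in for the kernel's get_random_int():

    #include <stdio.h>
    #include <stdlib.h>

    /* stock behavior, simplified: random downward shift, then align */
    static unsigned long stock_align_stack(unsigned long sp)
    {
            sp -= (unsigned int)rand() % 8192;
            return sp & ~0xfUL;
    }

    /* the patch: alignment only */
    #define arch_align_stack(x) ((x) & ~0xfUL)

    int main(void)
    {
            unsigned long sp = 0x7fffffffe123UL;

            printf("stock: %#lx\n", stock_align_stack(sp));
            printf("patch: %#lx\n", arch_align_stack(sp));
            return 0;
    }
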
16129diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
16130index 942a086..6c26446 100644
16131--- a/arch/x86/include/asm/ptrace.h
16132+++ b/arch/x86/include/asm/ptrace.h
16133@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
16134 }
16135
16136 /*
16137- * user_mode_vm(regs) determines whether a register set came from user mode.
16138+ * user_mode(regs) determines whether a register set came from user mode.
16139 * This is true if V8086 mode was enabled OR if the register set was from
16140 * protected mode with RPL-3 CS value. This tricky test checks that with
16141 * one comparison. Many places in the kernel can bypass this full check
16142- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
16143+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
16144+ * be used.
16145 */
16146-static inline int user_mode(struct pt_regs *regs)
16147+static inline int user_mode_novm(struct pt_regs *regs)
16148 {
16149 #ifdef CONFIG_X86_32
16150 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
16151 #else
16152- return !!(regs->cs & 3);
16153+ return !!(regs->cs & SEGMENT_RPL_MASK);
16154 #endif
16155 }
16156
16157-static inline int user_mode_vm(struct pt_regs *regs)
16158+static inline int user_mode(struct pt_regs *regs)
16159 {
16160 #ifdef CONFIG_X86_32
16161 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
16162 USER_RPL;
16163 #else
16164- return user_mode(regs);
16165+ return user_mode_novm(regs);
16166 #endif
16167 }
16168
16169@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
16170 #ifdef CONFIG_X86_64
16171 static inline bool user_64bit_mode(struct pt_regs *regs)
16172 {
16173+ unsigned long cs = regs->cs & 0xffff;
16174 #ifndef CONFIG_PARAVIRT
16175 /*
16176 * On non-paravirt systems, this is the only long mode CPL 3
16177 * selector. We do not allow long mode selectors in the LDT.
16178 */
16179- return regs->cs == __USER_CS;
16180+ return cs == __USER_CS;
16181 #else
16182 /* Headers are too twisted for this to go in paravirt.h. */
16183- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
16184+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
16185 #endif
16186 }
16187
16188@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
16189 * Traps from the kernel do not save sp and ss.
16190 * Use the helper function to retrieve sp.
16191 */
16192- if (offset == offsetof(struct pt_regs, sp) &&
16193- regs->cs == __KERNEL_CS)
16194- return kernel_stack_pointer(regs);
16195+ if (offset == offsetof(struct pt_regs, sp)) {
16196+ unsigned long cs = regs->cs & 0xffff;
16197+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
16198+ return kernel_stack_pointer(regs);
16199+ }
16200 #endif
16201 return *(unsigned long *)((unsigned long)regs + offset);
16202 }
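
The ptrace.h hunk swaps the names so that the safe check, the one that also accounts for V8086 mode, becomes the default user_mode(); the core test is just the requested privilege level in the low two bits of CS. A sketch of the i386 form with a few sample selector values:

    #include <stdio.h>

    #define SEGMENT_RPL_MASK 0x3UL
    #define USER_RPL         0x3UL
    #define X86_VM_MASK      0x00020000UL  /* EFLAGS.VM */

    /* from the hunk: RPL == 3 or virtual-8086 mode counts as user mode */
    static int user_mode(unsigned long cs, unsigned long eflags)
    {
            return ((cs & SEGMENT_RPL_MASK) | (eflags & X86_VM_MASK)) >= USER_RPL;
    }

    int main(void)
    {
            printf("kernel CS 0x10: %d\n", user_mode(0x10, 0));           /* 0 */
            printf("user   CS 0x73: %d\n", user_mode(0x73, 0));           /* 1 */
            printf("v8086  CS 0x00: %d\n", user_mode(0x00, 0x00020000));  /* 1 */
            return 0;
    }
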
16203diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
16204index 9c6b890..5305f53 100644
16205--- a/arch/x86/include/asm/realmode.h
16206+++ b/arch/x86/include/asm/realmode.h
16207@@ -22,16 +22,14 @@ struct real_mode_header {
16208 #endif
16209 /* APM/BIOS reboot */
16210 u32 machine_real_restart_asm;
16211-#ifdef CONFIG_X86_64
16212 u32 machine_real_restart_seg;
16213-#endif
16214 };
16215
16216 /* This must match data at trampoline_32/64.S */
16217 struct trampoline_header {
16218 #ifdef CONFIG_X86_32
16219 u32 start;
16220- u16 gdt_pad;
16221+ u16 boot_cs;
16222 u16 gdt_limit;
16223 u32 gdt_base;
16224 #else
16225diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
16226index a82c4f1..ac45053 100644
16227--- a/arch/x86/include/asm/reboot.h
16228+++ b/arch/x86/include/asm/reboot.h
16229@@ -6,13 +6,13 @@
16230 struct pt_regs;
16231
16232 struct machine_ops {
16233- void (*restart)(char *cmd);
16234- void (*halt)(void);
16235- void (*power_off)(void);
16236+ void (* __noreturn restart)(char *cmd);
16237+ void (* __noreturn halt)(void);
16238+ void (* __noreturn power_off)(void);
16239 void (*shutdown)(void);
16240 void (*crash_shutdown)(struct pt_regs *);
16241- void (*emergency_restart)(void);
16242-};
16243+ void (* __noreturn emergency_restart)(void);
16244+} __no_const;
16245
16246 extern struct machine_ops machine_ops;
16247
16248diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
16249index cad82c9..2e5c5c1 100644
16250--- a/arch/x86/include/asm/rwsem.h
16251+++ b/arch/x86/include/asm/rwsem.h
16252@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
16253 {
16254 asm volatile("# beginning down_read\n\t"
16255 LOCK_PREFIX _ASM_INC "(%1)\n\t"
16256+
16257+#ifdef CONFIG_PAX_REFCOUNT
16258+ "jno 0f\n"
16259+ LOCK_PREFIX _ASM_DEC "(%1)\n"
16260+ "int $4\n0:\n"
16261+ _ASM_EXTABLE(0b, 0b)
16262+#endif
16263+
16264 /* adds 0x00000001 */
16265 " jns 1f\n"
16266 " call call_rwsem_down_read_failed\n"
16267@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
16268 "1:\n\t"
16269 " mov %1,%2\n\t"
16270 " add %3,%2\n\t"
16271+
16272+#ifdef CONFIG_PAX_REFCOUNT
16273+ "jno 0f\n"
16274+ "sub %3,%2\n"
16275+ "int $4\n0:\n"
16276+ _ASM_EXTABLE(0b, 0b)
16277+#endif
16278+
16279 " jle 2f\n\t"
16280 LOCK_PREFIX " cmpxchg %2,%0\n\t"
16281 " jnz 1b\n\t"
16282@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
16283 long tmp;
16284 asm volatile("# beginning down_write\n\t"
16285 LOCK_PREFIX " xadd %1,(%2)\n\t"
16286+
16287+#ifdef CONFIG_PAX_REFCOUNT
16288+ "jno 0f\n"
16289+ "mov %1,(%2)\n"
16290+ "int $4\n0:\n"
16291+ _ASM_EXTABLE(0b, 0b)
16292+#endif
16293+
16294 /* adds 0xffff0001, returns the old value */
16295 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
16296 /* was the active mask 0 before? */
16297@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
16298 long tmp;
16299 asm volatile("# beginning __up_read\n\t"
16300 LOCK_PREFIX " xadd %1,(%2)\n\t"
16301+
16302+#ifdef CONFIG_PAX_REFCOUNT
16303+ "jno 0f\n"
16304+ "mov %1,(%2)\n"
16305+ "int $4\n0:\n"
16306+ _ASM_EXTABLE(0b, 0b)
16307+#endif
16308+
16309 /* subtracts 1, returns the old value */
16310 " jns 1f\n\t"
16311 " call call_rwsem_wake\n" /* expects old value in %edx */
16312@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
16313 long tmp;
16314 asm volatile("# beginning __up_write\n\t"
16315 LOCK_PREFIX " xadd %1,(%2)\n\t"
16316+
16317+#ifdef CONFIG_PAX_REFCOUNT
16318+ "jno 0f\n"
16319+ "mov %1,(%2)\n"
16320+ "int $4\n0:\n"
16321+ _ASM_EXTABLE(0b, 0b)
16322+#endif
16323+
16324 /* subtracts 0xffff0001, returns the old value */
16325 " jns 1f\n\t"
16326 " call call_rwsem_wake\n" /* expects old value in %edx */
16327@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
16328 {
16329 asm volatile("# beginning __downgrade_write\n\t"
16330 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
16331+
16332+#ifdef CONFIG_PAX_REFCOUNT
16333+ "jno 0f\n"
16334+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
16335+ "int $4\n0:\n"
16336+ _ASM_EXTABLE(0b, 0b)
16337+#endif
16338+
16339 /*
16340 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
16341 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
16342@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
16343 */
16344 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
16345 {
16346- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
16347+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
16348+
16349+#ifdef CONFIG_PAX_REFCOUNT
16350+ "jno 0f\n"
16351+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
16352+ "int $4\n0:\n"
16353+ _ASM_EXTABLE(0b, 0b)
16354+#endif
16355+
16356 : "+m" (sem->count)
16357 : "er" (delta));
16358 }
16359@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
16360 */
16361 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
16362 {
16363- return delta + xadd(&sem->count, delta);
16364+ return delta + xadd_check_overflow(&sem->count, delta);
16365 }
16366
16367 #endif /* __KERNEL__ */
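
Every rwsem fast path above gains the same four-instruction guard: perform the locked arithmetic, "jno" past the fixup when the overflow flag is clear, otherwise undo the operation and raise int $4, whose handler kills the offending task. The asm commits first and rolls back on overflow; the portable sketch below checks first with a compiler builtin, which is equivalent for illustration (refcount_add_checked() is a hypothetical name):

    #include <stdio.h>
    #include <stdlib.h>
    #include <limits.h>

    /* model of the "lock add; jno 1f; lock sub; int $4; 1:" pattern */
    static void refcount_add_checked(long *count, long delta)
    {
            long newval;

            if (__builtin_add_overflow(*count, delta, &newval)) {
                    /* overflow: counter stays unchanged, then trap */
                    fprintf(stderr, "refcount overflow detected\n");
                    abort();    /* stands in for int $4 -> kernel trap handler */
            }
            *count = newval;    /* no overflow: commit */
    }

    int main(void)
    {
            long sem_count = LONG_MAX - 1;

            refcount_add_checked(&sem_count, 1);  /* fine */
            refcount_add_checked(&sem_count, 1);  /* overflows -> abort */
            return 0;
    }
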
16368diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
16369index c48a950..bc40804 100644
16370--- a/arch/x86/include/asm/segment.h
16371+++ b/arch/x86/include/asm/segment.h
16372@@ -64,10 +64,15 @@
16373 * 26 - ESPFIX small SS
16374 * 27 - per-cpu [ offset to per-cpu data area ]
16375 * 28 - stack_canary-20 [ for stack protector ]
16376- * 29 - unused
16377- * 30 - unused
16378+ * 29 - PCI BIOS CS
16379+ * 30 - PCI BIOS DS
16380 * 31 - TSS for double fault handler
16381 */
16382+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
16383+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
16384+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
16385+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
16386+
16387 #define GDT_ENTRY_TLS_MIN 6
16388 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
16389
16390@@ -79,6 +84,8 @@
16391
16392 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
16393
16394+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
16395+
16396 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
16397
16398 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
16399@@ -104,6 +111,12 @@
16400 #define __KERNEL_STACK_CANARY 0
16401 #endif
16402
16403+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
16404+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
16405+
16406+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
16407+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
16408+
16409 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
16410
16411 /*
16412@@ -141,7 +154,7 @@
16413 */
16414
16415 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
16416-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
16417+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
16418
16419
16420 #else
16421@@ -165,6 +178,8 @@
16422 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
16423 #define __USER32_DS __USER_DS
16424
16425+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
16426+
16427 #define GDT_ENTRY_TSS 8 /* needs two entries */
16428 #define GDT_ENTRY_LDT 10 /* needs two entries */
16429 #define GDT_ENTRY_TLS_MIN 12
16430@@ -173,6 +188,8 @@
16431 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
16432 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
16433
16434+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
16435+
16436 /* TLS indexes for 64bit - hardcoded in arch_prctl */
16437 #define FS_TLS 0
16438 #define GS_TLS 1
16439@@ -180,12 +197,14 @@
16440 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
16441 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
16442
16443-#define GDT_ENTRIES 16
16444+#define GDT_ENTRIES 17
16445
16446 #endif
16447
16448 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
16449+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
16450 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
16451+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
16452 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
16453 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
16454 #ifndef CONFIG_PARAVIRT
16455@@ -265,7 +284,7 @@ static inline unsigned long get_limit(unsigned long segment)
16456 {
16457 unsigned long __limit;
16458 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
16459- return __limit + 1;
16460+ return __limit;
16461 }
16462
16463 #endif /* !__ASSEMBLY__ */
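
The new selectors above all follow the standard encoding: a selector is the GDT index times 8 (shifted left past the TI and RPL bits), with the low two bits holding the requested privilege level, hence index*8 for kernel selectors and index*8+3 for user ones. A sketch of that arithmetic, assuming GDT_ENTRY_KERNEL_BASE is 12 as in this kernel's 32-bit layout:

    #include <stdio.h>

    #define SELECTOR(index, rpl) (((index) << 3) | (rpl))

    int main(void)
    {
            printf("__KERNEXEC_KERNEL_CS = %#x\n", SELECTOR(4, 0));       /* 0x20 */
            printf("__PCIBIOS_CS         = %#x\n", SELECTOR(12 + 17, 0)); /* 0xe8 */
            printf("__USER_CS            = %#x\n", SELECTOR(14, 3));      /* 0x73 */
            return 0;
    }
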
16464diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
16465index 8d3120f..352b440 100644
16466--- a/arch/x86/include/asm/smap.h
16467+++ b/arch/x86/include/asm/smap.h
16468@@ -25,11 +25,40 @@
16469
16470 #include <asm/alternative-asm.h>
16471
16472+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16473+#define ASM_PAX_OPEN_USERLAND \
16474+ 661: jmp 663f; \
16475+ .pushsection .altinstr_replacement, "a" ; \
16476+ 662: pushq %rax; nop; \
16477+ .popsection ; \
16478+ .pushsection .altinstructions, "a" ; \
16479+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
16480+ .popsection ; \
16481+ call __pax_open_userland; \
16482+ popq %rax; \
16483+ 663:
16484+
16485+#define ASM_PAX_CLOSE_USERLAND \
16486+ 661: jmp 663f; \
16487+ .pushsection .altinstr_replacement, "a" ; \
16488+ 662: pushq %rax; nop; \
16489+ .popsection; \
16490+ .pushsection .altinstructions, "a" ; \
16491+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
16492+ .popsection; \
16493+ call __pax_close_userland; \
16494+ popq %rax; \
16495+ 663:
16496+#else
16497+#define ASM_PAX_OPEN_USERLAND
16498+#define ASM_PAX_CLOSE_USERLAND
16499+#endif
16500+
16501 #ifdef CONFIG_X86_SMAP
16502
16503 #define ASM_CLAC \
16504 661: ASM_NOP3 ; \
16505- .pushsection .altinstr_replacement, "ax" ; \
16506+ .pushsection .altinstr_replacement, "a" ; \
16507 662: __ASM_CLAC ; \
16508 .popsection ; \
16509 .pushsection .altinstructions, "a" ; \
16510@@ -38,7 +67,7 @@
16511
16512 #define ASM_STAC \
16513 661: ASM_NOP3 ; \
16514- .pushsection .altinstr_replacement, "ax" ; \
16515+ .pushsection .altinstr_replacement, "a" ; \
16516 662: __ASM_STAC ; \
16517 .popsection ; \
16518 .pushsection .altinstructions, "a" ; \
16519@@ -56,6 +85,37 @@
16520
16521 #include <asm/alternative.h>
16522
16523+#define __HAVE_ARCH_PAX_OPEN_USERLAND
16524+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
16525+
16526+extern void __pax_open_userland(void);
16527+static __always_inline unsigned long pax_open_userland(void)
16528+{
16529+
16530+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16531+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
16532+ :
16533+ : [open] "i" (__pax_open_userland)
16534+ : "memory", "rax");
16535+#endif
16536+
16537+ return 0;
16538+}
16539+
16540+extern void __pax_close_userland(void);
16541+static __always_inline unsigned long pax_close_userland(void)
16542+{
16543+
16544+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16545+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
16546+ :
16547+ : [close] "i" (__pax_close_userland)
16548+ : "memory", "rax");
16549+#endif
16550+
16551+ return 0;
16552+}
16553+
16554 #ifdef CONFIG_X86_SMAP
16555
16556 static __always_inline void clac(void)
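
pax_open_userland()/pax_close_userland() above lean on the kernel's ALTERNATIVE mechanism: the instruction stream holds a 5-byte NOP that boot-time patching replaces with a call only on CPUs advertising the PaX-defined STRONGUDEREF feature. Functionally that is a feature-gated call; a runtime-checked C equivalent (the real mechanism rewrites the code once at boot instead of branching every time):

    #include <stdbool.h>
    #include <stdio.h>

    static bool cpu_has_stronguderef;  /* set once from CPUID in the real kernel */

    static void __pax_open_userland(void)  { puts("userland access opened"); }
    static void __pax_close_userland(void) { puts("userland access closed"); }

    static inline void pax_open_userland(void)
    {
            if (cpu_has_stronguderef)  /* ALTERNATIVE: nop5 vs. call, patched at boot */
                    __pax_open_userland();
    }

    static inline void pax_close_userland(void)
    {
            if (cpu_has_stronguderef)
                    __pax_close_userland();
    }

    int main(void)
    {
            cpu_has_stronguderef = true;
            pax_open_userland();
            /* ... the body of a copy_{to,from}_user() would run here ... */
            pax_close_userland();
            return 0;
    }
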
16557diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
16558index b073aae..39f9bdd 100644
16559--- a/arch/x86/include/asm/smp.h
16560+++ b/arch/x86/include/asm/smp.h
16561@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
16562 /* cpus sharing the last level cache: */
16563 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
16564 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
16565-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
16566+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
16567
16568 static inline struct cpumask *cpu_sibling_mask(int cpu)
16569 {
16570@@ -79,7 +79,7 @@ struct smp_ops {
16571
16572 void (*send_call_func_ipi)(const struct cpumask *mask);
16573 void (*send_call_func_single_ipi)(int cpu);
16574-};
16575+} __no_const;
16576
16577 /* Globals due to paravirt */
16578 extern void set_cpu_sibling_map(int cpu);
16579@@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata;
16580 extern int safe_smp_processor_id(void);
16581
16582 #elif defined(CONFIG_X86_64_SMP)
16583-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
16584-
16585-#define stack_smp_processor_id() \
16586-({ \
16587- struct thread_info *ti; \
16588- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
16589- ti->cpu; \
16590-})
16591+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
16592+#define stack_smp_processor_id() raw_smp_processor_id()
16593 #define safe_smp_processor_id() smp_processor_id()
16594
16595 #endif
16596diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
16597index 33692ea..350a534 100644
16598--- a/arch/x86/include/asm/spinlock.h
16599+++ b/arch/x86/include/asm/spinlock.h
16600@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
16601 static inline void arch_read_lock(arch_rwlock_t *rw)
16602 {
16603 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
16604+
16605+#ifdef CONFIG_PAX_REFCOUNT
16606+ "jno 0f\n"
16607+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
16608+ "int $4\n0:\n"
16609+ _ASM_EXTABLE(0b, 0b)
16610+#endif
16611+
16612 "jns 1f\n"
16613 "call __read_lock_failed\n\t"
16614 "1:\n"
16615@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
16616 static inline void arch_write_lock(arch_rwlock_t *rw)
16617 {
16618 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
16619+
16620+#ifdef CONFIG_PAX_REFCOUNT
16621+ "jno 0f\n"
16622+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
16623+ "int $4\n0:\n"
16624+ _ASM_EXTABLE(0b, 0b)
16625+#endif
16626+
16627 "jz 1f\n"
16628 "call __write_lock_failed\n\t"
16629 "1:\n"
16630@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
16631
16632 static inline void arch_read_unlock(arch_rwlock_t *rw)
16633 {
16634- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
16635+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
16636+
16637+#ifdef CONFIG_PAX_REFCOUNT
16638+ "jno 0f\n"
16639+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
16640+ "int $4\n0:\n"
16641+ _ASM_EXTABLE(0b, 0b)
16642+#endif
16643+
16644 :"+m" (rw->lock) : : "memory");
16645 }
16646
16647 static inline void arch_write_unlock(arch_rwlock_t *rw)
16648 {
16649- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
16650+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
16651+
16652+#ifdef CONFIG_PAX_REFCOUNT
16653+ "jno 0f\n"
16654+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
16655+ "int $4\n0:\n"
16656+ _ASM_EXTABLE(0b, 0b)
16657+#endif
16658+
16659 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
16660 }
16661
16662diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
16663index 6a99859..03cb807 100644
16664--- a/arch/x86/include/asm/stackprotector.h
16665+++ b/arch/x86/include/asm/stackprotector.h
16666@@ -47,7 +47,7 @@
16667 * head_32 for boot CPU and setup_per_cpu_areas() for others.
16668 */
16669 #define GDT_STACK_CANARY_INIT \
16670- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
16671+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
16672
16673 /*
16674 * Initialize the stackprotector canary value.
16675@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
16676
16677 static inline void load_stack_canary_segment(void)
16678 {
16679-#ifdef CONFIG_X86_32
16680+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16681 asm volatile ("mov %0, %%gs" : : "r" (0));
16682 #endif
16683 }
16684diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
16685index 70bbe39..4ae2bd4 100644
16686--- a/arch/x86/include/asm/stacktrace.h
16687+++ b/arch/x86/include/asm/stacktrace.h
16688@@ -11,28 +11,20 @@
16689
16690 extern int kstack_depth_to_print;
16691
16692-struct thread_info;
16693+struct task_struct;
16694 struct stacktrace_ops;
16695
16696-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
16697- unsigned long *stack,
16698- unsigned long bp,
16699- const struct stacktrace_ops *ops,
16700- void *data,
16701- unsigned long *end,
16702- int *graph);
16703+typedef unsigned long walk_stack_t(struct task_struct *task,
16704+ void *stack_start,
16705+ unsigned long *stack,
16706+ unsigned long bp,
16707+ const struct stacktrace_ops *ops,
16708+ void *data,
16709+ unsigned long *end,
16710+ int *graph);
16711
16712-extern unsigned long
16713-print_context_stack(struct thread_info *tinfo,
16714- unsigned long *stack, unsigned long bp,
16715- const struct stacktrace_ops *ops, void *data,
16716- unsigned long *end, int *graph);
16717-
16718-extern unsigned long
16719-print_context_stack_bp(struct thread_info *tinfo,
16720- unsigned long *stack, unsigned long bp,
16721- const struct stacktrace_ops *ops, void *data,
16722- unsigned long *end, int *graph);
16723+extern walk_stack_t print_context_stack;
16724+extern walk_stack_t print_context_stack_bp;
16725
16726 /* Generic stack tracer with callbacks */
16727
16728@@ -40,7 +32,7 @@ struct stacktrace_ops {
16729 void (*address)(void *data, unsigned long address, int reliable);
16730 /* On negative return stop dumping */
16731 int (*stack)(void *data, char *name);
16732- walk_stack_t walk_stack;
16733+ walk_stack_t *walk_stack;
16734 };
16735
16736 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
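
The stacktrace.h hunk turns walk_stack_t from a pointer typedef into a function typedef: "extern walk_stack_t print_context_stack;" then declares the function itself with a guaranteed-matching prototype, and the struct field spells the pointer out as walk_stack_t *. The idiom in miniature (the names and the toy prototype are illustrative):

    #include <stdio.h>

    /* a function type, not a pointer-to-function type */
    typedef unsigned long walk_stack_t(void *stack, int depth);

    /* declares a function with exactly that prototype */
    static walk_stack_t print_context_stack;

    struct stacktrace_ops {
            walk_stack_t *walk_stack;  /* the pointer is explicit at the use site */
    };

    static unsigned long print_context_stack(void *stack, int depth)
    {
            printf("walking %p to depth %d\n", stack, depth);
            return 0;
    }

    int main(void)
    {
            struct stacktrace_ops ops = { .walk_stack = print_context_stack };

            ops.walk_stack(&ops, 3);
            return 0;
    }
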
16737diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
16738index 4ec45b3..a4f0a8a 100644
16739--- a/arch/x86/include/asm/switch_to.h
16740+++ b/arch/x86/include/asm/switch_to.h
16741@@ -108,7 +108,7 @@ do { \
16742 "call __switch_to\n\t" \
16743 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
16744 __switch_canary \
16745- "movq %P[thread_info](%%rsi),%%r8\n\t" \
16746+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
16747 "movq %%rax,%%rdi\n\t" \
16748 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
16749 "jnz ret_from_fork\n\t" \
16750@@ -119,7 +119,7 @@ do { \
16751 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
16752 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
16753 [_tif_fork] "i" (_TIF_FORK), \
16754- [thread_info] "i" (offsetof(struct task_struct, stack)), \
16755+ [thread_info] "m" (current_tinfo), \
16756 [current_task] "m" (current_task) \
16757 __switch_canary_iparam \
16758 : "memory", "cc" __EXTRA_CLOBBER)
16759diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
16760index a1df6e8..e002940 100644
16761--- a/arch/x86/include/asm/thread_info.h
16762+++ b/arch/x86/include/asm/thread_info.h
16763@@ -10,6 +10,7 @@
16764 #include <linux/compiler.h>
16765 #include <asm/page.h>
16766 #include <asm/types.h>
16767+#include <asm/percpu.h>
16768
16769 /*
16770 * low level task data that entry.S needs immediate access to
16771@@ -23,7 +24,6 @@ struct exec_domain;
16772 #include <linux/atomic.h>
16773
16774 struct thread_info {
16775- struct task_struct *task; /* main task structure */
16776 struct exec_domain *exec_domain; /* execution domain */
16777 __u32 flags; /* low level flags */
16778 __u32 status; /* thread synchronous flags */
16779@@ -33,19 +33,13 @@ struct thread_info {
16780 mm_segment_t addr_limit;
16781 struct restart_block restart_block;
16782 void __user *sysenter_return;
16783-#ifdef CONFIG_X86_32
16784- unsigned long previous_esp; /* ESP of the previous stack in
16785- case of nested (IRQ) stacks
16786- */
16787- __u8 supervisor_stack[0];
16788-#endif
16789+ unsigned long lowest_stack;
16790 unsigned int sig_on_uaccess_error:1;
16791 unsigned int uaccess_err:1; /* uaccess failed */
16792 };
16793
16794-#define INIT_THREAD_INFO(tsk) \
16795+#define INIT_THREAD_INFO \
16796 { \
16797- .task = &tsk, \
16798 .exec_domain = &default_exec_domain, \
16799 .flags = 0, \
16800 .cpu = 0, \
16801@@ -56,7 +50,7 @@ struct thread_info {
16802 }, \
16803 }
16804
16805-#define init_thread_info (init_thread_union.thread_info)
16806+#define init_thread_info (init_thread_union.stack)
16807 #define init_stack (init_thread_union.stack)
16808
16809 #else /* !__ASSEMBLY__ */
16810@@ -97,6 +91,7 @@ struct thread_info {
16811 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
16812 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
16813 #define TIF_X32 30 /* 32-bit native x86-64 binary */
16814+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
16815
16816 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
16817 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
16818@@ -121,17 +116,18 @@ struct thread_info {
16819 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
16820 #define _TIF_ADDR32 (1 << TIF_ADDR32)
16821 #define _TIF_X32 (1 << TIF_X32)
16822+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
16823
16824 /* work to do in syscall_trace_enter() */
16825 #define _TIF_WORK_SYSCALL_ENTRY \
16826 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
16827 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
16828- _TIF_NOHZ)
16829+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
16830
16831 /* work to do in syscall_trace_leave() */
16832 #define _TIF_WORK_SYSCALL_EXIT \
16833 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
16834- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
16835+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
16836
16837 /* work to do on interrupt/exception return */
16838 #define _TIF_WORK_MASK \
16839@@ -142,7 +138,7 @@ struct thread_info {
16840 /* work to do on any return to user space */
16841 #define _TIF_ALLWORK_MASK \
16842 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
16843- _TIF_NOHZ)
16844+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
16845
16846 /* Only used for 64 bit */
16847 #define _TIF_DO_NOTIFY_MASK \
16848@@ -158,45 +154,40 @@ struct thread_info {
16849
16850 #define PREEMPT_ACTIVE 0x10000000
16851
16852-#ifdef CONFIG_X86_32
16853-
16854-#define STACK_WARN (THREAD_SIZE/8)
16855-/*
16856- * macros/functions for gaining access to the thread information structure
16857- *
16858- * preempt_count needs to be 1 initially, until the scheduler is functional.
16859- */
16860-#ifndef __ASSEMBLY__
16861-
16862-
16863-/* how to get the current stack pointer from C */
16864-register unsigned long current_stack_pointer asm("esp") __used;
16865-
16866-/* how to get the thread information struct from C */
16867-static inline struct thread_info *current_thread_info(void)
16868-{
16869- return (struct thread_info *)
16870- (current_stack_pointer & ~(THREAD_SIZE - 1));
16871-}
16872-
16873-#else /* !__ASSEMBLY__ */
16874-
16875+#ifdef __ASSEMBLY__
16876 /* how to get the thread information struct from ASM */
16877 #define GET_THREAD_INFO(reg) \
16878- movl $-THREAD_SIZE, reg; \
16879- andl %esp, reg
16880+ mov PER_CPU_VAR(current_tinfo), reg
16881
16882 /* use this one if reg already contains %esp */
16883-#define GET_THREAD_INFO_WITH_ESP(reg) \
16884- andl $-THREAD_SIZE, reg
16885+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
16886+#else
16887+/* how to get the thread information struct from C */
16888+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
16889+
16890+static __always_inline struct thread_info *current_thread_info(void)
16891+{
16892+ return this_cpu_read_stable(current_tinfo);
16893+}
16894+#endif
16895+
16896+#ifdef CONFIG_X86_32
16897+
16898+#define STACK_WARN (THREAD_SIZE/8)
16899+/*
16900+ * macros/functions for gaining access to the thread information structure
16901+ *
16902+ * preempt_count needs to be 1 initially, until the scheduler is functional.
16903+ */
16904+#ifndef __ASSEMBLY__
16905+
16906+/* how to get the current stack pointer from C */
16907+register unsigned long current_stack_pointer asm("esp") __used;
16908
16909 #endif
16910
16911 #else /* X86_32 */
16912
16913-#include <asm/percpu.h>
16914-#define KERNEL_STACK_OFFSET (5*8)
16915-
16916 /*
16917 * macros/functions for gaining access to the thread information structure
16918 * preempt_count needs to be 1 initially, until the scheduler is functional.
16919@@ -204,27 +195,8 @@ static inline struct thread_info *current_thread_info(void)
16920 #ifndef __ASSEMBLY__
16921 DECLARE_PER_CPU(unsigned long, kernel_stack);
16922
16923-static inline struct thread_info *current_thread_info(void)
16924-{
16925- struct thread_info *ti;
16926- ti = (void *)(this_cpu_read_stable(kernel_stack) +
16927- KERNEL_STACK_OFFSET - THREAD_SIZE);
16928- return ti;
16929-}
16930-
16931-#else /* !__ASSEMBLY__ */
16932-
16933-/* how to get the thread information struct from ASM */
16934-#define GET_THREAD_INFO(reg) \
16935- movq PER_CPU_VAR(kernel_stack),reg ; \
16936- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
16937-
16938-/*
16939- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
16940- * a certain register (to be used in assembler memory operands).
16941- */
16942-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
16943-
16944+/* how to get the current stack pointer from C */
16945+register unsigned long current_stack_pointer asm("rsp") __used;
16946 #endif
16947
16948 #endif /* !X86_32 */
16949@@ -283,5 +255,12 @@ static inline bool is_ia32_task(void)
16950 extern void arch_task_cache_init(void);
16951 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
16952 extern void arch_release_task_struct(struct task_struct *tsk);
16953+
16954+#define __HAVE_THREAD_FUNCTIONS
16955+#define task_thread_info(task) (&(task)->tinfo)
16956+#define task_stack_page(task) ((task)->stack)
16957+#define setup_thread_stack(p, org) do {} while (0)
16958+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
16959+
16960 #endif
16961 #endif /* _ASM_X86_THREAD_INFO_H */
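
The common theme of the thread_info.h (and the preceding switch_to.h) changes: the stock kernel finds thread_info by masking the stack pointer with ~(THREAD_SIZE - 1), which ties the structure to the stack and lets a stack overflow clobber it; the patch instead keeps a per-cpu pointer (current_tinfo) read with a single this_cpu_read_stable() load, and moves thread_info out of the stack entirely via __HAVE_THREAD_FUNCTIONS. (Mainline later adopted a similar decoupling as THREAD_INFO_IN_TASK.) A userspace sketch of the pointer-based lookup, with thread-local storage standing in for per-cpu data — names illustrative:

#include <stdio.h>

struct thread_info {
        unsigned int flags;
};

/* one slot per thread, standing in for the kernel's per-cpu variable */
static __thread struct thread_info tinfo_storage;
static __thread struct thread_info *current_tinfo = &tinfo_storage;

static inline struct thread_info *current_thread_info(void)
{
        return current_tinfo;   /* one load; no stack-layout dependency */
}

int main(void)
{
        current_thread_info()->flags = 1;
        printf("flags=%u\n", current_thread_info()->flags);
        return 0;
}
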
16962diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
16963index 50a7fc0..7c437a7 100644
16964--- a/arch/x86/include/asm/tlbflush.h
16965+++ b/arch/x86/include/asm/tlbflush.h
16966@@ -17,18 +17,40 @@
16967
16968 static inline void __native_flush_tlb(void)
16969 {
16970+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
16971+ unsigned long descriptor[2];
16972+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
16973+ return;
16974+ }
16975+
16976+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16977+ if (static_cpu_has(X86_FEATURE_PCID)) {
16978+ unsigned int cpu = raw_get_cpu();
16979+
16980+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
16981+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
16982+ raw_put_cpu_no_resched();
16983+ return;
16984+ }
16985+#endif
16986+
16987 native_write_cr3(native_read_cr3());
16988 }
16989
16990 static inline void __native_flush_tlb_global_irq_disabled(void)
16991 {
16992- unsigned long cr4;
16993+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
16994+ unsigned long descriptor[2];
16995+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
16996+ } else {
16997+ unsigned long cr4;
16998
16999- cr4 = native_read_cr4();
17000- /* clear PGE */
17001- native_write_cr4(cr4 & ~X86_CR4_PGE);
17002- /* write old PGE again and flush TLBs */
17003- native_write_cr4(cr4);
17004+ cr4 = native_read_cr4();
17005+ /* clear PGE */
17006+ native_write_cr4(cr4 & ~X86_CR4_PGE);
17007+ /* write old PGE again and flush TLBs */
17008+ native_write_cr4(cr4);
17009+ }
17010 }
17011
17012 static inline void __native_flush_tlb_global(void)
17013@@ -49,6 +71,42 @@ static inline void __native_flush_tlb_global(void)
17014
17015 static inline void __native_flush_tlb_single(unsigned long addr)
17016 {
17017+
17018+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17019+ unsigned long descriptor[2];
17020+
17021+ descriptor[0] = PCID_KERNEL;
17022+ descriptor[1] = addr;
17023+
17024+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17025+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
17026+ if (addr < TASK_SIZE_MAX)
17027+ descriptor[1] += pax_user_shadow_base;
17028+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
17029+ }
17030+
17031+ descriptor[0] = PCID_USER;
17032+ descriptor[1] = addr;
17033+#endif
17034+
17035+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
17036+ return;
17037+ }
17038+
17039+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17040+ if (static_cpu_has(X86_FEATURE_PCID)) {
17041+ unsigned int cpu = raw_get_cpu();
17042+
17043+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
17044+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
17045+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17046+ raw_put_cpu_no_resched();
17047+
17048+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
17049+ addr += pax_user_shadow_base;
17050+ }
17051+#endif
17052+
17053 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
17054 }
17055
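
For the INVPCID sequences above: per the Intel SDM, the instruction takes a 16-byte memory descriptor — first quadword the PCID, second quadword a linear address — plus a register operand selecting the invalidation type, which is what the descriptor[2] arrays and the INVPCID_* constants (defined elsewhere in this patch; it spells type 3 INVPCID_ALL_MONGLOBAL) encode. A sketch of such a wrapper for x86-64, using a .byte encoding as the patch's __ASM_INVPCID macro would for assemblers lacking the mnemonic; note INVPCID is privileged and faults outside ring 0, so this is illustrative only:

#include <stdint.h>

enum invpcid_type {
        INVPCID_SINGLE_ADDRESS = 0,     /* one address in one PCID */
        INVPCID_SINGLE_CONTEXT = 1,     /* all mappings for one PCID */
        INVPCID_ALL_GLOBAL     = 2,     /* everything, incl. global pages */
        INVPCID_ALL_NONGLOBAL  = 3,     /* everything but global pages */
};

/* The .byte string encodes "invpcid (%rdx), %rax"; ring 0 only. */
static inline void invpcid(enum invpcid_type type, uint64_t pcid, uint64_t addr)
{
        uint64_t descriptor[2] = { pcid, addr };

        asm volatile(".byte 0x66, 0x0f, 0x38, 0x82, 0x02"
                     : : "d"(descriptor), "a"((uint64_t)type) : "memory");
}
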
17056diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
17057index 5ee2687..74590b9 100644
17058--- a/arch/x86/include/asm/uaccess.h
17059+++ b/arch/x86/include/asm/uaccess.h
17060@@ -7,6 +7,7 @@
17061 #include <linux/compiler.h>
17062 #include <linux/thread_info.h>
17063 #include <linux/string.h>
17064+#include <linux/sched.h>
17065 #include <asm/asm.h>
17066 #include <asm/page.h>
17067 #include <asm/smap.h>
17068@@ -29,7 +30,12 @@
17069
17070 #define get_ds() (KERNEL_DS)
17071 #define get_fs() (current_thread_info()->addr_limit)
17072+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17073+void __set_fs(mm_segment_t x);
17074+void set_fs(mm_segment_t x);
17075+#else
17076 #define set_fs(x) (current_thread_info()->addr_limit = (x))
17077+#endif
17078
17079 #define segment_eq(a, b) ((a).seg == (b).seg)
17080
17081@@ -77,8 +83,33 @@
17082 * checks that the pointer is in the user space range - after calling
17083 * this function, memory access functions may still return -EFAULT.
17084 */
17085-#define access_ok(type, addr, size) \
17086- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
17087+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
17088+#define access_ok(type, addr, size) \
17089+({ \
17090+ long __size = size; \
17091+ unsigned long __addr = (unsigned long)addr; \
17092+ unsigned long __addr_ao = __addr & PAGE_MASK; \
17093+ unsigned long __end_ao = __addr + __size - 1; \
17094+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
17095+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
17096+ while(__addr_ao <= __end_ao) { \
17097+ char __c_ao; \
17098+ __addr_ao += PAGE_SIZE; \
17099+ if (__size > PAGE_SIZE) \
17100+ cond_resched(); \
17101+ if (__get_user(__c_ao, (char __user *)__addr)) \
17102+ break; \
17103+ if (type != VERIFY_WRITE) { \
17104+ __addr = __addr_ao; \
17105+ continue; \
17106+ } \
17107+ if (__put_user(__c_ao, (char __user *)__addr)) \
17108+ break; \
17109+ __addr = __addr_ao; \
17110+ } \
17111+ } \
17112+ __ret_ao; \
17113+})
17114
17115 /*
17116 * The exception table consists of pairs of addresses relative to the
17117@@ -165,10 +196,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
17118 register __inttype(*(ptr)) __val_gu asm("%edx"); \
17119 __chk_user_ptr(ptr); \
17120 might_fault(); \
17121+ pax_open_userland(); \
17122 asm volatile("call __get_user_%P3" \
17123 : "=a" (__ret_gu), "=r" (__val_gu) \
17124 : "0" (ptr), "i" (sizeof(*(ptr)))); \
17125 (x) = (__typeof__(*(ptr))) __val_gu; \
17126+ pax_close_userland(); \
17127 __ret_gu; \
17128 })
17129
17130@@ -176,13 +209,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
17131 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
17132 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
17133
17134-
17135+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17136+#define __copyuser_seg "gs;"
17137+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
17138+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
17139+#else
17140+#define __copyuser_seg
17141+#define __COPYUSER_SET_ES
17142+#define __COPYUSER_RESTORE_ES
17143+#endif
17144
17145 #ifdef CONFIG_X86_32
17146 #define __put_user_asm_u64(x, addr, err, errret) \
17147 asm volatile(ASM_STAC "\n" \
17148- "1: movl %%eax,0(%2)\n" \
17149- "2: movl %%edx,4(%2)\n" \
17150+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
17151+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
17152 "3: " ASM_CLAC "\n" \
17153 ".section .fixup,\"ax\"\n" \
17154 "4: movl %3,%0\n" \
17155@@ -195,8 +236,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
17156
17157 #define __put_user_asm_ex_u64(x, addr) \
17158 asm volatile(ASM_STAC "\n" \
17159- "1: movl %%eax,0(%1)\n" \
17160- "2: movl %%edx,4(%1)\n" \
17161+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
17162+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
17163 "3: " ASM_CLAC "\n" \
17164 _ASM_EXTABLE_EX(1b, 2b) \
17165 _ASM_EXTABLE_EX(2b, 3b) \
17166@@ -246,7 +287,8 @@ extern void __put_user_8(void);
17167 __typeof__(*(ptr)) __pu_val; \
17168 __chk_user_ptr(ptr); \
17169 might_fault(); \
17170- __pu_val = x; \
17171+ __pu_val = (x); \
17172+ pax_open_userland(); \
17173 switch (sizeof(*(ptr))) { \
17174 case 1: \
17175 __put_user_x(1, __pu_val, ptr, __ret_pu); \
17176@@ -264,6 +306,7 @@ extern void __put_user_8(void);
17177 __put_user_x(X, __pu_val, ptr, __ret_pu); \
17178 break; \
17179 } \
17180+ pax_close_userland(); \
17181 __ret_pu; \
17182 })
17183
17184@@ -344,8 +387,10 @@ do { \
17185 } while (0)
17186
17187 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
17188+do { \
17189+ pax_open_userland(); \
17190 asm volatile(ASM_STAC "\n" \
17191- "1: mov"itype" %2,%"rtype"1\n" \
17192+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
17193 "2: " ASM_CLAC "\n" \
17194 ".section .fixup,\"ax\"\n" \
17195 "3: mov %3,%0\n" \
17196@@ -353,8 +398,10 @@ do { \
17197 " jmp 2b\n" \
17198 ".previous\n" \
17199 _ASM_EXTABLE(1b, 3b) \
17200- : "=r" (err), ltype(x) \
17201- : "m" (__m(addr)), "i" (errret), "0" (err))
17202+ : "=r" (err), ltype (x) \
17203+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
17204+ pax_close_userland(); \
17205+} while (0)
17206
17207 #define __get_user_size_ex(x, ptr, size) \
17208 do { \
17209@@ -378,7 +425,7 @@ do { \
17210 } while (0)
17211
17212 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
17213- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
17214+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
17215 "2:\n" \
17216 _ASM_EXTABLE_EX(1b, 2b) \
17217 : ltype(x) : "m" (__m(addr)))
17218@@ -395,13 +442,24 @@ do { \
17219 int __gu_err; \
17220 unsigned long __gu_val; \
17221 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
17222- (x) = (__force __typeof__(*(ptr)))__gu_val; \
17223+ (x) = (__typeof__(*(ptr)))__gu_val; \
17224 __gu_err; \
17225 })
17226
17227 /* FIXME: this hack is definitely wrong -AK */
17228 struct __large_struct { unsigned long buf[100]; };
17229-#define __m(x) (*(struct __large_struct __user *)(x))
17230+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17231+#define ____m(x) \
17232+({ \
17233+ unsigned long ____x = (unsigned long)(x); \
17234+ if (____x < pax_user_shadow_base) \
17235+ ____x += pax_user_shadow_base; \
17236+ (typeof(x))____x; \
17237+})
17238+#else
17239+#define ____m(x) (x)
17240+#endif
17241+#define __m(x) (*(struct __large_struct __user *)____m(x))
17242
17243 /*
17244 * Tell gcc we read from memory instead of writing: this is because
17245@@ -409,8 +467,10 @@ struct __large_struct { unsigned long buf[100]; };
17246 * aliasing issues.
17247 */
17248 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
17249+do { \
17250+ pax_open_userland(); \
17251 asm volatile(ASM_STAC "\n" \
17252- "1: mov"itype" %"rtype"1,%2\n" \
17253+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
17254 "2: " ASM_CLAC "\n" \
17255 ".section .fixup,\"ax\"\n" \
17256 "3: mov %3,%0\n" \
17257@@ -418,10 +478,12 @@ struct __large_struct { unsigned long buf[100]; };
17258 ".previous\n" \
17259 _ASM_EXTABLE(1b, 3b) \
17260 : "=r"(err) \
17261- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
17262+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
17263+ pax_close_userland(); \
17264+} while (0)
17265
17266 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
17267- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
17268+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
17269 "2:\n" \
17270 _ASM_EXTABLE_EX(1b, 2b) \
17271 : : ltype(x), "m" (__m(addr)))
17272@@ -431,11 +493,13 @@ struct __large_struct { unsigned long buf[100]; };
17273 */
17274 #define uaccess_try do { \
17275 current_thread_info()->uaccess_err = 0; \
17276+ pax_open_userland(); \
17277 stac(); \
17278 barrier();
17279
17280 #define uaccess_catch(err) \
17281 clac(); \
17282+ pax_close_userland(); \
17283 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
17284 } while (0)
17285
17286@@ -460,8 +524,12 @@ struct __large_struct { unsigned long buf[100]; };
17287 * On error, the variable @x is set to zero.
17288 */
17289
17290+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17291+#define __get_user(x, ptr) get_user((x), (ptr))
17292+#else
17293 #define __get_user(x, ptr) \
17294 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
17295+#endif
17296
17297 /**
17298 * __put_user: - Write a simple value into user space, with less checking.
17299@@ -483,8 +551,12 @@ struct __large_struct { unsigned long buf[100]; };
17300 * Returns zero on success, or -EFAULT on error.
17301 */
17302
17303+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17304+#define __put_user(x, ptr) put_user((x), (ptr))
17305+#else
17306 #define __put_user(x, ptr) \
17307 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
17308+#endif
17309
17310 #define __get_user_unaligned __get_user
17311 #define __put_user_unaligned __put_user
17312@@ -502,7 +574,7 @@ struct __large_struct { unsigned long buf[100]; };
17313 #define get_user_ex(x, ptr) do { \
17314 unsigned long __gue_val; \
17315 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
17316- (x) = (__force __typeof__(*(ptr)))__gue_val; \
17317+ (x) = (__typeof__(*(ptr)))__gue_val; \
17318 } while (0)
17319
17320 #define put_user_try uaccess_try
17321@@ -519,8 +591,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
17322 extern __must_check long strlen_user(const char __user *str);
17323 extern __must_check long strnlen_user(const char __user *str, long n);
17324
17325-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
17326-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
17327+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
17328+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
17329
17330 /*
17331 * movsl can be slow when source and dest are not both 8-byte aligned
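
The widened access_ok() above keeps the range check but, when the region crosses a page boundary, also walks it one page at a time, reading one byte per page (and, for VERIFY_WRITE, rewriting it to force writability) so any fault surfaces before the real transfer begins, with a cond_resched() thrown in for large ranges. The page walk in isolation, as plain userspace C that only does the read side (illustrative):

#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Touch one byte in every page of [addr, addr + size): the first touch
 * may land mid-page, every later touch lands on a page boundary. */
static void prefault(const volatile char *addr, size_t size)
{
        const volatile char *end = addr + size;
        const volatile char *p = addr;

        while (p < end) {
                (void)*p;       /* faults surface here, up front */
                p = (const volatile char *)
                        (((uintptr_t)p & PAGE_MASK) + PAGE_SIZE);
        }
}

int main(void)
{
        static char buf[3 * 4096];

        prefault(buf, sizeof(buf));
        return 0;
}
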
17332diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
17333index 7f760a9..04b1c65 100644
17334--- a/arch/x86/include/asm/uaccess_32.h
17335+++ b/arch/x86/include/asm/uaccess_32.h
17336@@ -11,15 +11,15 @@
17337 #include <asm/page.h>
17338
17339 unsigned long __must_check __copy_to_user_ll
17340- (void __user *to, const void *from, unsigned long n);
17341+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
17342 unsigned long __must_check __copy_from_user_ll
17343- (void *to, const void __user *from, unsigned long n);
17344+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
17345 unsigned long __must_check __copy_from_user_ll_nozero
17346- (void *to, const void __user *from, unsigned long n);
17347+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
17348 unsigned long __must_check __copy_from_user_ll_nocache
17349- (void *to, const void __user *from, unsigned long n);
17350+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
17351 unsigned long __must_check __copy_from_user_ll_nocache_nozero
17352- (void *to, const void __user *from, unsigned long n);
17353+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
17354
17355 /**
17356 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
17357@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
17358 static __always_inline unsigned long __must_check
17359 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
17360 {
17361+ if ((long)n < 0)
17362+ return n;
17363+
17364+ check_object_size(from, n, true);
17365+
17366 if (__builtin_constant_p(n)) {
17367 unsigned long ret;
17368
17369@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
17370 __copy_to_user(void __user *to, const void *from, unsigned long n)
17371 {
17372 might_fault();
17373+
17374 return __copy_to_user_inatomic(to, from, n);
17375 }
17376
17377 static __always_inline unsigned long
17378 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
17379 {
17380+ if ((long)n < 0)
17381+ return n;
17382+
17383 /* Avoid zeroing the tail if the copy fails..
17384 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
17385 * but as the zeroing behaviour is only significant when n is not
17386@@ -137,6 +146,12 @@ static __always_inline unsigned long
17387 __copy_from_user(void *to, const void __user *from, unsigned long n)
17388 {
17389 might_fault();
17390+
17391+ if ((long)n < 0)
17392+ return n;
17393+
17394+ check_object_size(to, n, false);
17395+
17396 if (__builtin_constant_p(n)) {
17397 unsigned long ret;
17398
17399@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
17400 const void __user *from, unsigned long n)
17401 {
17402 might_fault();
17403+
17404+ if ((long)n < 0)
17405+ return n;
17406+
17407 if (__builtin_constant_p(n)) {
17408 unsigned long ret;
17409
17410@@ -181,15 +200,19 @@ static __always_inline unsigned long
17411 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
17412 unsigned long n)
17413 {
17414- return __copy_from_user_ll_nocache_nozero(to, from, n);
17415+ if ((long)n < 0)
17416+ return n;
17417+
17418+ return __copy_from_user_ll_nocache_nozero(to, from, n);
17419 }
17420
17421-unsigned long __must_check copy_to_user(void __user *to,
17422- const void *from, unsigned long n);
17423-unsigned long __must_check _copy_from_user(void *to,
17424- const void __user *from,
17425- unsigned long n);
17426-
17427+extern void copy_to_user_overflow(void)
17428+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
17429+ __compiletime_error("copy_to_user() buffer size is not provably correct")
17430+#else
17431+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
17432+#endif
17433+;
17434
17435 extern void copy_from_user_overflow(void)
17436 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
17437@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
17438 #endif
17439 ;
17440
17441-static inline unsigned long __must_check copy_from_user(void *to,
17442- const void __user *from,
17443- unsigned long n)
17444+/**
17445+ * copy_to_user: - Copy a block of data into user space.
17446+ * @to: Destination address, in user space.
17447+ * @from: Source address, in kernel space.
17448+ * @n: Number of bytes to copy.
17449+ *
17450+ * Context: User context only. This function may sleep.
17451+ *
17452+ * Copy data from kernel space to user space.
17453+ *
17454+ * Returns number of bytes that could not be copied.
17455+ * On success, this will be zero.
17456+ */
17457+static inline unsigned long __must_check
17458+copy_to_user(void __user *to, const void *from, unsigned long n)
17459 {
17460- int sz = __compiletime_object_size(to);
17461+ size_t sz = __compiletime_object_size(from);
17462
17463- if (likely(sz == -1 || sz >= n))
17464- n = _copy_from_user(to, from, n);
17465- else
17466+ if (unlikely(sz != (size_t)-1 && sz < n))
17467+ copy_to_user_overflow();
17468+ else if (access_ok(VERIFY_WRITE, to, n))
17469+ n = __copy_to_user(to, from, n);
17470+ return n;
17471+}
17472+
17473+/**
17474+ * copy_from_user: - Copy a block of data from user space.
17475+ * @to: Destination address, in kernel space.
17476+ * @from: Source address, in user space.
17477+ * @n: Number of bytes to copy.
17478+ *
17479+ * Context: User context only. This function may sleep.
17480+ *
17481+ * Copy data from user space to kernel space.
17482+ *
17483+ * Returns number of bytes that could not be copied.
17484+ * On success, this will be zero.
17485+ *
17486+ * If some data could not be copied, this function will pad the copied
17487+ * data to the requested size using zero bytes.
17488+ */
17489+static inline unsigned long __must_check
17490+copy_from_user(void *to, const void __user *from, unsigned long n)
17491+{
17492+ size_t sz = __compiletime_object_size(to);
17493+
17494+ check_object_size(to, n, false);
17495+
17496+ if (unlikely(sz != (size_t)-1 && sz < n))
17497 copy_from_user_overflow();
17498-
17499+ else if (access_ok(VERIFY_READ, from, n))
17500+ n = __copy_from_user(to, from, n);
17501+ else if ((long)n > 0)
17502+ memset(to, 0, n);
17503 return n;
17504 }
17505
17506diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
17507index 142810c..1f2a0a7 100644
17508--- a/arch/x86/include/asm/uaccess_64.h
17509+++ b/arch/x86/include/asm/uaccess_64.h
17510@@ -10,6 +10,9 @@
17511 #include <asm/alternative.h>
17512 #include <asm/cpufeature.h>
17513 #include <asm/page.h>
17514+#include <asm/pgtable.h>
17515+
17516+#define set_fs(x) (current_thread_info()->addr_limit = (x))
17517
17518 /*
17519 * Copy To/From Userspace
17520@@ -17,13 +20,13 @@
17521
17522 /* Handles exceptions in both to and from, but doesn't do access_ok */
17523 __must_check unsigned long
17524-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
17525+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
17526 __must_check unsigned long
17527-copy_user_generic_string(void *to, const void *from, unsigned len);
17528+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
17529 __must_check unsigned long
17530-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
17531+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
17532
17533-static __always_inline __must_check unsigned long
17534+static __always_inline __must_check __size_overflow(3) unsigned long
17535 copy_user_generic(void *to, const void *from, unsigned len)
17536 {
17537 unsigned ret;
17538@@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
17539 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
17540 "=d" (len)),
17541 "1" (to), "2" (from), "3" (len)
17542- : "memory", "rcx", "r8", "r9", "r10", "r11");
17543+ : "memory", "rcx", "r8", "r9", "r11");
17544 return ret;
17545 }
17546
17547+static __always_inline __must_check unsigned long
17548+__copy_to_user(void __user *to, const void *from, unsigned long len);
17549+static __always_inline __must_check unsigned long
17550+__copy_from_user(void *to, const void __user *from, unsigned long len);
17551 __must_check unsigned long
17552-_copy_to_user(void __user *to, const void *from, unsigned len);
17553-__must_check unsigned long
17554-_copy_from_user(void *to, const void __user *from, unsigned len);
17555-__must_check unsigned long
17556-copy_in_user(void __user *to, const void __user *from, unsigned len);
17557+copy_in_user(void __user *to, const void __user *from, unsigned long len);
17558+
17559+extern void copy_to_user_overflow(void)
17560+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
17561+ __compiletime_error("copy_to_user() buffer size is not provably correct")
17562+#else
17563+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
17564+#endif
17565+;
17566+
17567+extern void copy_from_user_overflow(void)
17568+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
17569+ __compiletime_error("copy_from_user() buffer size is not provably correct")
17570+#else
17571+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
17572+#endif
17573+;
17574
17575 static inline unsigned long __must_check copy_from_user(void *to,
17576 const void __user *from,
17577 unsigned long n)
17578 {
17579- int sz = __compiletime_object_size(to);
17580-
17581 might_fault();
17582- if (likely(sz == -1 || sz >= n))
17583- n = _copy_from_user(to, from, n);
17584-#ifdef CONFIG_DEBUG_VM
17585- else
17586- WARN(1, "Buffer overflow detected!\n");
17587-#endif
17588+
17589+ check_object_size(to, n, false);
17590+
17591+ if (access_ok(VERIFY_READ, from, n))
17592+ n = __copy_from_user(to, from, n);
17593+ else if (n < INT_MAX)
17594+ memset(to, 0, n);
17595 return n;
17596 }
17597
17598 static __always_inline __must_check
17599-int copy_to_user(void __user *dst, const void *src, unsigned size)
17600+int copy_to_user(void __user *dst, const void *src, unsigned long size)
17601 {
17602 might_fault();
17603
17604- return _copy_to_user(dst, src, size);
17605+ if (access_ok(VERIFY_WRITE, dst, size))
17606+ size = __copy_to_user(dst, src, size);
17607+ return size;
17608 }
17609
17610 static __always_inline __must_check
17611-int __copy_from_user(void *dst, const void __user *src, unsigned size)
17612+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
17613 {
17614- int ret = 0;
17615+ size_t sz = __compiletime_object_size(dst);
17616+ unsigned ret = 0;
17617
17618 might_fault();
17619+
17620+ if (size > INT_MAX)
17621+ return size;
17622+
17623+ check_object_size(dst, size, false);
17624+
17625+#ifdef CONFIG_PAX_MEMORY_UDEREF
17626+ if (!__access_ok(VERIFY_READ, src, size))
17627+ return size;
17628+#endif
17629+
17630+ if (unlikely(sz != (size_t)-1 && sz < size)) {
17631+ copy_from_user_overflow();
17632+ return size;
17633+ }
17634+
17635 if (!__builtin_constant_p(size))
17636- return copy_user_generic(dst, (__force void *)src, size);
17637+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
17638 switch (size) {
17639- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
17640+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
17641 ret, "b", "b", "=q", 1);
17642 return ret;
17643- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
17644+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
17645 ret, "w", "w", "=r", 2);
17646 return ret;
17647- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
17648+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
17649 ret, "l", "k", "=r", 4);
17650 return ret;
17651- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
17652+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
17653 ret, "q", "", "=r", 8);
17654 return ret;
17655 case 10:
17656- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
17657+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
17658 ret, "q", "", "=r", 10);
17659 if (unlikely(ret))
17660 return ret;
17661 __get_user_asm(*(u16 *)(8 + (char *)dst),
17662- (u16 __user *)(8 + (char __user *)src),
17663+ (const u16 __user *)(8 + (const char __user *)src),
17664 ret, "w", "w", "=r", 2);
17665 return ret;
17666 case 16:
17667- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
17668+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
17669 ret, "q", "", "=r", 16);
17670 if (unlikely(ret))
17671 return ret;
17672 __get_user_asm(*(u64 *)(8 + (char *)dst),
17673- (u64 __user *)(8 + (char __user *)src),
17674+ (const u64 __user *)(8 + (const char __user *)src),
17675 ret, "q", "", "=r", 8);
17676 return ret;
17677 default:
17678- return copy_user_generic(dst, (__force void *)src, size);
17679+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
17680 }
17681 }
17682
17683 static __always_inline __must_check
17684-int __copy_to_user(void __user *dst, const void *src, unsigned size)
17685+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
17686 {
17687- int ret = 0;
17688+ size_t sz = __compiletime_object_size(src);
17689+ unsigned ret = 0;
17690
17691 might_fault();
17692+
17693+ if (size > INT_MAX)
17694+ return size;
17695+
17696+ check_object_size(src, size, true);
17697+
17698+#ifdef CONFIG_PAX_MEMORY_UDEREF
17699+ if (!__access_ok(VERIFY_WRITE, dst, size))
17700+ return size;
17701+#endif
17702+
17703+ if (unlikely(sz != (size_t)-1 && sz < size)) {
17704+ copy_to_user_overflow();
17705+ return size;
17706+ }
17707+
17708 if (!__builtin_constant_p(size))
17709- return copy_user_generic((__force void *)dst, src, size);
17710+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
17711 switch (size) {
17712- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
17713+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
17714 ret, "b", "b", "iq", 1);
17715 return ret;
17716- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
17717+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
17718 ret, "w", "w", "ir", 2);
17719 return ret;
17720- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
17721+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
17722 ret, "l", "k", "ir", 4);
17723 return ret;
17724- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
17725+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
17726 ret, "q", "", "er", 8);
17727 return ret;
17728 case 10:
17729- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
17730+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
17731 ret, "q", "", "er", 10);
17732 if (unlikely(ret))
17733 return ret;
17734 asm("":::"memory");
17735- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
17736+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
17737 ret, "w", "w", "ir", 2);
17738 return ret;
17739 case 16:
17740- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
17741+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
17742 ret, "q", "", "er", 16);
17743 if (unlikely(ret))
17744 return ret;
17745 asm("":::"memory");
17746- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
17747+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
17748 ret, "q", "", "er", 8);
17749 return ret;
17750 default:
17751- return copy_user_generic((__force void *)dst, src, size);
17752+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
17753 }
17754 }
17755
17756 static __always_inline __must_check
17757-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
17758+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
17759 {
17760- int ret = 0;
17761+ unsigned ret = 0;
17762
17763 might_fault();
17764+
17765+ if (size > INT_MAX)
17766+ return size;
17767+
17768+#ifdef CONFIG_PAX_MEMORY_UDEREF
17769+ if (!__access_ok(VERIFY_READ, src, size))
17770+ return size;
17771+ if (!__access_ok(VERIFY_WRITE, dst, size))
17772+ return size;
17773+#endif
17774+
17775 if (!__builtin_constant_p(size))
17776- return copy_user_generic((__force void *)dst,
17777- (__force void *)src, size);
17778+ return copy_user_generic((__force_kernel void *)____m(dst),
17779+ (__force_kernel const void *)____m(src), size);
17780 switch (size) {
17781 case 1: {
17782 u8 tmp;
17783- __get_user_asm(tmp, (u8 __user *)src,
17784+ __get_user_asm(tmp, (const u8 __user *)src,
17785 ret, "b", "b", "=q", 1);
17786 if (likely(!ret))
17787 __put_user_asm(tmp, (u8 __user *)dst,
17788@@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
17789 }
17790 case 2: {
17791 u16 tmp;
17792- __get_user_asm(tmp, (u16 __user *)src,
17793+ __get_user_asm(tmp, (const u16 __user *)src,
17794 ret, "w", "w", "=r", 2);
17795 if (likely(!ret))
17796 __put_user_asm(tmp, (u16 __user *)dst,
17797@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
17798
17799 case 4: {
17800 u32 tmp;
17801- __get_user_asm(tmp, (u32 __user *)src,
17802+ __get_user_asm(tmp, (const u32 __user *)src,
17803 ret, "l", "k", "=r", 4);
17804 if (likely(!ret))
17805 __put_user_asm(tmp, (u32 __user *)dst,
17806@@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
17807 }
17808 case 8: {
17809 u64 tmp;
17810- __get_user_asm(tmp, (u64 __user *)src,
17811+ __get_user_asm(tmp, (const u64 __user *)src,
17812 ret, "q", "", "=r", 8);
17813 if (likely(!ret))
17814 __put_user_asm(tmp, (u64 __user *)dst,
17815@@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
17816 return ret;
17817 }
17818 default:
17819- return copy_user_generic((__force void *)dst,
17820- (__force void *)src, size);
17821+ return copy_user_generic((__force_kernel void *)____m(dst),
17822+ (__force_kernel const void *)____m(src), size);
17823 }
17824 }
17825
17826 static __must_check __always_inline int
17827-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
17828+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
17829 {
17830- return copy_user_generic(dst, (__force const void *)src, size);
17831+ if (size > INT_MAX)
17832+ return size;
17833+
17834+#ifdef CONFIG_PAX_MEMORY_UDEREF
17835+ if (!__access_ok(VERIFY_READ, src, size))
17836+ return size;
17837+#endif
17838+
17839+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
17840 }
17841
17842-static __must_check __always_inline int
17843-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
17844+static __must_check __always_inline unsigned long
17845+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
17846 {
17847- return copy_user_generic((__force void *)dst, src, size);
17848+ if (size > INT_MAX)
17849+ return size;
17850+
17851+#ifdef CONFIG_PAX_MEMORY_UDEREF
17852+ if (!__access_ok(VERIFY_WRITE, dst, size))
17853+ return size;
17854+#endif
17855+
17856+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
17857 }
17858
17859-extern long __copy_user_nocache(void *dst, const void __user *src,
17860- unsigned size, int zerorest);
17861+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
17862+ unsigned long size, int zerorest) __size_overflow(3);
17863
17864-static inline int
17865-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
17866+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
17867 {
17868 might_sleep();
17869+
17870+ if (size > INT_MAX)
17871+ return size;
17872+
17873+#ifdef CONFIG_PAX_MEMORY_UDEREF
17874+ if (!__access_ok(VERIFY_READ, src, size))
17875+ return size;
17876+#endif
17877+
17878 return __copy_user_nocache(dst, src, size, 1);
17879 }
17880
17881-static inline int
17882-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
17883- unsigned size)
17884+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
17885+ unsigned long size)
17886 {
17887+ if (size > INT_MAX)
17888+ return size;
17889+
17890+#ifdef CONFIG_PAX_MEMORY_UDEREF
17891+ if (!__access_ok(VERIFY_READ, src, size))
17892+ return size;
17893+#endif
17894+
17895 return __copy_user_nocache(dst, src, size, 0);
17896 }
17897
17898-unsigned long
17899-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
17900+extern unsigned long
17901+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
17902
17903 #endif /* _ASM_X86_UACCESS_64_H */
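
Two shapes worth noting in the __copy_from_user()/__copy_to_user() bodies above: the patch front-loads a "size > INT_MAX" rejection so sign-confused lengths never reach the copy, and (carried over from the stock kernel) __builtin_constant_p() routes compile-time-constant small sizes to fixed-width moves while everything else falls back to copy_user_generic(). The dispatch skeleton, with memcpy() standing in for the generic copier and alignment/aliasing concerns glossed over (illustrative, GNU C):

#include <stdint.h>
#include <string.h>

static unsigned long copy_generic(void *dst, const void *src, unsigned long n)
{
        memcpy(dst, src, n);            /* stand-in for copy_user_generic() */
        return 0;                       /* 0 bytes left uncopied */
}

#define copy_sized(dst, src, n)                                         \
({                                                                      \
        unsigned long __left = 0;                                       \
        if (!__builtin_constant_p(n))                                   \
                __left = copy_generic((dst), (src), (n));               \
        else switch (n) {                                               \
        case 4:                                                         \
                *(uint32_t *)(dst) = *(const uint32_t *)(src);          \
                break;                                                  \
        case 8:                                                         \
                *(uint64_t *)(dst) = *(const uint64_t *)(src);          \
                break;                                                  \
        default:                                                        \
                __left = copy_generic((dst), (src), (n));               \
        }                                                               \
        __left;                                                         \
})

A call such as copy_sized(&dst, &src, 8) then compiles down to a single 8-byte move, while a runtime length takes the generic path.
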
17904diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
17905index 5b238981..77fdd78 100644
17906--- a/arch/x86/include/asm/word-at-a-time.h
17907+++ b/arch/x86/include/asm/word-at-a-time.h
17908@@ -11,7 +11,7 @@
17909 * and shift, for example.
17910 */
17911 struct word_at_a_time {
17912- const unsigned long one_bits, high_bits;
17913+ unsigned long one_bits, high_bits;
17914 };
17915
17916 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
17917diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
17918index d8d9922..bf6cecb 100644
17919--- a/arch/x86/include/asm/x86_init.h
17920+++ b/arch/x86/include/asm/x86_init.h
17921@@ -129,7 +129,7 @@ struct x86_init_ops {
17922 struct x86_init_timers timers;
17923 struct x86_init_iommu iommu;
17924 struct x86_init_pci pci;
17925-};
17926+} __no_const;
17927
17928 /**
17929 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
17930@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
17931 void (*setup_percpu_clockev)(void);
17932 void (*early_percpu_clock_init)(void);
17933 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
17934-};
17935+} __no_const;
17936
17937 /**
17938 * struct x86_platform_ops - platform specific runtime functions
17939@@ -166,7 +166,7 @@ struct x86_platform_ops {
17940 void (*save_sched_clock_state)(void);
17941 void (*restore_sched_clock_state)(void);
17942 void (*apic_post_init)(void);
17943-};
17944+} __no_const;
17945
17946 struct pci_dev;
17947 struct msi_msg;
17948@@ -180,7 +180,7 @@ struct x86_msi_ops {
17949 void (*teardown_msi_irqs)(struct pci_dev *dev);
17950 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
17951 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
17952-};
17953+} __no_const;
17954
17955 struct IO_APIC_route_entry;
17956 struct io_apic_irq_attr;
17957@@ -201,7 +201,7 @@ struct x86_io_apic_ops {
17958 unsigned int destination, int vector,
17959 struct io_apic_irq_attr *attr);
17960 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
17961-};
17962+} __no_const;
17963
17964 extern struct x86_init_ops x86_init;
17965 extern struct x86_cpuinit_ops x86_cpuinit;
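
The __no_const annotations above are the opt-out side of grsecurity's constify GCC plugin: structures consisting of function pointers are normally forced const, so the tables land in read-only memory and cannot be overwritten by an exploit, and a struct the kernel legitimately writes at runtime — these x86 ops are patched during boot — must be marked __no_const to stay writable. The underlying idea in plain C (illustrative names):

#include <stdio.h>

struct ops {
        void (*greet)(void);
};

static void hello(void) { puts("hello"); }

/* never reassigned: can be const, i.e. placed in .rodata */
static const struct ops fixed_ops = { .greet = hello };

/* assigned at startup: must stay writable (the __no_const case) */
static struct ops boot_patched_ops;

int main(void)
{
        boot_patched_ops.greet = hello;         /* boot-time style write */
        fixed_ops.greet();
        boot_patched_ops.greet();
        return 0;
}
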
17966diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
17967index 0415cda..3b22adc 100644
17968--- a/arch/x86/include/asm/xsave.h
17969+++ b/arch/x86/include/asm/xsave.h
17970@@ -70,8 +70,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
17971 if (unlikely(err))
17972 return -EFAULT;
17973
17974+ pax_open_userland();
17975 __asm__ __volatile__(ASM_STAC "\n"
17976- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
17977+ "1:"
17978+ __copyuser_seg
17979+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
17980 "2: " ASM_CLAC "\n"
17981 ".section .fixup,\"ax\"\n"
17982 "3: movl $-1,%[err]\n"
17983@@ -81,18 +84,22 @@ static inline int xsave_user(struct xsave_struct __user *buf)
17984 : [err] "=r" (err)
17985 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
17986 : "memory");
17987+ pax_close_userland();
17988 return err;
17989 }
17990
17991 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
17992 {
17993 int err;
17994- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
17995+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
17996 u32 lmask = mask;
17997 u32 hmask = mask >> 32;
17998
17999+ pax_open_userland();
18000 __asm__ __volatile__(ASM_STAC "\n"
18001- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
18002+ "1:"
18003+ __copyuser_seg
18004+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
18005 "2: " ASM_CLAC "\n"
18006 ".section .fixup,\"ax\"\n"
18007 "3: movl $-1,%[err]\n"
18008@@ -102,6 +109,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
18009 : [err] "=r" (err)
18010 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
18011 : "memory"); /* memory required? */
18012+ pax_close_userland();
18013 return err;
18014 }
18015
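
The 1:/2:/3: label pattern in xsave_user()/xrestore_user() above (and in the __get_user_asm()/__put_user_asm() macros earlier) is the kernel's exception-table mechanism: _ASM_EXTABLE(1b, 3b) records "a fault at 1 resumes at 3", the page-fault handler rewrites the saved instruction pointer on a table hit, and the .fixup-section code sets the error value and jumps back to the normal path. A loose userspace analogue in which a SIGSEGV handler plays the fault handler and siglongjmp() plays the table lookup (illustrative only; long jumps out of signal handlers carry their own caveats):

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf fixup;

static void on_segv(int sig)
{
        (void)sig;
        siglongjmp(fixup, 1);           /* "resume at the fixup label" */
}

int main(void)
{
        int err = 0;

        signal(SIGSEGV, on_segv);
        if (sigsetjmp(fixup, 1) == 0)
                *(volatile int *)0 = 1; /* "1:" the faulting access */
        else
                err = -1;               /* "3:" the recovery path */
        printf("err = %d\n", err);
        return 0;
}
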
18016diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
18017index bbae024..e1528f9 100644
18018--- a/arch/x86/include/uapi/asm/e820.h
18019+++ b/arch/x86/include/uapi/asm/e820.h
18020@@ -63,7 +63,7 @@ struct e820map {
18021 #define ISA_START_ADDRESS 0xa0000
18022 #define ISA_END_ADDRESS 0x100000
18023
18024-#define BIOS_BEGIN 0x000a0000
18025+#define BIOS_BEGIN 0x000c0000
18026 #define BIOS_END 0x00100000
18027
18028 #define BIOS_ROM_BASE 0xffe00000
18029diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
18030index 7bd3bd3..5dac791 100644
18031--- a/arch/x86/kernel/Makefile
18032+++ b/arch/x86/kernel/Makefile
18033@@ -22,7 +22,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
18034 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
18035 obj-$(CONFIG_IRQ_WORK) += irq_work.o
18036 obj-y += probe_roms.o
18037-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
18038+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
18039 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
18040 obj-y += syscall_$(BITS).o
18041 obj-$(CONFIG_X86_64) += vsyscall_64.o
18042diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
18043index 230c8ea..f915130 100644
18044--- a/arch/x86/kernel/acpi/boot.c
18045+++ b/arch/x86/kernel/acpi/boot.c
18046@@ -1361,7 +1361,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
18047 * If your system is blacklisted here, but you find that acpi=force
18048 * works for you, please contact linux-acpi@vger.kernel.org
18049 */
18050-static struct dmi_system_id __initdata acpi_dmi_table[] = {
18051+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
18052 /*
18053 * Boxes that need ACPI disabled
18054 */
18055@@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
18056 };
18057
18058 /* second table for DMI checks that should run after early-quirks */
18059-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
18060+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
18061 /*
18062 * HP laptops which use a DSDT reporting as HP/SB400/10000,
18063 * which includes some code which overrides all temperature
18064diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
18065index ec94e11..7fbbec0 100644
18066--- a/arch/x86/kernel/acpi/sleep.c
18067+++ b/arch/x86/kernel/acpi/sleep.c
18068@@ -88,8 +88,12 @@ int acpi_suspend_lowlevel(void)
18069 #else /* CONFIG_64BIT */
18070 #ifdef CONFIG_SMP
18071 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
18072+
18073+ pax_open_kernel();
18074 early_gdt_descr.address =
18075 (unsigned long)get_cpu_gdt_table(smp_processor_id());
18076+ pax_close_kernel();
18077+
18078 initial_gs = per_cpu_offset(smp_processor_id());
18079 #endif
18080 initial_code = (unsigned long)wakeup_long64;
18081diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
18082index d1daa66..59fecba 100644
18083--- a/arch/x86/kernel/acpi/wakeup_32.S
18084+++ b/arch/x86/kernel/acpi/wakeup_32.S
18085@@ -29,13 +29,11 @@ wakeup_pmode_return:
18086 # and restore the stack ... but you need gdt for this to work
18087 movl saved_context_esp, %esp
18088
18089- movl %cs:saved_magic, %eax
18090- cmpl $0x12345678, %eax
18091+ cmpl $0x12345678, saved_magic
18092 jne bogus_magic
18093
18094 # jump to place where we left off
18095- movl saved_eip, %eax
18096- jmp *%eax
18097+ jmp *(saved_eip)
18098
18099 bogus_magic:
18100 jmp bogus_magic
18101diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
18102index c15cf9a..0e63558 100644
18103--- a/arch/x86/kernel/alternative.c
18104+++ b/arch/x86/kernel/alternative.c
18105@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
18106 */
18107 for (a = start; a < end; a++) {
18108 instr = (u8 *)&a->instr_offset + a->instr_offset;
18109+
18110+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18111+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18112+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
18113+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18114+#endif
18115+
18116 replacement = (u8 *)&a->repl_offset + a->repl_offset;
18117 BUG_ON(a->replacementlen > a->instrlen);
18118 BUG_ON(a->instrlen > sizeof(insnbuf));
18119@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
18120 for (poff = start; poff < end; poff++) {
18121 u8 *ptr = (u8 *)poff + *poff;
18122
18123+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18124+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18125+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
18126+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18127+#endif
18128+
18129 if (!*poff || ptr < text || ptr >= text_end)
18130 continue;
18131 /* turn DS segment override prefix into lock prefix */
18132- if (*ptr == 0x3e)
18133+ if (*ktla_ktva(ptr) == 0x3e)
18134 text_poke(ptr, ((unsigned char []){0xf0}), 1);
18135 }
18136 mutex_unlock(&text_mutex);
18137@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
18138 for (poff = start; poff < end; poff++) {
18139 u8 *ptr = (u8 *)poff + *poff;
18140
18141+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18142+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18143+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
18144+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18145+#endif
18146+
18147 if (!*poff || ptr < text || ptr >= text_end)
18148 continue;
18149 /* turn lock prefix into DS segment override prefix */
18150- if (*ptr == 0xf0)
18151+ if (*ktla_ktva(ptr) == 0xf0)
18152 text_poke(ptr, ((unsigned char []){0x3E}), 1);
18153 }
18154 mutex_unlock(&text_mutex);
18155@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
18156
18157 BUG_ON(p->len > MAX_PATCH_LEN);
18158 /* prep the buffer with the original instructions */
18159- memcpy(insnbuf, p->instr, p->len);
18160+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
18161 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
18162 (unsigned long)p->instr, p->len);
18163
18164@@ -515,7 +534,7 @@ void __init alternative_instructions(void)
18165 if (!uniproc_patched || num_possible_cpus() == 1)
18166 free_init_pages("SMP alternatives",
18167 (unsigned long)__smp_locks,
18168- (unsigned long)__smp_locks_end);
18169+ PAGE_ALIGN((unsigned long)__smp_locks_end));
18170 #endif
18171
18172 apply_paravirt(__parainstructions, __parainstructions_end);
18173@@ -535,13 +554,17 @@ void __init alternative_instructions(void)
18174 * instructions. And on the local CPU you need to be protected again NMI or MCE
18175 * handlers seeing an inconsistent instruction while you patch.
18176 */
18177-void *__init_or_module text_poke_early(void *addr, const void *opcode,
18178+void *__kprobes text_poke_early(void *addr, const void *opcode,
18179 size_t len)
18180 {
18181 unsigned long flags;
18182 local_irq_save(flags);
18183- memcpy(addr, opcode, len);
18184+
18185+ pax_open_kernel();
18186+ memcpy(ktla_ktva(addr), opcode, len);
18187 sync_core();
18188+ pax_close_kernel();
18189+
18190 local_irq_restore(flags);
18191 /* Could also do a CLFLUSH here to speed up CPU recovery; but
18192 that causes hangs on some VIA CPUs. */
18193@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
18194 */
18195 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
18196 {
18197- unsigned long flags;
18198- char *vaddr;
18199+ unsigned char *vaddr = ktla_ktva(addr);
18200 struct page *pages[2];
18201- int i;
18202+ size_t i;
18203
18204 if (!core_kernel_text((unsigned long)addr)) {
18205- pages[0] = vmalloc_to_page(addr);
18206- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
18207+ pages[0] = vmalloc_to_page(vaddr);
18208+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
18209 } else {
18210- pages[0] = virt_to_page(addr);
18211+ pages[0] = virt_to_page(vaddr);
18212 WARN_ON(!PageReserved(pages[0]));
18213- pages[1] = virt_to_page(addr + PAGE_SIZE);
18214+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
18215 }
18216 BUG_ON(!pages[0]);
18217- local_irq_save(flags);
18218- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
18219- if (pages[1])
18220- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
18221- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
18222- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
18223- clear_fixmap(FIX_TEXT_POKE0);
18224- if (pages[1])
18225- clear_fixmap(FIX_TEXT_POKE1);
18226- local_flush_tlb();
18227- sync_core();
18228- /* Could also do a CLFLUSH here to speed up CPU recovery; but
18229- that causes hangs on some VIA CPUs. */
18230+ text_poke_early(addr, opcode, len);
18231 for (i = 0; i < len; i++)
18232- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
18233- local_irq_restore(flags);
18234+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
18235 return addr;
18236 }
18237
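
Context for the alternative.c changes above: under KERNEXEC the kernel's executable mapping is read-only, so patch sites are reached through ktla_ktva() (kernel-text linear address to writable alias, defined elsewhere in this patch), writes are bracketed by pax_open_kernel()/pax_close_kernel(), and text_poke() collapses into text_poke_early() plus a verify loop in place of the stock fixmap dance. A userspace sketch of the broader W^X discipline, toggling page protections around a code write — whereas the patch avoids even this transient writable window on the executable mapping by writing through the separate alias:

#include <string.h>
#include <sys/mman.h>

/* Patch `len` bytes at `off` inside a page that is normally read+exec:
 * open a writable window, write, close it again. */
static int poke(void *page, size_t pagesz, size_t off,
                const void *opcode, size_t len)
{
        if (mprotect(page, pagesz, PROT_READ | PROT_WRITE))
                return -1;
        memcpy((char *)page + off, opcode, len);
        return mprotect(page, pagesz, PROT_READ | PROT_EXEC);
}
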
18238diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
18239index 904611b..004dde6 100644
18240--- a/arch/x86/kernel/apic/apic.c
18241+++ b/arch/x86/kernel/apic/apic.c
18242@@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
18243 /*
18244 * Debug level, exported for io_apic.c
18245 */
18246-unsigned int apic_verbosity;
18247+int apic_verbosity;
18248
18249 int pic_mode;
18250
18251@@ -1955,7 +1955,7 @@ void smp_error_interrupt(struct pt_regs *regs)
18252 apic_write(APIC_ESR, 0);
18253 v1 = apic_read(APIC_ESR);
18254 ack_APIC_irq();
18255- atomic_inc(&irq_err_count);
18256+ atomic_inc_unchecked(&irq_err_count);
18257
18258 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
18259 smp_processor_id(), v0 , v1);
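
The atomic_inc_unchecked() substitution above is the other half of PAX_REFCOUNT: plain atomic_t increments trap on signed overflow (the "int $4" pattern near the top of this section), so counters that may legitimately wrap — error statistics such as irq_err_count here — are moved to atomic_unchecked_t, which increments without the check. The type split, rendered roughly in userspace C (type names from the patch, bodies illustrative; real versions use locked instructions, atomicity is elided here):

#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;           /* checked */
typedef struct { volatile int counter; } atomic_unchecked_t; /* may wrap */

static void atomic_inc(atomic_t *v)
{
        int next;

        if (__builtin_add_overflow(v->counter, 1, &next))
                abort();        /* stand-in for the overflow trap */
        v->counter = next;
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        /* unsigned arithmetic wraps silently and legally */
        v->counter = (int)((unsigned int)v->counter + 1);
}
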
18260diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
18261index 00c77cf..2dc6a2d 100644
18262--- a/arch/x86/kernel/apic/apic_flat_64.c
18263+++ b/arch/x86/kernel/apic/apic_flat_64.c
18264@@ -157,7 +157,7 @@ static int flat_probe(void)
18265 return 1;
18266 }
18267
18268-static struct apic apic_flat = {
18269+static struct apic apic_flat __read_only = {
18270 .name = "flat",
18271 .probe = flat_probe,
18272 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
18273@@ -271,7 +271,7 @@ static int physflat_probe(void)
18274 return 0;
18275 }
18276
18277-static struct apic apic_physflat = {
18278+static struct apic apic_physflat __read_only = {
18279
18280 .name = "physical flat",
18281 .probe = physflat_probe,
18282diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
18283index e145f28..2752888 100644
18284--- a/arch/x86/kernel/apic/apic_noop.c
18285+++ b/arch/x86/kernel/apic/apic_noop.c
18286@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
18287 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
18288 }
18289
18290-struct apic apic_noop = {
18291+struct apic apic_noop __read_only = {
18292 .name = "noop",
18293 .probe = noop_probe,
18294 .acpi_madt_oem_check = NULL,
18295diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
18296index d50e364..543bee3 100644
18297--- a/arch/x86/kernel/apic/bigsmp_32.c
18298+++ b/arch/x86/kernel/apic/bigsmp_32.c
18299@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
18300 return dmi_bigsmp;
18301 }
18302
18303-static struct apic apic_bigsmp = {
18304+static struct apic apic_bigsmp __read_only = {
18305
18306 .name = "bigsmp",
18307 .probe = probe_bigsmp,
18308diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
18309index 0874799..a7a7892 100644
18310--- a/arch/x86/kernel/apic/es7000_32.c
18311+++ b/arch/x86/kernel/apic/es7000_32.c
18312@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
18313 return ret && es7000_apic_is_cluster();
18314 }
18315
18316-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
18317-static struct apic __refdata apic_es7000_cluster = {
18318+static struct apic apic_es7000_cluster __read_only = {
18319
18320 .name = "es7000",
18321 .probe = probe_es7000,
18322@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
18323 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
18324 };
18325
18326-static struct apic __refdata apic_es7000 = {
18327+static struct apic apic_es7000 __read_only = {
18328
18329 .name = "es7000",
18330 .probe = probe_es7000,
18331diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
18332index 9ed796c..e930fe4 100644
18333--- a/arch/x86/kernel/apic/io_apic.c
18334+++ b/arch/x86/kernel/apic/io_apic.c
18335@@ -1060,7 +1060,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
18336 }
18337 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
18338
18339-void lock_vector_lock(void)
18340+void lock_vector_lock(void) __acquires(vector_lock)
18341 {
18342 /* Used to the online set of cpus does not change
18343 * during assign_irq_vector.
18344@@ -1068,7 +1068,7 @@ void lock_vector_lock(void)
18345 raw_spin_lock(&vector_lock);
18346 }
18347
18348-void unlock_vector_lock(void)
18349+void unlock_vector_lock(void) __releases(vector_lock)
18350 {
18351 raw_spin_unlock(&vector_lock);
18352 }
18353@@ -2362,7 +2362,7 @@ static void ack_apic_edge(struct irq_data *data)
18354 ack_APIC_irq();
18355 }
18356
18357-atomic_t irq_mis_count;
18358+atomic_unchecked_t irq_mis_count;
18359
18360 #ifdef CONFIG_GENERIC_PENDING_IRQ
18361 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
18362@@ -2503,7 +2503,7 @@ static void ack_apic_level(struct irq_data *data)
18363 * at the cpu.
18364 */
18365 if (!(v & (1 << (i & 0x1f)))) {
18366- atomic_inc(&irq_mis_count);
18367+ atomic_inc_unchecked(&irq_mis_count);
18368
18369 eoi_ioapic_irq(irq, cfg);
18370 }
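
The __acquires()/__releases() additions are sparse annotations, not behavioral changes: they declare that lock_vector_lock() and unlock_vector_lock() intentionally change the lock context by one, which keeps sparse's context-imbalance checking quiet on asymmetric lock/unlock helpers. As the kernel defines them when the checker runs:

    /* linux/compiler.h, under __CHECKER__ */
    #define __acquires(x)	__attribute__((context(x, 0, 1)))
    #define __releases(x)	__attribute__((context(x, 1, 0)))
    /* both expand to nothing in a normal build */
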
18371diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
18372index d661ee9..791fd33 100644
18373--- a/arch/x86/kernel/apic/numaq_32.c
18374+++ b/arch/x86/kernel/apic/numaq_32.c
18375@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
18376 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
18377 }
18378
18379-/* Use __refdata to keep false positive warning calm. */
18380-static struct apic __refdata apic_numaq = {
18381+static struct apic apic_numaq __read_only = {
18382
18383 .name = "NUMAQ",
18384 .probe = probe_numaq,
18385diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
18386index eb35ef9..f184a21 100644
18387--- a/arch/x86/kernel/apic/probe_32.c
18388+++ b/arch/x86/kernel/apic/probe_32.c
18389@@ -72,7 +72,7 @@ static int probe_default(void)
18390 return 1;
18391 }
18392
18393-static struct apic apic_default = {
18394+static struct apic apic_default __read_only = {
18395
18396 .name = "default",
18397 .probe = probe_default,
18398diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
18399index 77c95c0..434f8a4 100644
18400--- a/arch/x86/kernel/apic/summit_32.c
18401+++ b/arch/x86/kernel/apic/summit_32.c
18402@@ -486,7 +486,7 @@ void setup_summit(void)
18403 }
18404 #endif
18405
18406-static struct apic apic_summit = {
18407+static struct apic apic_summit __read_only = {
18408
18409 .name = "summit",
18410 .probe = probe_summit,
18411diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
18412index c88baa4..757aee1 100644
18413--- a/arch/x86/kernel/apic/x2apic_cluster.c
18414+++ b/arch/x86/kernel/apic/x2apic_cluster.c
18415@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
18416 return notifier_from_errno(err);
18417 }
18418
18419-static struct notifier_block __refdata x2apic_cpu_notifier = {
18420+static struct notifier_block x2apic_cpu_notifier = {
18421 .notifier_call = update_clusterinfo,
18422 };
18423
18424@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
18425 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
18426 }
18427
18428-static struct apic apic_x2apic_cluster = {
18429+static struct apic apic_x2apic_cluster __read_only = {
18430
18431 .name = "cluster x2apic",
18432 .probe = x2apic_cluster_probe,
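
The x2apic_cpu_notifier hunk is the first of several that strip __refdata/__cpuinitdata from CPU-hotplug notifier blocks (cacheinfo, MCE, thermal, uncore, and cpuid follow below). A registered notifier must outlive CPU init, so init-discardable sections are an unsafe home for it, and plain data is also what the patch's constification and write-protection machinery can reason about. The resulting shape, with illustrative names:

    /* Sketch: a hotplug notifier living in ordinary data. */
    static int demo_cpu_callback(struct notifier_block *nfb,
    			     unsigned long action, void *hcpu)
    {
    	return NOTIFY_OK;
    }

    static struct notifier_block demo_cpu_notifier = {
    	.notifier_call = demo_cpu_callback,
    };
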
18433diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
18434index 562a76d..a003c0f 100644
18435--- a/arch/x86/kernel/apic/x2apic_phys.c
18436+++ b/arch/x86/kernel/apic/x2apic_phys.c
18437@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
18438 return apic == &apic_x2apic_phys;
18439 }
18440
18441-static struct apic apic_x2apic_phys = {
18442+static struct apic apic_x2apic_phys __read_only = {
18443
18444 .name = "physical x2apic",
18445 .probe = x2apic_phys_probe,
18446diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
18447index 794f6eb..67e1db2 100644
18448--- a/arch/x86/kernel/apic/x2apic_uv_x.c
18449+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
18450@@ -342,7 +342,7 @@ static int uv_probe(void)
18451 return apic == &apic_x2apic_uv_x;
18452 }
18453
18454-static struct apic __refdata apic_x2apic_uv_x = {
18455+static struct apic apic_x2apic_uv_x __read_only = {
18456
18457 .name = "UV large system",
18458 .probe = uv_probe,
18459diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
18460index 53a4e27..038760a 100644
18461--- a/arch/x86/kernel/apm_32.c
18462+++ b/arch/x86/kernel/apm_32.c
18463@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
18464 * This is for buggy BIOS's that refer to (real mode) segment 0x40
18465 * even though they are called in protected mode.
18466 */
18467-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
18468+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
18469 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
18470
18471 static const char driver_version[] = "1.16ac"; /* no spaces */
18472@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
18473 BUG_ON(cpu != 0);
18474 gdt = get_cpu_gdt_table(cpu);
18475 save_desc_40 = gdt[0x40 / 8];
18476+
18477+ pax_open_kernel();
18478 gdt[0x40 / 8] = bad_bios_desc;
18479+ pax_close_kernel();
18480
18481 apm_irq_save(flags);
18482 APM_DO_SAVE_SEGS;
18483@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
18484 &call->esi);
18485 APM_DO_RESTORE_SEGS;
18486 apm_irq_restore(flags);
18487+
18488+ pax_open_kernel();
18489 gdt[0x40 / 8] = save_desc_40;
18490+ pax_close_kernel();
18491+
18492 put_cpu();
18493
18494 return call->eax & 0xff;
18495@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
18496 BUG_ON(cpu != 0);
18497 gdt = get_cpu_gdt_table(cpu);
18498 save_desc_40 = gdt[0x40 / 8];
18499+
18500+ pax_open_kernel();
18501 gdt[0x40 / 8] = bad_bios_desc;
18502+ pax_close_kernel();
18503
18504 apm_irq_save(flags);
18505 APM_DO_SAVE_SEGS;
18506@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
18507 &call->eax);
18508 APM_DO_RESTORE_SEGS;
18509 apm_irq_restore(flags);
18510+
18511+ pax_open_kernel();
18512 gdt[0x40 / 8] = save_desc_40;
18513+ pax_close_kernel();
18514+
18515 put_cpu();
18516 return error;
18517 }
18518@@ -2362,12 +2376,15 @@ static int __init apm_init(void)
18519 * code to that CPU.
18520 */
18521 gdt = get_cpu_gdt_table(0);
18522+
18523+ pax_open_kernel();
18524 set_desc_base(&gdt[APM_CS >> 3],
18525 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
18526 set_desc_base(&gdt[APM_CS_16 >> 3],
18527 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
18528 set_desc_base(&gdt[APM_DS >> 3],
18529 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
18530+ pax_close_kernel();
18531
18532 proc_create("apm", 0, NULL, &apm_file_ops);
18533
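The APM changes are one theme applied three times: the GDT is write-protected under KERNEXEC, so every transient write to slot 0x40 and the segment-base setup in apm_init() get bracketed with pax_open_kernel()/pax_close_kernel(). The bad_bios_desc tweak from 0x4092 to 0x4093 supports the same goal by pre-setting the descriptor's accessed bit, so the CPU never needs to write it back into a read-only GDT on a segment load. A sketch of the open/close pair on native x86, where it toggles CR0.WP (an assumption of this sketch; the paravirt and UDEREF variants differ):

    /* Sketch: briefly lift CR0.WP so the kernel may write to pages it
     * otherwise maps read-only; callers run non-preemptibly. */
    static inline unsigned long native_pax_open_kernel(void)
    {
    	unsigned long cr0;

    	barrier();
    	cr0 = read_cr0() ^ X86_CR0_WP;	/* WP was set: clear it */
    	write_cr0(cr0);
    	barrier();
    	return cr0 ^ X86_CR0_WP;
    }

    static inline unsigned long native_pax_close_kernel(void)
    {
    	unsigned long cr0;

    	barrier();
    	cr0 = read_cr0() ^ X86_CR0_WP;	/* WP was clear: set it */
    	write_cr0(cr0);
    	barrier();
    	return cr0 ^ X86_CR0_WP;
    }
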
18534diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
18535index 2861082..6d4718e 100644
18536--- a/arch/x86/kernel/asm-offsets.c
18537+++ b/arch/x86/kernel/asm-offsets.c
18538@@ -33,6 +33,8 @@ void common(void) {
18539 OFFSET(TI_status, thread_info, status);
18540 OFFSET(TI_addr_limit, thread_info, addr_limit);
18541 OFFSET(TI_preempt_count, thread_info, preempt_count);
18542+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
18543+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
18544
18545 BLANK();
18546 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
18547@@ -53,8 +55,26 @@ void common(void) {
18548 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
18549 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
18550 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
18551+
18552+#ifdef CONFIG_PAX_KERNEXEC
18553+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
18554 #endif
18555
18556+#ifdef CONFIG_PAX_MEMORY_UDEREF
18557+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
18558+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
18559+#ifdef CONFIG_X86_64
18560+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
18561+#endif
18562+#endif
18563+
18564+#endif
18565+
18566+ BLANK();
18567+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
18568+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
18569+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
18570+
18571 #ifdef CONFIG_XEN
18572 BLANK();
18573 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
18574diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
18575index e7c798b..2b2019b 100644
18576--- a/arch/x86/kernel/asm-offsets_64.c
18577+++ b/arch/x86/kernel/asm-offsets_64.c
18578@@ -77,6 +77,7 @@ int main(void)
18579 BLANK();
18580 #undef ENTRY
18581
18582+ DEFINE(TSS_size, sizeof(struct tss_struct));
18583 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
18584 BLANK();
18585
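The asm-offsets additions feed assembly introduced elsewhere in the patch: TI_lowest_stack and TI_task_thread_sp0 are consumed by the pax_erase_kstack routine added to entry_32.S below, the *_asm constants expose PAGE_SIZE/PAGE_SHIFT/THREAD_SIZE to .S files, and TSS_size serves the 64-bit entry path. The mechanism is kbuild's usual trick: this file is compiled with -S and a sed pass turns marker lines in the generated assembly into generated/asm-offsets.h:

    /* include/linux/kbuild.h: each marker becomes "#define NAME val" */
    #define DEFINE(sym, val) \
    	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    #define BLANK() asm volatile("\n->" : : )

    #define OFFSET(sym, str, mem) \
    	DEFINE(sym, offsetof(struct str, mem))
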
18586diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
18587index b0684e4..22ccfd7 100644
18588--- a/arch/x86/kernel/cpu/Makefile
18589+++ b/arch/x86/kernel/cpu/Makefile
18590@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
18591 CFLAGS_REMOVE_perf_event.o = -pg
18592 endif
18593
18594-# Make sure load_percpu_segment has no stackprotector
18595-nostackp := $(call cc-option, -fno-stack-protector)
18596-CFLAGS_common.o := $(nostackp)
18597-
18598 obj-y := intel_cacheinfo.o scattered.o topology.o
18599 obj-y += proc.o capflags.o powerflags.o common.o
18600 obj-y += rdrand.o
18601diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
18602index 5013a48..0782c53 100644
18603--- a/arch/x86/kernel/cpu/amd.c
18604+++ b/arch/x86/kernel/cpu/amd.c
18605@@ -744,7 +744,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
18606 unsigned int size)
18607 {
18608 /* AMD errata T13 (order #21922) */
18609- if ((c->x86 == 6)) {
18610+ if (c->x86 == 6) {
18611 /* Duron Rev A0 */
18612 if (c->x86_model == 3 && c->x86_mask == 0)
18613 size = 64;
18614diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
18615index 22018f7..2ae0e75 100644
18616--- a/arch/x86/kernel/cpu/common.c
18617+++ b/arch/x86/kernel/cpu/common.c
18618@@ -88,60 +88,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
18619
18620 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
18621
18622-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
18623-#ifdef CONFIG_X86_64
18624- /*
18625- * We need valid kernel segments for data and code in long mode too
18626- * IRET will check the segment types kkeil 2000/10/28
18627- * Also sysret mandates a special GDT layout
18628- *
18629- * TLS descriptors are currently at a different place compared to i386.
18630- * Hopefully nobody expects them at a fixed place (Wine?)
18631- */
18632- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
18633- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
18634- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
18635- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
18636- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
18637- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
18638-#else
18639- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
18640- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
18641- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
18642- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
18643- /*
18644- * Segments used for calling PnP BIOS have byte granularity.
18645- * They code segments and data segments have fixed 64k limits,
18646- * the transfer segment sizes are set at run time.
18647- */
18648- /* 32-bit code */
18649- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
18650- /* 16-bit code */
18651- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
18652- /* 16-bit data */
18653- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
18654- /* 16-bit data */
18655- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
18656- /* 16-bit data */
18657- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
18658- /*
18659- * The APM segments have byte granularity and their bases
18660- * are set at run time. All have 64k limits.
18661- */
18662- /* 32-bit code */
18663- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
18664- /* 16-bit code */
18665- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
18666- /* data */
18667- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
18668-
18669- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
18670- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
18671- GDT_STACK_CANARY_INIT
18672-#endif
18673-} };
18674-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
18675-
18676 static int __init x86_xsave_setup(char *s)
18677 {
18678 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
18679@@ -288,6 +234,53 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
18680 set_in_cr4(X86_CR4_SMAP);
18681 }
18682
18683+#ifdef CONFIG_X86_64
18684+static __init int setup_disable_pcid(char *arg)
18685+{
18686+ setup_clear_cpu_cap(X86_FEATURE_PCID);
18687+
18688+#ifdef CONFIG_PAX_MEMORY_UDEREF
18689+ if (clone_pgd_mask != ~(pgdval_t)0UL)
18690+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
18691+#endif
18692+
18693+ return 1;
18694+}
18695+__setup("nopcid", setup_disable_pcid);
18696+
18697+static void setup_pcid(struct cpuinfo_x86 *c)
18698+{
18699+ if (!cpu_has(c, X86_FEATURE_PCID)) {
18700+
18701+#ifdef CONFIG_PAX_MEMORY_UDEREF
18702+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
18703+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
18704+ printk("PAX: slow and weak UDEREF enabled\n");
18705+ } else
18706+ printk("PAX: UDEREF disabled\n");
18707+#endif
18708+
18709+ return;
18710+ }
18711+
18712+ printk("PAX: PCID detected\n");
18713+ set_in_cr4(X86_CR4_PCIDE);
18714+
18715+#ifdef CONFIG_PAX_MEMORY_UDEREF
18716+ clone_pgd_mask = ~(pgdval_t)0UL;
18717+ if (pax_user_shadow_base)
18718+ printk("PAX: weak UDEREF enabled\n");
18719+ else {
18720+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
18721+ printk("PAX: strong UDEREF enabled\n");
18722+ }
18723+#endif
18724+
18725+ if (cpu_has(c, X86_FEATURE_INVPCID))
18726+ printk("PAX: INVPCID detected\n");
18727+}
18728+#endif
18729+
18730 /*
18731 * Some CPU features depend on higher CPUID levels, which may not always
18732 * be available due to CPUID level capping or broken virtualization
18733@@ -386,7 +379,7 @@ void switch_to_new_gdt(int cpu)
18734 {
18735 struct desc_ptr gdt_descr;
18736
18737- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
18738+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
18739 gdt_descr.size = GDT_SIZE - 1;
18740 load_gdt(&gdt_descr);
18741 /* Reload the per-cpu base */
18742@@ -874,6 +867,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
18743 setup_smep(c);
18744 setup_smap(c);
18745
18746+#ifdef CONFIG_X86_64
18747+ setup_pcid(c);
18748+#endif
18749+
18750 /*
18751 * The vendor-specific functions might have changed features.
18752 * Now we do "generic changes."
18753@@ -882,6 +879,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
18754 /* Filter out anything that depends on CPUID levels we don't have */
18755 filter_cpuid_features(c, true);
18756
18757+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18758+ setup_clear_cpu_cap(X86_FEATURE_SEP);
18759+#endif
18760+
18761 /* If the model name is still unset, do table lookup. */
18762 if (!c->x86_model_id[0]) {
18763 const char *p;
18764@@ -1069,10 +1070,12 @@ static __init int setup_disablecpuid(char *arg)
18765 }
18766 __setup("clearcpuid=", setup_disablecpuid);
18767
18768+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
18769+EXPORT_PER_CPU_SYMBOL(current_tinfo);
18770+
18771 #ifdef CONFIG_X86_64
18772 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
18773-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
18774- (unsigned long) nmi_idt_table };
18775+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
18776
18777 DEFINE_PER_CPU_FIRST(union irq_stack_union,
18778 irq_stack_union) __aligned(PAGE_SIZE);
18779@@ -1086,7 +1089,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
18780 EXPORT_PER_CPU_SYMBOL(current_task);
18781
18782 DEFINE_PER_CPU(unsigned long, kernel_stack) =
18783- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
18784+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
18785 EXPORT_PER_CPU_SYMBOL(kernel_stack);
18786
18787 DEFINE_PER_CPU(char *, irq_stack_ptr) =
18788@@ -1231,7 +1234,7 @@ void __cpuinit cpu_init(void)
18789 load_ucode_ap();
18790
18791 cpu = stack_smp_processor_id();
18792- t = &per_cpu(init_tss, cpu);
18793+ t = init_tss + cpu;
18794 oist = &per_cpu(orig_ist, cpu);
18795
18796 #ifdef CONFIG_NUMA
18797@@ -1257,7 +1260,7 @@ void __cpuinit cpu_init(void)
18798 switch_to_new_gdt(cpu);
18799 loadsegment(fs, 0);
18800
18801- load_idt((const struct desc_ptr *)&idt_descr);
18802+ load_idt(&idt_descr);
18803
18804 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
18805 syscall_init();
18806@@ -1266,7 +1269,6 @@ void __cpuinit cpu_init(void)
18807 wrmsrl(MSR_KERNEL_GS_BASE, 0);
18808 barrier();
18809
18810- x86_configure_nx();
18811 enable_x2apic();
18812
18813 /*
18814@@ -1318,7 +1320,7 @@ void __cpuinit cpu_init(void)
18815 {
18816 int cpu = smp_processor_id();
18817 struct task_struct *curr = current;
18818- struct tss_struct *t = &per_cpu(init_tss, cpu);
18819+ struct tss_struct *t = init_tss + cpu;
18820 struct thread_struct *thread = &curr->thread;
18821
18822 show_ucode_info_early();
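
Two independent changes meet in common.c. The per-CPU gdt_page definition is deleted because this patch wants the GDT in protectable memory and defines it elsewhere; code that dereferenced gdt_page directly (see the perf_event hunk below) moves to get_cpu_gdt_table(). And setup_pcid() ties the PCID feature into PAX_MEMORY_UDEREF: with PCID, kernel and user TLB entries are tagged apart rather than cloning page tables, and without it (or with nopcid on the command line) user space is relocated to pax_user_shadow_base, the "slow and weak" mode the printks announce. A distillation of the branches above, as a hypothetical helper rather than patch code:

    /* Sketch only: the outcome matrix of setup_pcid(). */
    enum uderef_mode { UDEREF_SLOW_WEAK, UDEREF_WEAK, UDEREF_STRONG };

    static enum uderef_mode pick_uderef_mode(int has_pcid, int shadow_base)
    {
    	if (!has_pcid)
    		return UDEREF_SLOW_WEAK;	/* shadow remap, no TLB tags */
    	return shadow_base ? UDEREF_WEAK : UDEREF_STRONG;
    }
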
18823diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
18824index 7c6f7d5..8cac382 100644
18825--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
18826+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
18827@@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = {
18828 };
18829
18830 #ifdef CONFIG_AMD_NB
18831+static struct attribute *default_attrs_amd_nb[] = {
18832+ &type.attr,
18833+ &level.attr,
18834+ &coherency_line_size.attr,
18835+ &physical_line_partition.attr,
18836+ &ways_of_associativity.attr,
18837+ &number_of_sets.attr,
18838+ &size.attr,
18839+ &shared_cpu_map.attr,
18840+ &shared_cpu_list.attr,
18841+ NULL,
18842+ NULL,
18843+ NULL,
18844+ NULL
18845+};
18846+
18847 static struct attribute ** __cpuinit amd_l3_attrs(void)
18848 {
18849 static struct attribute **attrs;
18850@@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
18851
18852 n = ARRAY_SIZE(default_attrs);
18853
18854- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
18855- n += 2;
18856-
18857- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
18858- n += 1;
18859-
18860- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
18861- if (attrs == NULL)
18862- return attrs = default_attrs;
18863-
18864- for (n = 0; default_attrs[n]; n++)
18865- attrs[n] = default_attrs[n];
18866+ attrs = default_attrs_amd_nb;
18867
18868 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
18869 attrs[n++] = &cache_disable_0.attr;
18870@@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = {
18871 .default_attrs = default_attrs,
18872 };
18873
18874+#ifdef CONFIG_AMD_NB
18875+static struct kobj_type ktype_cache_amd_nb = {
18876+ .sysfs_ops = &sysfs_ops,
18877+ .default_attrs = default_attrs_amd_nb,
18878+};
18879+#endif
18880+
18881 static struct kobj_type ktype_percpu_entry = {
18882 .sysfs_ops = &sysfs_ops,
18883 };
18884@@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
18885 return retval;
18886 }
18887
18888+#ifdef CONFIG_AMD_NB
18889+ amd_l3_attrs();
18890+#endif
18891+
18892 for (i = 0; i < num_cache_leaves; i++) {
18893+ struct kobj_type *ktype;
18894+
18895 this_object = INDEX_KOBJECT_PTR(cpu, i);
18896 this_object->cpu = cpu;
18897 this_object->index = i;
18898
18899 this_leaf = CPUID4_INFO_IDX(cpu, i);
18900
18901- ktype_cache.default_attrs = default_attrs;
18902+ ktype = &ktype_cache;
18903 #ifdef CONFIG_AMD_NB
18904 if (this_leaf->base.nb)
18905- ktype_cache.default_attrs = amd_l3_attrs();
18906+ ktype = &ktype_cache_amd_nb;
18907 #endif
18908 retval = kobject_init_and_add(&(this_object->kobj),
18909- &ktype_cache,
18910+ ktype,
18911 per_cpu(ici_cache_kobject, cpu),
18912 "index%1lu", i);
18913 if (unlikely(retval)) {
18914@@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
18915 return NOTIFY_OK;
18916 }
18917
18918-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
18919+static struct notifier_block cacheinfo_cpu_notifier = {
18920 .notifier_call = cacheinfo_cpu_callback,
18921 };
18922
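The cacheinfo rework retires a racy idiom: each CPU used to allocate or rewrite the shared ktype_cache.default_attrs before registering its kobject, which raced across hotplug and kept struct kobj_type mutable. Instead, default_attrs_amd_nb reserves four trailing NULL slots (up to three optional AMD L3 attributes plus the terminator) that amd_l3_attrs() fills exactly once, and every kobject registers against one of two immutable ktypes. The reserved-slot pattern in a standalone form, names illustrative:

    #include <stddef.h>

    /* Sketch: NULL-terminated table with spare capacity, appended to
     * once at startup instead of being reallocated per consumer. */
    static const char *features[8] = { "base" };	/* rest start NULL */

    static void enable_optional(const char *name)
    {
    	size_t i;

    	for (i = 0; features[i]; i++)
    		;
    	if (i + 1 < 8)		/* keep the terminator intact */
    		features[i] = name;
    }
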
18923diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
18924index 9239504..b2471ce 100644
18925--- a/arch/x86/kernel/cpu/mcheck/mce.c
18926+++ b/arch/x86/kernel/cpu/mcheck/mce.c
18927@@ -45,6 +45,7 @@
18928 #include <asm/processor.h>
18929 #include <asm/mce.h>
18930 #include <asm/msr.h>
18931+#include <asm/local.h>
18932
18933 #include "mce-internal.h"
18934
18935@@ -246,7 +247,7 @@ static void print_mce(struct mce *m)
18936 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
18937 m->cs, m->ip);
18938
18939- if (m->cs == __KERNEL_CS)
18940+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
18941 print_symbol("{%s}", m->ip);
18942 pr_cont("\n");
18943 }
18944@@ -279,10 +280,10 @@ static void print_mce(struct mce *m)
18945
18946 #define PANIC_TIMEOUT 5 /* 5 seconds */
18947
18948-static atomic_t mce_paniced;
18949+static atomic_unchecked_t mce_paniced;
18950
18951 static int fake_panic;
18952-static atomic_t mce_fake_paniced;
18953+static atomic_unchecked_t mce_fake_paniced;
18954
18955 /* Panic in progress. Enable interrupts and wait for final IPI */
18956 static void wait_for_panic(void)
18957@@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
18958 /*
18959 * Make sure only one CPU runs in machine check panic
18960 */
18961- if (atomic_inc_return(&mce_paniced) > 1)
18962+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
18963 wait_for_panic();
18964 barrier();
18965
18966@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
18967 console_verbose();
18968 } else {
18969 /* Don't log too much for fake panic */
18970- if (atomic_inc_return(&mce_fake_paniced) > 1)
18971+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
18972 return;
18973 }
18974 /* First print corrected ones that are still unlogged */
18975@@ -353,7 +354,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
18976 if (!fake_panic) {
18977 if (panic_timeout == 0)
18978 panic_timeout = mca_cfg.panic_timeout;
18979- panic(msg);
18980+ panic("%s", msg);
18981 } else
18982 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
18983 }
18984@@ -683,7 +684,7 @@ static int mce_timed_out(u64 *t)
18985 * might have been modified by someone else.
18986 */
18987 rmb();
18988- if (atomic_read(&mce_paniced))
18989+ if (atomic_read_unchecked(&mce_paniced))
18990 wait_for_panic();
18991 if (!mca_cfg.monarch_timeout)
18992 goto out;
18993@@ -1654,7 +1655,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
18994 }
18995
18996 /* Call the installed machine check handler for this CPU setup. */
18997-void (*machine_check_vector)(struct pt_regs *, long error_code) =
18998+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
18999 unexpected_machine_check;
19000
19001 /*
19002@@ -1677,7 +1678,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
19003 return;
19004 }
19005
19006+ pax_open_kernel();
19007 machine_check_vector = do_machine_check;
19008+ pax_close_kernel();
19009
19010 __mcheck_cpu_init_generic();
19011 __mcheck_cpu_init_vendor(c);
19012@@ -1691,7 +1694,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
19013 */
19014
19015 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
19016-static int mce_chrdev_open_count; /* #times opened */
19017+static local_t mce_chrdev_open_count; /* #times opened */
19018 static int mce_chrdev_open_exclu; /* already open exclusive? */
19019
19020 static int mce_chrdev_open(struct inode *inode, struct file *file)
19021@@ -1699,7 +1702,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
19022 spin_lock(&mce_chrdev_state_lock);
19023
19024 if (mce_chrdev_open_exclu ||
19025- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
19026+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
19027 spin_unlock(&mce_chrdev_state_lock);
19028
19029 return -EBUSY;
19030@@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
19031
19032 if (file->f_flags & O_EXCL)
19033 mce_chrdev_open_exclu = 1;
19034- mce_chrdev_open_count++;
19035+ local_inc(&mce_chrdev_open_count);
19036
19037 spin_unlock(&mce_chrdev_state_lock);
19038
19039@@ -1718,7 +1721,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
19040 {
19041 spin_lock(&mce_chrdev_state_lock);
19042
19043- mce_chrdev_open_count--;
19044+ local_dec(&mce_chrdev_open_count);
19045 mce_chrdev_open_exclu = 0;
19046
19047 spin_unlock(&mce_chrdev_state_lock);
19048@@ -2364,7 +2367,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
19049 return NOTIFY_OK;
19050 }
19051
19052-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
19053+static struct notifier_block mce_cpu_notifier = {
19054 .notifier_call = mce_cpu_callback,
19055 };
19056
19057@@ -2374,7 +2377,7 @@ static __init void mce_init_banks(void)
19058
19059 for (i = 0; i < mca_cfg.banks; i++) {
19060 struct mce_bank *b = &mce_banks[i];
19061- struct device_attribute *a = &b->attr;
19062+ device_attribute_no_const *a = &b->attr;
19063
19064 sysfs_attr_init(&a->attr);
19065 a->attr.name = b->attrname;
19066@@ -2442,7 +2445,7 @@ struct dentry *mce_get_debugfs_dir(void)
19067 static void mce_reset(void)
19068 {
19069 cpu_missing = 0;
19070- atomic_set(&mce_fake_paniced, 0);
19071+ atomic_set_unchecked(&mce_fake_paniced, 0);
19072 atomic_set(&mce_executing, 0);
19073 atomic_set(&mce_callin, 0);
19074 atomic_set(&global_nwo, 0);
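
Alongside the atomic_unchecked_t conversions, two smaller idioms land in mce.c: panic(msg) becomes panic("%s", msg) so a message that ever carries % directives cannot be parsed as a format string, and mce_chrdev_open_count becomes a local_t, a counter type the REFCOUNT instrumentation leaves alone (the count is already serialized by mce_chrdev_state_lock). The format-string hazard in a standalone form:

    #include <stdio.h>

    int main(void)
    {
    	/* hypothetical attacker-influenced text */
    	const char *msg = "mce: bank %s failed %n";

    	/* printf(msg) would interpret %s and %n against nonexistent
    	 * arguments; the "%s" wrapper prints the text verbatim */
    	printf("%s\n", msg);
    	return 0;
    }
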
19075diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
19076index 1c044b1..37a2a43 100644
19077--- a/arch/x86/kernel/cpu/mcheck/p5.c
19078+++ b/arch/x86/kernel/cpu/mcheck/p5.c
19079@@ -11,6 +11,7 @@
19080 #include <asm/processor.h>
19081 #include <asm/mce.h>
19082 #include <asm/msr.h>
19083+#include <asm/pgtable.h>
19084
19085 /* By default disabled */
19086 int mce_p5_enabled __read_mostly;
19087@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
19088 if (!cpu_has(c, X86_FEATURE_MCE))
19089 return;
19090
19091+ pax_open_kernel();
19092 machine_check_vector = pentium_machine_check;
19093+ pax_close_kernel();
19094 /* Make sure the vector pointer is visible before we enable MCEs: */
19095 wmb();
19096
19097diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
19098index 47a1870..8c019a7 100644
19099--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
19100+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
19101@@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
19102 return notifier_from_errno(err);
19103 }
19104
19105-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
19106+static struct notifier_block thermal_throttle_cpu_notifier =
19107 {
19108 .notifier_call = thermal_throttle_cpu_callback,
19109 };
19110diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
19111index e9a701a..35317d6 100644
19112--- a/arch/x86/kernel/cpu/mcheck/winchip.c
19113+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
19114@@ -10,6 +10,7 @@
19115 #include <asm/processor.h>
19116 #include <asm/mce.h>
19117 #include <asm/msr.h>
19118+#include <asm/pgtable.h>
19119
19120 /* Machine check handler for WinChip C6: */
19121 static void winchip_machine_check(struct pt_regs *regs, long error_code)
19122@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
19123 {
19124 u32 lo, hi;
19125
19126+ pax_open_kernel();
19127 machine_check_vector = winchip_machine_check;
19128+ pax_close_kernel();
19129 /* Make sure the vector pointer is visible before we enable MCEs: */
19130 wmb();
19131
19132diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
19133index ca22b73..9987afe 100644
19134--- a/arch/x86/kernel/cpu/mtrr/main.c
19135+++ b/arch/x86/kernel/cpu/mtrr/main.c
19136@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
19137 u64 size_or_mask, size_and_mask;
19138 static bool mtrr_aps_delayed_init;
19139
19140-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
19141+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
19142
19143 const struct mtrr_ops *mtrr_if;
19144
19145diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
19146index df5e41f..816c719 100644
19147--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
19148+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
19149@@ -25,7 +25,7 @@ struct mtrr_ops {
19150 int (*validate_add_page)(unsigned long base, unsigned long size,
19151 unsigned int type);
19152 int (*have_wrcomb)(void);
19153-};
19154+} __do_const;
19155
19156 extern int generic_get_free_region(unsigned long base, unsigned long size,
19157 int replace_reg);
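
__do_const comes from grsecurity's constify GCC plugin: structures made up of function pointers, like struct mtrr_ops, are forced const so the ops tables end up in .rodata, and the __do_const tag requests that treatment for the type explicitly. The net effect matches writing the table out as const by hand; a sketch with an illustrative type:

    /* Sketch: a const ops table lands in .rodata, so overwriting its
     * function pointers faults instead of redirecting the kernel. */
    struct demo_ops {
    	int (*have_wrcomb)(void);
    };

    static int demo_have_wrcomb(void)
    {
    	return 0;
    }

    static const struct demo_ops demo_mtrr_ops = {
    	.have_wrcomb = demo_have_wrcomb,
    };
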
19158diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
19159index 1025f3c..824f677 100644
19160--- a/arch/x86/kernel/cpu/perf_event.c
19161+++ b/arch/x86/kernel/cpu/perf_event.c
19162@@ -1311,7 +1311,7 @@ static void __init pmu_check_apic(void)
19163 pr_info("no hardware sampling interrupt available.\n");
19164 }
19165
19166-static struct attribute_group x86_pmu_format_group = {
19167+static attribute_group_no_const x86_pmu_format_group = {
19168 .name = "format",
19169 .attrs = NULL,
19170 };
19171@@ -1410,7 +1410,7 @@ static struct attribute *events_attr[] = {
19172 NULL,
19173 };
19174
19175-static struct attribute_group x86_pmu_events_group = {
19176+static attribute_group_no_const x86_pmu_events_group = {
19177 .name = "events",
19178 .attrs = events_attr,
19179 };
19180@@ -1920,7 +1920,7 @@ static unsigned long get_segment_base(unsigned int segment)
19181 if (idx > GDT_ENTRIES)
19182 return 0;
19183
19184- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
19185+ desc = get_cpu_gdt_table(smp_processor_id());
19186 }
19187
19188 return get_desc_base(desc + idx);
19189@@ -2010,7 +2010,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
19190 break;
19191
19192 perf_callchain_store(entry, frame.return_address);
19193- fp = frame.next_frame;
19194+ fp = (const void __force_user *)frame.next_frame;
19195 }
19196 }
19197
19198diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
19199index a9e2207..d70c83a 100644
19200--- a/arch/x86/kernel/cpu/perf_event_intel.c
19201+++ b/arch/x86/kernel/cpu/perf_event_intel.c
19202@@ -2022,10 +2022,10 @@ __init int intel_pmu_init(void)
19203 * v2 and above have a perf capabilities MSR
19204 */
19205 if (version > 1) {
19206- u64 capabilities;
19207+ u64 capabilities = x86_pmu.intel_cap.capabilities;
19208
19209- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
19210- x86_pmu.intel_cap.capabilities = capabilities;
19211+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
19212+ x86_pmu.intel_cap.capabilities = capabilities;
19213 }
19214
19215 intel_ds_init();
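
The capabilities probe switches to rdmsrl_safe(), which routes a faulting RDMSR through the exception table instead of taking an unhandled #GP; hypervisors that do not expose MSR_IA32_PERF_CAPABILITIES make that fault realistic. Note the reordering: capabilities now holds the previously initialized value before the read, and is written back only when the safe read reports failure. The accessor convention, with an illustrative MSR name:

    /* Sketch: the _safe MSR accessors return 0 on success and nonzero
     * when the access faulted, leaving the caller a fallback path.
     * MSR_SOME_OPTIONAL is a placeholder, not a real register. */
    u64 val;

    if (rdmsrl_safe(MSR_SOME_OPTIONAL, &val))
    	val = 0;	/* faulted: pick a safe default */
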
19216diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
19217index 52441a2..f94fae8 100644
19218--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
19219+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
19220@@ -3093,7 +3093,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
19221 static int __init uncore_type_init(struct intel_uncore_type *type)
19222 {
19223 struct intel_uncore_pmu *pmus;
19224- struct attribute_group *attr_group;
19225+ attribute_group_no_const *attr_group;
19226 struct attribute **attrs;
19227 int i, j;
19228
19229@@ -3518,7 +3518,7 @@ static int
19230 return NOTIFY_OK;
19231 }
19232
19233-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
19234+static struct notifier_block uncore_cpu_nb = {
19235 .notifier_call = uncore_cpu_notifier,
19236 /*
19237 * to migrate uncore events, our notifier should be executed
19238diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
19239index f952891..4722ad4 100644
19240--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
19241+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
19242@@ -488,7 +488,7 @@ struct intel_uncore_box {
19243 struct uncore_event_desc {
19244 struct kobj_attribute attr;
19245 const char *config;
19246-};
19247+} __do_const;
19248
19249 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
19250 { \
19251diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
19252index 1e4dbcf..b9a34c2 100644
19253--- a/arch/x86/kernel/cpuid.c
19254+++ b/arch/x86/kernel/cpuid.c
19255@@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
19256 return notifier_from_errno(err);
19257 }
19258
19259-static struct notifier_block __refdata cpuid_class_cpu_notifier =
19260+static struct notifier_block cpuid_class_cpu_notifier =
19261 {
19262 .notifier_call = cpuid_class_cpu_callback,
19263 };
19264diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
19265index 74467fe..18793d5 100644
19266--- a/arch/x86/kernel/crash.c
19267+++ b/arch/x86/kernel/crash.c
19268@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
19269 {
19270 #ifdef CONFIG_X86_32
19271 struct pt_regs fixed_regs;
19272-#endif
19273
19274-#ifdef CONFIG_X86_32
19275- if (!user_mode_vm(regs)) {
19276+ if (!user_mode(regs)) {
19277 crash_fixup_ss_esp(&fixed_regs, regs);
19278 regs = &fixed_regs;
19279 }
19280diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
19281index afa64ad..dce67dd 100644
19282--- a/arch/x86/kernel/crash_dump_64.c
19283+++ b/arch/x86/kernel/crash_dump_64.c
19284@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
19285 return -ENOMEM;
19286
19287 if (userbuf) {
19288- if (copy_to_user(buf, vaddr + offset, csize)) {
19289+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
19290 iounmap(vaddr);
19291 return -EFAULT;
19292 }
19293diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
19294index 155a13f..1672b9b 100644
19295--- a/arch/x86/kernel/doublefault_32.c
19296+++ b/arch/x86/kernel/doublefault_32.c
19297@@ -11,7 +11,7 @@
19298
19299 #define DOUBLEFAULT_STACKSIZE (1024)
19300 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
19301-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
19302+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
19303
19304 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
19305
19306@@ -21,7 +21,7 @@ static void doublefault_fn(void)
19307 unsigned long gdt, tss;
19308
19309 native_store_gdt(&gdt_desc);
19310- gdt = gdt_desc.address;
19311+ gdt = (unsigned long)gdt_desc.address;
19312
19313 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
19314
19315@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
19316 /* 0x2 bit is always set */
19317 .flags = X86_EFLAGS_SF | 0x2,
19318 .sp = STACK_START,
19319- .es = __USER_DS,
19320+ .es = __KERNEL_DS,
19321 .cs = __KERNEL_CS,
19322 .ss = __KERNEL_DS,
19323- .ds = __USER_DS,
19324+ .ds = __KERNEL_DS,
19325 .fs = __KERNEL_PERCPU,
19326
19327 .__cr3 = __pa_nodebug(swapper_pg_dir),
19328diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
19329index deb6421..76bbc12 100644
19330--- a/arch/x86/kernel/dumpstack.c
19331+++ b/arch/x86/kernel/dumpstack.c
19332@@ -2,6 +2,9 @@
19333 * Copyright (C) 1991, 1992 Linus Torvalds
19334 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
19335 */
19336+#ifdef CONFIG_GRKERNSEC_HIDESYM
19337+#define __INCLUDED_BY_HIDESYM 1
19338+#endif
19339 #include <linux/kallsyms.h>
19340 #include <linux/kprobes.h>
19341 #include <linux/uaccess.h>
19342@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
19343 static void
19344 print_ftrace_graph_addr(unsigned long addr, void *data,
19345 const struct stacktrace_ops *ops,
19346- struct thread_info *tinfo, int *graph)
19347+ struct task_struct *task, int *graph)
19348 {
19349- struct task_struct *task;
19350 unsigned long ret_addr;
19351 int index;
19352
19353 if (addr != (unsigned long)return_to_handler)
19354 return;
19355
19356- task = tinfo->task;
19357 index = task->curr_ret_stack;
19358
19359 if (!task->ret_stack || index < *graph)
19360@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
19361 static inline void
19362 print_ftrace_graph_addr(unsigned long addr, void *data,
19363 const struct stacktrace_ops *ops,
19364- struct thread_info *tinfo, int *graph)
19365+ struct task_struct *task, int *graph)
19366 { }
19367 #endif
19368
19369@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
19370 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
19371 */
19372
19373-static inline int valid_stack_ptr(struct thread_info *tinfo,
19374- void *p, unsigned int size, void *end)
19375+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
19376 {
19377- void *t = tinfo;
19378 if (end) {
19379 if (p < end && p >= (end-THREAD_SIZE))
19380 return 1;
19381@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
19382 }
19383
19384 unsigned long
19385-print_context_stack(struct thread_info *tinfo,
19386+print_context_stack(struct task_struct *task, void *stack_start,
19387 unsigned long *stack, unsigned long bp,
19388 const struct stacktrace_ops *ops, void *data,
19389 unsigned long *end, int *graph)
19390 {
19391 struct stack_frame *frame = (struct stack_frame *)bp;
19392
19393- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
19394+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
19395 unsigned long addr;
19396
19397 addr = *stack;
19398@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
19399 } else {
19400 ops->address(data, addr, 0);
19401 }
19402- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
19403+ print_ftrace_graph_addr(addr, data, ops, task, graph);
19404 }
19405 stack++;
19406 }
19407@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
19408 EXPORT_SYMBOL_GPL(print_context_stack);
19409
19410 unsigned long
19411-print_context_stack_bp(struct thread_info *tinfo,
19412+print_context_stack_bp(struct task_struct *task, void *stack_start,
19413 unsigned long *stack, unsigned long bp,
19414 const struct stacktrace_ops *ops, void *data,
19415 unsigned long *end, int *graph)
19416@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
19417 struct stack_frame *frame = (struct stack_frame *)bp;
19418 unsigned long *ret_addr = &frame->return_address;
19419
19420- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
19421+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
19422 unsigned long addr = *ret_addr;
19423
19424 if (!__kernel_text_address(addr))
19425@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
19426 ops->address(data, addr, 1);
19427 frame = frame->next_frame;
19428 ret_addr = &frame->return_address;
19429- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
19430+ print_ftrace_graph_addr(addr, data, ops, task, graph);
19431 }
19432
19433 return (unsigned long)frame;
19434@@ -150,7 +149,7 @@ static int print_trace_stack(void *data, char *name)
19435 static void print_trace_address(void *data, unsigned long addr, int reliable)
19436 {
19437 touch_nmi_watchdog();
19438- printk(data);
19439+ printk("%s", (char *)data);
19440 printk_address(addr, reliable);
19441 }
19442
19443@@ -219,6 +218,8 @@ unsigned __kprobes long oops_begin(void)
19444 }
19445 EXPORT_SYMBOL_GPL(oops_begin);
19446
19447+extern void gr_handle_kernel_exploit(void);
19448+
19449 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
19450 {
19451 if (regs && kexec_should_crash(current))
19452@@ -240,7 +241,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
19453 panic("Fatal exception in interrupt");
19454 if (panic_on_oops)
19455 panic("Fatal exception");
19456- do_exit(signr);
19457+
19458+ gr_handle_kernel_exploit();
19459+
19460+ do_group_exit(signr);
19461 }
19462
19463 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
19464@@ -268,7 +272,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
19465 print_modules();
19466 show_regs(regs);
19467 #ifdef CONFIG_X86_32
19468- if (user_mode_vm(regs)) {
19469+ if (user_mode(regs)) {
19470 sp = regs->sp;
19471 ss = regs->ss & 0xffff;
19472 } else {
19473@@ -296,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
19474 unsigned long flags = oops_begin();
19475 int sig = SIGSEGV;
19476
19477- if (!user_mode_vm(regs))
19478+ if (!user_mode(regs))
19479 report_bug(regs->ip, regs);
19480
19481 if (__die(str, regs, err))
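
Three threads run through the dumpstack.c changes. The stack walkers stop deriving everything from a thread_info at the stack bottom and instead take the task plus an explicit stack_start, groundwork for this patch moving thread_info off the stack. printk(data) becomes printk("%s", (char *)data), the same format-string discipline as in mce.c and e820.c. And oops_end() now calls gr_handle_kernel_exploit(), grsecurity's hook for treating a kernel oops as a suspected failed exploit (by configuration it can go beyond logging and act against the offending user), before using do_group_exit() so the whole thread group of the faulting task exits rather than leaving sibling threads running in a possibly corrupted address space. The new walker signature, as the hunks above define it:

    unsigned long
    print_context_stack(struct task_struct *task, void *stack_start,
    		    unsigned long *stack, unsigned long bp,
    		    const struct stacktrace_ops *ops, void *data,
    		    unsigned long *end, int *graph);
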
19482diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
19483index f2a1770..540657f 100644
19484--- a/arch/x86/kernel/dumpstack_32.c
19485+++ b/arch/x86/kernel/dumpstack_32.c
19486@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
19487 bp = stack_frame(task, regs);
19488
19489 for (;;) {
19490- struct thread_info *context;
19491+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
19492
19493- context = (struct thread_info *)
19494- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
19495- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
19496+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
19497
19498- stack = (unsigned long *)context->previous_esp;
19499- if (!stack)
19500+ if (stack_start == task_stack_page(task))
19501 break;
19502+ stack = *(unsigned long **)stack_start;
19503 if (ops->stack(data, "IRQ") < 0)
19504 break;
19505 touch_nmi_watchdog();
19506@@ -87,27 +85,28 @@ void show_regs(struct pt_regs *regs)
19507 int i;
19508
19509 show_regs_print_info(KERN_EMERG);
19510- __show_regs(regs, !user_mode_vm(regs));
19511+ __show_regs(regs, !user_mode(regs));
19512
19513 /*
19514 * When in-kernel, we also print out the stack and code at the
19515 * time of the fault..
19516 */
19517- if (!user_mode_vm(regs)) {
19518+ if (!user_mode(regs)) {
19519 unsigned int code_prologue = code_bytes * 43 / 64;
19520 unsigned int code_len = code_bytes;
19521 unsigned char c;
19522 u8 *ip;
19523+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
19524
19525 pr_emerg("Stack:\n");
19526 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
19527
19528 pr_emerg("Code:");
19529
19530- ip = (u8 *)regs->ip - code_prologue;
19531+ ip = (u8 *)regs->ip - code_prologue + cs_base;
19532 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
19533 /* try starting at IP */
19534- ip = (u8 *)regs->ip;
19535+ ip = (u8 *)regs->ip + cs_base;
19536 code_len = code_len - code_prologue + 1;
19537 }
19538 for (i = 0; i < code_len; i++, ip++) {
19539@@ -116,7 +115,7 @@ void show_regs(struct pt_regs *regs)
19540 pr_cont(" Bad EIP value.");
19541 break;
19542 }
19543- if (ip == (u8 *)regs->ip)
19544+ if (ip == (u8 *)regs->ip + cs_base)
19545 pr_cont(" <%02x>", c);
19546 else
19547 pr_cont(" %02x", c);
19548@@ -129,6 +128,7 @@ int is_valid_bugaddr(unsigned long ip)
19549 {
19550 unsigned short ud2;
19551
19552+ ip = ktla_ktva(ip);
19553 if (ip < PAGE_OFFSET)
19554 return 0;
19555 if (probe_kernel_address((unsigned short *)ip, ud2))
19556@@ -136,3 +136,15 @@ int is_valid_bugaddr(unsigned long ip)
19557
19558 return ud2 == 0x0b0f;
19559 }
19560+
19561+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19562+void pax_check_alloca(unsigned long size)
19563+{
19564+ unsigned long sp = (unsigned long)&sp, stack_left;
19565+
19566+ /* all kernel stacks are of the same size */
19567+ stack_left = sp & (THREAD_SIZE - 1);
19568+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
19569+}
19570+EXPORT_SYMBOL(pax_check_alloca);
19571+#endif
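
pax_check_alloca() is the runtime half of PAX_MEMORY_STACKLEAK: a GCC plugin plants a call to it at every alloca or variable-length-array site, and the function BUGs unless at least 256 bytes of headroom remain on the current stack. The 32-bit version can assume a single stack geometry; the 64-bit version in the next file must also recognize the per-CPU IRQ and exception stacks. What instrumented code looks like, assuming the plugin emits the check ahead of the allocation:

    extern void pax_check_alloca(unsigned long size);

    /* Sketch: a VLA user after STACKLEAK instrumentation. */
    static void demo(unsigned long n)
    {
    	pax_check_alloca(n * sizeof(long));	/* inserted by plugin */
    	{
    		long buf[n];			/* the original VLA */

    		buf[0] = 0;
    	}
    }
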
19572diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
19573index addb207..99635fa 100644
19574--- a/arch/x86/kernel/dumpstack_64.c
19575+++ b/arch/x86/kernel/dumpstack_64.c
19576@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
19577 unsigned long *irq_stack_end =
19578 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
19579 unsigned used = 0;
19580- struct thread_info *tinfo;
19581 int graph = 0;
19582 unsigned long dummy;
19583+ void *stack_start;
19584
19585 if (!task)
19586 task = current;
19587@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
19588 * current stack address. If the stacks consist of nested
19589 * exceptions
19590 */
19591- tinfo = task_thread_info(task);
19592 for (;;) {
19593 char *id;
19594 unsigned long *estack_end;
19595+
19596 estack_end = in_exception_stack(cpu, (unsigned long)stack,
19597 &used, &id);
19598
19599@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
19600 if (ops->stack(data, id) < 0)
19601 break;
19602
19603- bp = ops->walk_stack(tinfo, stack, bp, ops,
19604+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
19605 data, estack_end, &graph);
19606 ops->stack(data, "<EOE>");
19607 /*
19608@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
19609 * second-to-last pointer (index -2 to end) in the
19610 * exception stack:
19611 */
19612+ if ((u16)estack_end[-1] != __KERNEL_DS)
19613+ goto out;
19614 stack = (unsigned long *) estack_end[-2];
19615 continue;
19616 }
19617@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
19618 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
19619 if (ops->stack(data, "IRQ") < 0)
19620 break;
19621- bp = ops->walk_stack(tinfo, stack, bp,
19622+ bp = ops->walk_stack(task, irq_stack, stack, bp,
19623 ops, data, irq_stack_end, &graph);
19624 /*
19625 * We link to the next stack (which would be
19626@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
19627 /*
19628 * This handles the process stack:
19629 */
19630- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
19631+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
19632+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
19633+out:
19634 put_cpu();
19635 }
19636 EXPORT_SYMBOL(dump_trace);
19637@@ -300,3 +304,50 @@ int is_valid_bugaddr(unsigned long ip)
19638
19639 return ud2 == 0x0b0f;
19640 }
19641+
19642+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19643+void pax_check_alloca(unsigned long size)
19644+{
19645+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
19646+ unsigned cpu, used;
19647+ char *id;
19648+
19649+ /* check the process stack first */
19650+ stack_start = (unsigned long)task_stack_page(current);
19651+ stack_end = stack_start + THREAD_SIZE;
19652+ if (likely(stack_start <= sp && sp < stack_end)) {
19653+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
19654+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
19655+ return;
19656+ }
19657+
19658+ cpu = get_cpu();
19659+
19660+ /* check the irq stacks */
19661+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
19662+ stack_start = stack_end - IRQ_STACK_SIZE;
19663+ if (stack_start <= sp && sp < stack_end) {
19664+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
19665+ put_cpu();
19666+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
19667+ return;
19668+ }
19669+
19670+ /* check the exception stacks */
19671+ used = 0;
19672+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
19673+ stack_start = stack_end - EXCEPTION_STKSZ;
19674+ if (stack_end && stack_start <= sp && sp < stack_end) {
19675+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
19676+ put_cpu();
19677+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
19678+ return;
19679+ }
19680+
19681+ put_cpu();
19682+
19683+ /* unknown stack */
19684+ BUG();
19685+}
19686+EXPORT_SYMBOL(pax_check_alloca);
19687+#endif
19688diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
19689index d32abea..74daf4f 100644
19690--- a/arch/x86/kernel/e820.c
19691+++ b/arch/x86/kernel/e820.c
19692@@ -800,8 +800,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
19693
19694 static void early_panic(char *msg)
19695 {
19696- early_printk(msg);
19697- panic(msg);
19698+ early_printk("%s", msg);
19699+ panic("%s", msg);
19700 }
19701
19702 static int userdef __initdata;
19703diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
19704index d15f575..d692043 100644
19705--- a/arch/x86/kernel/early_printk.c
19706+++ b/arch/x86/kernel/early_printk.c
19707@@ -7,6 +7,7 @@
19708 #include <linux/pci_regs.h>
19709 #include <linux/pci_ids.h>
19710 #include <linux/errno.h>
19711+#include <linux/sched.h>
19712 #include <asm/io.h>
19713 #include <asm/processor.h>
19714 #include <asm/fcntl.h>
19715diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
19716index 8f3e2de..6b71e39 100644
19717--- a/arch/x86/kernel/entry_32.S
19718+++ b/arch/x86/kernel/entry_32.S
19719@@ -177,13 +177,153 @@
19720 /*CFI_REL_OFFSET gs, PT_GS*/
19721 .endm
19722 .macro SET_KERNEL_GS reg
19723+
19724+#ifdef CONFIG_CC_STACKPROTECTOR
19725 movl $(__KERNEL_STACK_CANARY), \reg
19726+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
19727+ movl $(__USER_DS), \reg
19728+#else
19729+ xorl \reg, \reg
19730+#endif
19731+
19732 movl \reg, %gs
19733 .endm
19734
19735 #endif /* CONFIG_X86_32_LAZY_GS */
19736
19737-.macro SAVE_ALL
19738+.macro pax_enter_kernel
19739+#ifdef CONFIG_PAX_KERNEXEC
19740+ call pax_enter_kernel
19741+#endif
19742+.endm
19743+
19744+.macro pax_exit_kernel
19745+#ifdef CONFIG_PAX_KERNEXEC
19746+ call pax_exit_kernel
19747+#endif
19748+.endm
19749+
19750+#ifdef CONFIG_PAX_KERNEXEC
19751+ENTRY(pax_enter_kernel)
19752+#ifdef CONFIG_PARAVIRT
19753+ pushl %eax
19754+ pushl %ecx
19755+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
19756+ mov %eax, %esi
19757+#else
19758+ mov %cr0, %esi
19759+#endif
19760+ bts $16, %esi
19761+ jnc 1f
19762+ mov %cs, %esi
19763+ cmp $__KERNEL_CS, %esi
19764+ jz 3f
19765+ ljmp $__KERNEL_CS, $3f
19766+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
19767+2:
19768+#ifdef CONFIG_PARAVIRT
19769+ mov %esi, %eax
19770+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
19771+#else
19772+ mov %esi, %cr0
19773+#endif
19774+3:
19775+#ifdef CONFIG_PARAVIRT
19776+ popl %ecx
19777+ popl %eax
19778+#endif
19779+ ret
19780+ENDPROC(pax_enter_kernel)
19781+
19782+ENTRY(pax_exit_kernel)
19783+#ifdef CONFIG_PARAVIRT
19784+ pushl %eax
19785+ pushl %ecx
19786+#endif
19787+ mov %cs, %esi
19788+ cmp $__KERNEXEC_KERNEL_CS, %esi
19789+ jnz 2f
19790+#ifdef CONFIG_PARAVIRT
19791+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
19792+ mov %eax, %esi
19793+#else
19794+ mov %cr0, %esi
19795+#endif
19796+ btr $16, %esi
19797+ ljmp $__KERNEL_CS, $1f
19798+1:
19799+#ifdef CONFIG_PARAVIRT
19800+ mov %esi, %eax
19801+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
19802+#else
19803+ mov %esi, %cr0
19804+#endif
19805+2:
19806+#ifdef CONFIG_PARAVIRT
19807+ popl %ecx
19808+ popl %eax
19809+#endif
19810+ ret
19811+ENDPROC(pax_exit_kernel)
19812+#endif
19813+
19814+ .macro pax_erase_kstack
19815+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19816+ call pax_erase_kstack
19817+#endif
19818+ .endm
19819+
19820+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19821+/*
19822+ * ebp: thread_info
19823+ */
19824+ENTRY(pax_erase_kstack)
19825+ pushl %edi
19826+ pushl %ecx
19827+ pushl %eax
19828+
19829+ mov TI_lowest_stack(%ebp), %edi
19830+ mov $-0xBEEF, %eax
19831+ std
19832+
19833+1: mov %edi, %ecx
19834+ and $THREAD_SIZE_asm - 1, %ecx
19835+ shr $2, %ecx
19836+ repne scasl
19837+ jecxz 2f
19838+
19839+ cmp $2*16, %ecx
19840+ jc 2f
19841+
19842+ mov $2*16, %ecx
19843+ repe scasl
19844+ jecxz 2f
19845+ jne 1b
19846+
19847+2: cld
19848+ mov %esp, %ecx
19849+ sub %edi, %ecx
19850+
19851+ cmp $THREAD_SIZE_asm, %ecx
19852+ jb 3f
19853+ ud2
19854+3:
19855+
19856+ shr $2, %ecx
19857+ rep stosl
19858+
19859+ mov TI_task_thread_sp0(%ebp), %edi
19860+ sub $128, %edi
19861+ mov %edi, TI_lowest_stack(%ebp)
19862+
19863+ popl %eax
19864+ popl %ecx
19865+ popl %edi
19866+ ret
19867+ENDPROC(pax_erase_kstack)
19868+#endif
19869+
19870+.macro __SAVE_ALL _DS
19871 cld
19872 PUSH_GS
19873 pushl_cfi %fs
19874@@ -206,7 +346,7 @@
19875 CFI_REL_OFFSET ecx, 0
19876 pushl_cfi %ebx
19877 CFI_REL_OFFSET ebx, 0
19878- movl $(__USER_DS), %edx
19879+ movl $\_DS, %edx
19880 movl %edx, %ds
19881 movl %edx, %es
19882 movl $(__KERNEL_PERCPU), %edx
19883@@ -214,6 +354,15 @@
19884 SET_KERNEL_GS %edx
19885 .endm
19886
19887+.macro SAVE_ALL
19888+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
19889+ __SAVE_ALL __KERNEL_DS
19890+ pax_enter_kernel
19891+#else
19892+ __SAVE_ALL __USER_DS
19893+#endif
19894+.endm
19895+
19896 .macro RESTORE_INT_REGS
19897 popl_cfi %ebx
19898 CFI_RESTORE ebx
19899@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
19900 popfl_cfi
19901 jmp syscall_exit
19902 CFI_ENDPROC
19903-END(ret_from_fork)
19904+ENDPROC(ret_from_fork)
19905
19906 ENTRY(ret_from_kernel_thread)
19907 CFI_STARTPROC
19908@@ -344,7 +493,15 @@ ret_from_intr:
19909 andl $SEGMENT_RPL_MASK, %eax
19910 #endif
19911 cmpl $USER_RPL, %eax
19912+
19913+#ifdef CONFIG_PAX_KERNEXEC
19914+ jae resume_userspace
19915+
19916+ pax_exit_kernel
19917+ jmp resume_kernel
19918+#else
19919 jb resume_kernel # not returning to v8086 or userspace
19920+#endif
19921
19922 ENTRY(resume_userspace)
19923 LOCKDEP_SYS_EXIT
19924@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
19925 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
19926 # int/exception return?
19927 jne work_pending
19928- jmp restore_all
19929-END(ret_from_exception)
19930+ jmp restore_all_pax
19931+ENDPROC(ret_from_exception)
19932
19933 #ifdef CONFIG_PREEMPT
19934 ENTRY(resume_kernel)
19935@@ -372,7 +529,7 @@ need_resched:
19936 jz restore_all
19937 call preempt_schedule_irq
19938 jmp need_resched
19939-END(resume_kernel)
19940+ENDPROC(resume_kernel)
19941 #endif
19942 CFI_ENDPROC
19943 /*
19944@@ -406,30 +563,45 @@ sysenter_past_esp:
19945 /*CFI_REL_OFFSET cs, 0*/
19946 /*
19947 * Push current_thread_info()->sysenter_return to the stack.
19948- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
19949- * pushed above; +8 corresponds to copy_thread's esp0 setting.
19950 */
19951- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
19952+ pushl_cfi $0
19953 CFI_REL_OFFSET eip, 0
19954
19955 pushl_cfi %eax
19956 SAVE_ALL
19957+ GET_THREAD_INFO(%ebp)
19958+ movl TI_sysenter_return(%ebp),%ebp
19959+ movl %ebp,PT_EIP(%esp)
19960 ENABLE_INTERRUPTS(CLBR_NONE)
19961
19962 /*
19963 * Load the potential sixth argument from user stack.
19964 * Careful about security.
19965 */
19966+ movl PT_OLDESP(%esp),%ebp
19967+
19968+#ifdef CONFIG_PAX_MEMORY_UDEREF
19969+ mov PT_OLDSS(%esp),%ds
19970+1: movl %ds:(%ebp),%ebp
19971+ push %ss
19972+ pop %ds
19973+#else
19974 cmpl $__PAGE_OFFSET-3,%ebp
19975 jae syscall_fault
19976 ASM_STAC
19977 1: movl (%ebp),%ebp
19978 ASM_CLAC
19979+#endif
19980+
19981 movl %ebp,PT_EBP(%esp)
19982 _ASM_EXTABLE(1b,syscall_fault)
19983
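
The UDEREF branch above replaces the explicit range check with a hardware one: it loads %ds with the user's own stack segment from PT_OLDSS, whose limit ends at the top of userland, so the movl %ds:(%ebp),%ebp of the sixth argument faults on any kernel address without a compare. A minimal model of what the non-UDEREF path's cmpl $__PAGE_OFFSET-3 enforces instead; the PAGE_OFFSET value is an assumption for illustration:

#include <stdint.h>
#include <stdbool.h>

#define PAGE_OFFSET 0xc0000000u         /* assumed 3GB/1GB split */

/* A 4-byte user load at uaddr is allowed only if it fits entirely below
 * PAGE_OFFSET, which is exactly what "cmpl $__PAGE_OFFSET-3; jae
 * syscall_fault" rejects, and what the UDEREF segment limit rejects in
 * hardware. */
static bool user_word_readable(uint32_t uaddr)
{
        return uaddr <= PAGE_OFFSET - sizeof(uint32_t);
}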
19984 GET_THREAD_INFO(%ebp)
19985
19986+#ifdef CONFIG_PAX_RANDKSTACK
19987+ pax_erase_kstack
19988+#endif
19989+
19990 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
19991 jnz sysenter_audit
19992 sysenter_do_call:
19993@@ -444,12 +616,24 @@ sysenter_do_call:
19994 testl $_TIF_ALLWORK_MASK, %ecx
19995 jne sysexit_audit
19996 sysenter_exit:
19997+
19998+#ifdef CONFIG_PAX_RANDKSTACK
19999+ pushl_cfi %eax
20000+ movl %esp, %eax
20001+ call pax_randomize_kstack
20002+ popl_cfi %eax
20003+#endif
20004+
20005+ pax_erase_kstack
20006+
20007 /* if something modifies registers it must also disable sysexit */
20008 movl PT_EIP(%esp), %edx
20009 movl PT_OLDESP(%esp), %ecx
20010 xorl %ebp,%ebp
20011 TRACE_IRQS_ON
20012 1: mov PT_FS(%esp), %fs
20013+2: mov PT_DS(%esp), %ds
20014+3: mov PT_ES(%esp), %es
20015 PTGS_TO_GS
20016 ENABLE_INTERRUPTS_SYSEXIT
20017
20018@@ -466,6 +650,9 @@ sysenter_audit:
20019 movl %eax,%edx /* 2nd arg: syscall number */
20020 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
20021 call __audit_syscall_entry
20022+
20023+ pax_erase_kstack
20024+
20025 pushl_cfi %ebx
20026 movl PT_EAX(%esp),%eax /* reload syscall number */
20027 jmp sysenter_do_call
20028@@ -491,10 +678,16 @@ sysexit_audit:
20029
20030 CFI_ENDPROC
20031 .pushsection .fixup,"ax"
20032-2: movl $0,PT_FS(%esp)
20033+4: movl $0,PT_FS(%esp)
20034+ jmp 1b
20035+5: movl $0,PT_DS(%esp)
20036+ jmp 1b
20037+6: movl $0,PT_ES(%esp)
20038 jmp 1b
20039 .popsection
20040- _ASM_EXTABLE(1b,2b)
20041+ _ASM_EXTABLE(1b,4b)
20042+ _ASM_EXTABLE(2b,5b)
20043+ _ASM_EXTABLE(3b,6b)
20044 PTGS_TO_GS_EX
20045 ENDPROC(ia32_sysenter_target)
20046
20047@@ -509,6 +702,11 @@ ENTRY(system_call)
20048 pushl_cfi %eax # save orig_eax
20049 SAVE_ALL
20050 GET_THREAD_INFO(%ebp)
20051+
20052+#ifdef CONFIG_PAX_RANDKSTACK
20053+ pax_erase_kstack
20054+#endif
20055+
20056 # system call tracing in operation / emulation
20057 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
20058 jnz syscall_trace_entry
20059@@ -527,6 +725,15 @@ syscall_exit:
20060 testl $_TIF_ALLWORK_MASK, %ecx # current->work
20061 jne syscall_exit_work
20062
20063+restore_all_pax:
20064+
20065+#ifdef CONFIG_PAX_RANDKSTACK
20066+ movl %esp, %eax
20067+ call pax_randomize_kstack
20068+#endif
20069+
20070+ pax_erase_kstack
20071+
20072 restore_all:
20073 TRACE_IRQS_IRET
20074 restore_all_notrace:
20075@@ -583,14 +790,34 @@ ldt_ss:
20076 * compensating for the offset by changing to the ESPFIX segment with
20077 * a base address that matches for the difference.
20078 */
20079-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
20080+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
20081 mov %esp, %edx /* load kernel esp */
20082 mov PT_OLDESP(%esp), %eax /* load userspace esp */
20083 mov %dx, %ax /* eax: new kernel esp */
20084 sub %eax, %edx /* offset (low word is 0) */
20085+#ifdef CONFIG_SMP
20086+ movl PER_CPU_VAR(cpu_number), %ebx
20087+ shll $PAGE_SHIFT_asm, %ebx
20088+ addl $cpu_gdt_table, %ebx
20089+#else
20090+ movl $cpu_gdt_table, %ebx
20091+#endif
20092 shr $16, %edx
20093- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
20094- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
20095+
20096+#ifdef CONFIG_PAX_KERNEXEC
20097+ mov %cr0, %esi
20098+ btr $16, %esi
20099+ mov %esi, %cr0
20100+#endif
20101+
20102+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
20103+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
20104+
20105+#ifdef CONFIG_PAX_KERNEXEC
20106+ bts $16, %esi
20107+ mov %esi, %cr0
20108+#endif
20109+
20110 pushl_cfi $__ESPFIX_SS
20111 pushl_cfi %eax /* new kernel esp */
20112 /* Disable interrupts, but do not irqtrace this section: we
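
This hunk adapts the ESPFIX base patching to the per-CPU GDT layout the rest of the patch introduces: each CPU's GDT is one page of cpu_gdt_table, so the descriptor is addressed through %ebx (cpu_number shifted by PAGE_SHIFT) rather than PER_CPU_VAR(gdt_page), and under KERNEXEC the two byte stores are bracketed by clearing and restoring CR0.WP (bit 16) because the GDT pages are otherwise write-protected. A sketch of the descriptor update; the entry index is an assumption here:

#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE            4096
#define GDT_ENTRY_ESPFIX_SS  26         /* assumed index, illustration only */

struct gdt_desc { uint8_t raw[8]; };    /* one 8-byte segment descriptor */

/* Model of the "mov %dl, 4 + GDT_ESPFIX_SS" / "mov %dh, 7 + GDT_ESPFIX_SS"
 * stores: splice bits 16..23 and 24..31 of the new base into this CPU's
 * ESPFIX descriptor, where each CPU's GDT lives one page apart. */
static void set_espfix_base(struct gdt_desc *cpu_gdt_table, int cpu,
                            uint32_t base)
{
        struct gdt_desc *gdt = (struct gdt_desc *)
                ((char *)cpu_gdt_table + (size_t)cpu * PAGE_SIZE);

        gdt[GDT_ENTRY_ESPFIX_SS].raw[4] = (base >> 16) & 0xff;
        gdt[GDT_ENTRY_ESPFIX_SS].raw[7] = (base >> 24) & 0xff;
}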
20113@@ -619,20 +846,18 @@ work_resched:
20114 movl TI_flags(%ebp), %ecx
20115 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
20116 # than syscall tracing?
20117- jz restore_all
20118+ jz restore_all_pax
20119 testb $_TIF_NEED_RESCHED, %cl
20120 jnz work_resched
20121
20122 work_notifysig: # deal with pending signals and
20123 # notify-resume requests
20124+ movl %esp, %eax
20125 #ifdef CONFIG_VM86
20126 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
20127- movl %esp, %eax
20128 jne work_notifysig_v86 # returning to kernel-space or
20129 # vm86-space
20130 1:
20131-#else
20132- movl %esp, %eax
20133 #endif
20134 TRACE_IRQS_ON
20135 ENABLE_INTERRUPTS(CLBR_NONE)
20136@@ -653,7 +878,7 @@ work_notifysig_v86:
20137 movl %eax, %esp
20138 jmp 1b
20139 #endif
20140-END(work_pending)
20141+ENDPROC(work_pending)
20142
20143 # perform syscall exit tracing
20144 ALIGN
20145@@ -661,11 +886,14 @@ syscall_trace_entry:
20146 movl $-ENOSYS,PT_EAX(%esp)
20147 movl %esp, %eax
20148 call syscall_trace_enter
20149+
20150+ pax_erase_kstack
20151+
20152 /* What it returned is what we'll actually use. */
20153 cmpl $(NR_syscalls), %eax
20154 jnae syscall_call
20155 jmp syscall_exit
20156-END(syscall_trace_entry)
20157+ENDPROC(syscall_trace_entry)
20158
20159 # perform syscall exit tracing
20160 ALIGN
20161@@ -678,21 +906,25 @@ syscall_exit_work:
20162 movl %esp, %eax
20163 call syscall_trace_leave
20164 jmp resume_userspace
20165-END(syscall_exit_work)
20166+ENDPROC(syscall_exit_work)
20167 CFI_ENDPROC
20168
20169 RING0_INT_FRAME # can't unwind into user space anyway
20170 syscall_fault:
20171+#ifdef CONFIG_PAX_MEMORY_UDEREF
20172+ push %ss
20173+ pop %ds
20174+#endif
20175 ASM_CLAC
20176 GET_THREAD_INFO(%ebp)
20177 movl $-EFAULT,PT_EAX(%esp)
20178 jmp resume_userspace
20179-END(syscall_fault)
20180+ENDPROC(syscall_fault)
20181
20182 syscall_badsys:
20183 movl $-ENOSYS,PT_EAX(%esp)
20184 jmp resume_userspace
20185-END(syscall_badsys)
20186+ENDPROC(syscall_badsys)
20187 CFI_ENDPROC
20188 /*
20189 * End of kprobes section
20190@@ -708,8 +940,15 @@ END(syscall_badsys)
20191 * normal stack and adjusts ESP with the matching offset.
20192 */
20193 /* fixup the stack */
20194- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
20195- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
20196+#ifdef CONFIG_SMP
20197+ movl PER_CPU_VAR(cpu_number), %ebx
20198+ shll $PAGE_SHIFT_asm, %ebx
20199+ addl $cpu_gdt_table, %ebx
20200+#else
20201+ movl $cpu_gdt_table, %ebx
20202+#endif
20203+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
20204+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
20205 shl $16, %eax
20206 addl %esp, %eax /* the adjusted stack pointer */
20207 pushl_cfi $__KERNEL_DS
20208@@ -762,7 +1001,7 @@ vector=vector+1
20209 .endr
20210 2: jmp common_interrupt
20211 .endr
20212-END(irq_entries_start)
20213+ENDPROC(irq_entries_start)
20214
20215 .previous
20216 END(interrupt)
20217@@ -813,7 +1052,7 @@ ENTRY(coprocessor_error)
20218 pushl_cfi $do_coprocessor_error
20219 jmp error_code
20220 CFI_ENDPROC
20221-END(coprocessor_error)
20222+ENDPROC(coprocessor_error)
20223
20224 ENTRY(simd_coprocessor_error)
20225 RING0_INT_FRAME
20226@@ -826,7 +1065,7 @@ ENTRY(simd_coprocessor_error)
20227 .section .altinstructions,"a"
20228 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
20229 .previous
20230-.section .altinstr_replacement,"ax"
20231+.section .altinstr_replacement,"a"
20232 663: pushl $do_simd_coprocessor_error
20233 664:
20234 .previous
20235@@ -835,7 +1074,7 @@ ENTRY(simd_coprocessor_error)
20236 #endif
20237 jmp error_code
20238 CFI_ENDPROC
20239-END(simd_coprocessor_error)
20240+ENDPROC(simd_coprocessor_error)
20241
20242 ENTRY(device_not_available)
20243 RING0_INT_FRAME
20244@@ -844,18 +1083,18 @@ ENTRY(device_not_available)
20245 pushl_cfi $do_device_not_available
20246 jmp error_code
20247 CFI_ENDPROC
20248-END(device_not_available)
20249+ENDPROC(device_not_available)
20250
20251 #ifdef CONFIG_PARAVIRT
20252 ENTRY(native_iret)
20253 iret
20254 _ASM_EXTABLE(native_iret, iret_exc)
20255-END(native_iret)
20256+ENDPROC(native_iret)
20257
20258 ENTRY(native_irq_enable_sysexit)
20259 sti
20260 sysexit
20261-END(native_irq_enable_sysexit)
20262+ENDPROC(native_irq_enable_sysexit)
20263 #endif
20264
20265 ENTRY(overflow)
20266@@ -865,7 +1104,7 @@ ENTRY(overflow)
20267 pushl_cfi $do_overflow
20268 jmp error_code
20269 CFI_ENDPROC
20270-END(overflow)
20271+ENDPROC(overflow)
20272
20273 ENTRY(bounds)
20274 RING0_INT_FRAME
20275@@ -874,7 +1113,7 @@ ENTRY(bounds)
20276 pushl_cfi $do_bounds
20277 jmp error_code
20278 CFI_ENDPROC
20279-END(bounds)
20280+ENDPROC(bounds)
20281
20282 ENTRY(invalid_op)
20283 RING0_INT_FRAME
20284@@ -883,7 +1122,7 @@ ENTRY(invalid_op)
20285 pushl_cfi $do_invalid_op
20286 jmp error_code
20287 CFI_ENDPROC
20288-END(invalid_op)
20289+ENDPROC(invalid_op)
20290
20291 ENTRY(coprocessor_segment_overrun)
20292 RING0_INT_FRAME
20293@@ -892,7 +1131,7 @@ ENTRY(coprocessor_segment_overrun)
20294 pushl_cfi $do_coprocessor_segment_overrun
20295 jmp error_code
20296 CFI_ENDPROC
20297-END(coprocessor_segment_overrun)
20298+ENDPROC(coprocessor_segment_overrun)
20299
20300 ENTRY(invalid_TSS)
20301 RING0_EC_FRAME
20302@@ -900,7 +1139,7 @@ ENTRY(invalid_TSS)
20303 pushl_cfi $do_invalid_TSS
20304 jmp error_code
20305 CFI_ENDPROC
20306-END(invalid_TSS)
20307+ENDPROC(invalid_TSS)
20308
20309 ENTRY(segment_not_present)
20310 RING0_EC_FRAME
20311@@ -908,7 +1147,7 @@ ENTRY(segment_not_present)
20312 pushl_cfi $do_segment_not_present
20313 jmp error_code
20314 CFI_ENDPROC
20315-END(segment_not_present)
20316+ENDPROC(segment_not_present)
20317
20318 ENTRY(stack_segment)
20319 RING0_EC_FRAME
20320@@ -916,7 +1155,7 @@ ENTRY(stack_segment)
20321 pushl_cfi $do_stack_segment
20322 jmp error_code
20323 CFI_ENDPROC
20324-END(stack_segment)
20325+ENDPROC(stack_segment)
20326
20327 ENTRY(alignment_check)
20328 RING0_EC_FRAME
20329@@ -924,7 +1163,7 @@ ENTRY(alignment_check)
20330 pushl_cfi $do_alignment_check
20331 jmp error_code
20332 CFI_ENDPROC
20333-END(alignment_check)
20334+ENDPROC(alignment_check)
20335
20336 ENTRY(divide_error)
20337 RING0_INT_FRAME
20338@@ -933,7 +1172,7 @@ ENTRY(divide_error)
20339 pushl_cfi $do_divide_error
20340 jmp error_code
20341 CFI_ENDPROC
20342-END(divide_error)
20343+ENDPROC(divide_error)
20344
20345 #ifdef CONFIG_X86_MCE
20346 ENTRY(machine_check)
20347@@ -943,7 +1182,7 @@ ENTRY(machine_check)
20348 pushl_cfi machine_check_vector
20349 jmp error_code
20350 CFI_ENDPROC
20351-END(machine_check)
20352+ENDPROC(machine_check)
20353 #endif
20354
20355 ENTRY(spurious_interrupt_bug)
20356@@ -953,7 +1192,7 @@ ENTRY(spurious_interrupt_bug)
20357 pushl_cfi $do_spurious_interrupt_bug
20358 jmp error_code
20359 CFI_ENDPROC
20360-END(spurious_interrupt_bug)
20361+ENDPROC(spurious_interrupt_bug)
20362 /*
20363 * End of kprobes section
20364 */
20365@@ -1063,7 +1302,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
20366
20367 ENTRY(mcount)
20368 ret
20369-END(mcount)
20370+ENDPROC(mcount)
20371
20372 ENTRY(ftrace_caller)
20373 cmpl $0, function_trace_stop
20374@@ -1096,7 +1335,7 @@ ftrace_graph_call:
20375 .globl ftrace_stub
20376 ftrace_stub:
20377 ret
20378-END(ftrace_caller)
20379+ENDPROC(ftrace_caller)
20380
20381 ENTRY(ftrace_regs_caller)
20382 pushf /* push flags before compare (in cs location) */
20383@@ -1197,7 +1436,7 @@ trace:
20384 popl %ecx
20385 popl %eax
20386 jmp ftrace_stub
20387-END(mcount)
20388+ENDPROC(mcount)
20389 #endif /* CONFIG_DYNAMIC_FTRACE */
20390 #endif /* CONFIG_FUNCTION_TRACER */
20391
20392@@ -1215,7 +1454,7 @@ ENTRY(ftrace_graph_caller)
20393 popl %ecx
20394 popl %eax
20395 ret
20396-END(ftrace_graph_caller)
20397+ENDPROC(ftrace_graph_caller)
20398
20399 .globl return_to_handler
20400 return_to_handler:
20401@@ -1271,15 +1510,18 @@ error_code:
20402 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
20403 REG_TO_PTGS %ecx
20404 SET_KERNEL_GS %ecx
20405- movl $(__USER_DS), %ecx
20406+ movl $(__KERNEL_DS), %ecx
20407 movl %ecx, %ds
20408 movl %ecx, %es
20409+
20410+ pax_enter_kernel
20411+
20412 TRACE_IRQS_OFF
20413 movl %esp,%eax # pt_regs pointer
20414 call *%edi
20415 jmp ret_from_exception
20416 CFI_ENDPROC
20417-END(page_fault)
20418+ENDPROC(page_fault)
20419
20420 /*
20421 * Debug traps and NMI can happen at the one SYSENTER instruction
20422@@ -1322,7 +1564,7 @@ debug_stack_correct:
20423 call do_debug
20424 jmp ret_from_exception
20425 CFI_ENDPROC
20426-END(debug)
20427+ENDPROC(debug)
20428
20429 /*
20430 * NMI is doubly nasty. It can happen _while_ we're handling
20431@@ -1360,6 +1602,9 @@ nmi_stack_correct:
20432 xorl %edx,%edx # zero error code
20433 movl %esp,%eax # pt_regs pointer
20434 call do_nmi
20435+
20436+ pax_exit_kernel
20437+
20438 jmp restore_all_notrace
20439 CFI_ENDPROC
20440
20441@@ -1396,12 +1641,15 @@ nmi_espfix_stack:
20442 FIXUP_ESPFIX_STACK # %eax == %esp
20443 xorl %edx,%edx # zero error code
20444 call do_nmi
20445+
20446+ pax_exit_kernel
20447+
20448 RESTORE_REGS
20449 lss 12+4(%esp), %esp # back to espfix stack
20450 CFI_ADJUST_CFA_OFFSET -24
20451 jmp irq_return
20452 CFI_ENDPROC
20453-END(nmi)
20454+ENDPROC(nmi)
20455
20456 ENTRY(int3)
20457 RING0_INT_FRAME
20458@@ -1414,14 +1662,14 @@ ENTRY(int3)
20459 call do_int3
20460 jmp ret_from_exception
20461 CFI_ENDPROC
20462-END(int3)
20463+ENDPROC(int3)
20464
20465 ENTRY(general_protection)
20466 RING0_EC_FRAME
20467 pushl_cfi $do_general_protection
20468 jmp error_code
20469 CFI_ENDPROC
20470-END(general_protection)
20471+ENDPROC(general_protection)
20472
20473 #ifdef CONFIG_KVM_GUEST
20474 ENTRY(async_page_fault)
20475@@ -1430,7 +1678,7 @@ ENTRY(async_page_fault)
20476 pushl_cfi $do_async_page_fault
20477 jmp error_code
20478 CFI_ENDPROC
20479-END(async_page_fault)
20480+ENDPROC(async_page_fault)
20481 #endif
20482
20483 /*
20484diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
20485index 7272089..ee191c7 100644
20486--- a/arch/x86/kernel/entry_64.S
20487+++ b/arch/x86/kernel/entry_64.S
20488@@ -59,6 +59,8 @@
20489 #include <asm/context_tracking.h>
20490 #include <asm/smap.h>
20491 #include <linux/err.h>
20492+#include <asm/pgtable.h>
20493+#include <asm/alternative-asm.h>
20494
20495 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
20496 #include <linux/elf-em.h>
20497@@ -80,8 +82,9 @@
20498 #ifdef CONFIG_DYNAMIC_FTRACE
20499
20500 ENTRY(function_hook)
20501+ pax_force_retaddr
20502 retq
20503-END(function_hook)
20504+ENDPROC(function_hook)
20505
20506 /* skip is set if stack has been adjusted */
20507 .macro ftrace_caller_setup skip=0
20508@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
20509 #endif
20510
20511 GLOBAL(ftrace_stub)
20512+ pax_force_retaddr
20513 retq
20514-END(ftrace_caller)
20515+ENDPROC(ftrace_caller)
20516
20517 ENTRY(ftrace_regs_caller)
20518 /* Save the current flags before compare (in SS location)*/
20519@@ -191,7 +195,7 @@ ftrace_restore_flags:
20520 popfq
20521 jmp ftrace_stub
20522
20523-END(ftrace_regs_caller)
20524+ENDPROC(ftrace_regs_caller)
20525
20526
20527 #else /* ! CONFIG_DYNAMIC_FTRACE */
20528@@ -212,6 +216,7 @@ ENTRY(function_hook)
20529 #endif
20530
20531 GLOBAL(ftrace_stub)
20532+ pax_force_retaddr
20533 retq
20534
20535 trace:
20536@@ -225,12 +230,13 @@ trace:
20537 #endif
20538 subq $MCOUNT_INSN_SIZE, %rdi
20539
20540+ pax_force_fptr ftrace_trace_function
20541 call *ftrace_trace_function
20542
20543 MCOUNT_RESTORE_FRAME
20544
20545 jmp ftrace_stub
20546-END(function_hook)
20547+ENDPROC(function_hook)
20548 #endif /* CONFIG_DYNAMIC_FTRACE */
20549 #endif /* CONFIG_FUNCTION_TRACER */
20550
20551@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
20552
20553 MCOUNT_RESTORE_FRAME
20554
20555+ pax_force_retaddr
20556 retq
20557-END(ftrace_graph_caller)
20558+ENDPROC(ftrace_graph_caller)
20559
20560 GLOBAL(return_to_handler)
20561 subq $24, %rsp
20562@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
20563 movq 8(%rsp), %rdx
20564 movq (%rsp), %rax
20565 addq $24, %rsp
20566+ pax_force_fptr %rdi
20567 jmp *%rdi
20568+ENDPROC(return_to_handler)
20569 #endif
20570
20571
20572@@ -284,6 +293,430 @@ ENTRY(native_usergs_sysret64)
20573 ENDPROC(native_usergs_sysret64)
20574 #endif /* CONFIG_PARAVIRT */
20575
20576+ .macro ljmpq sel, off
20577+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
20578+ .byte 0x48; ljmp *1234f(%rip)
20579+ .pushsection .rodata
20580+ .align 16
20581+ 1234: .quad \off; .word \sel
20582+ .popsection
20583+#else
20584+ pushq $\sel
20585+ pushq $\off
20586+ lretq
20587+#endif
20588+ .endm
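
The ljmpq macro above gives 64-bit code a far jump that reloads %cs: on the listed CPU configurations it uses the memory-indirect ljmp form, where the leading 0x48 byte is a REX.W prefix so the operand is read as a 10-byte offset-plus-selector pair, and elsewhere it pushes the selector and offset and returns through them with lretq. The in-memory operand the first form consumes looks like this; the struct name is illustrative:

#include <stdint.h>

/* Matches the ".quad \off; .word \sel" literal built at label 1234: a
 * 64-bit target offset followed by a 16-bit code-segment selector. */
struct far_ptr64 {
        uint64_t off;
        uint16_t sel;
} __attribute__((packed));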
20589+
20590+ .macro pax_enter_kernel
20591+ pax_set_fptr_mask
20592+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
20593+ call pax_enter_kernel
20594+#endif
20595+ .endm
20596+
20597+ .macro pax_exit_kernel
20598+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
20599+ call pax_exit_kernel
20600+#endif
20601+
20602+ .endm
20603+
20604+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
20605+ENTRY(pax_enter_kernel)
20606+ pushq %rdi
20607+
20608+#ifdef CONFIG_PARAVIRT
20609+ PV_SAVE_REGS(CLBR_RDI)
20610+#endif
20611+
20612+#ifdef CONFIG_PAX_KERNEXEC
20613+ GET_CR0_INTO_RDI
20614+ bts $16,%rdi
20615+ jnc 3f
20616+ mov %cs,%edi
20617+ cmp $__KERNEL_CS,%edi
20618+ jnz 2f
20619+1:
20620+#endif
20621+
20622+#ifdef CONFIG_PAX_MEMORY_UDEREF
20623+ 661: jmp 111f
20624+ .pushsection .altinstr_replacement, "a"
20625+ 662: ASM_NOP2
20626+ .popsection
20627+ .pushsection .altinstructions, "a"
20628+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
20629+ .popsection
20630+ GET_CR3_INTO_RDI
20631+ cmp $0,%dil
20632+ jnz 112f
20633+ mov $__KERNEL_DS,%edi
20634+ mov %edi,%ss
20635+ jmp 111f
20636+112: cmp $1,%dil
20637+ jz 113f
20638+ ud2
20639+113: sub $4097,%rdi
20640+ bts $63,%rdi
20641+ SET_RDI_INTO_CR3
20642+ mov $__UDEREF_KERNEL_DS,%edi
20643+ mov %edi,%ss
20644+111:
20645+#endif
20646+
20647+#ifdef CONFIG_PARAVIRT
20648+ PV_RESTORE_REGS(CLBR_RDI)
20649+#endif
20650+
20651+ popq %rdi
20652+ pax_force_retaddr
20653+ retq
20654+
20655+#ifdef CONFIG_PAX_KERNEXEC
20656+2: ljmpq __KERNEL_CS,1b
20657+3: ljmpq __KERNEXEC_KERNEL_CS,4f
20658+4: SET_RDI_INTO_CR0
20659+ jmp 1b
20660+#endif
20661+ENDPROC(pax_enter_kernel)
20662+
20663+ENTRY(pax_exit_kernel)
20664+ pushq %rdi
20665+
20666+#ifdef CONFIG_PARAVIRT
20667+ PV_SAVE_REGS(CLBR_RDI)
20668+#endif
20669+
20670+#ifdef CONFIG_PAX_KERNEXEC
20671+ mov %cs,%rdi
20672+ cmp $__KERNEXEC_KERNEL_CS,%edi
20673+ jz 2f
20674+ GET_CR0_INTO_RDI
20675+ bts $16,%rdi
20676+ jnc 4f
20677+1:
20678+#endif
20679+
20680+#ifdef CONFIG_PAX_MEMORY_UDEREF
20681+ 661: jmp 111f
20682+ .pushsection .altinstr_replacement, "a"
20683+ 662: ASM_NOP2
20684+ .popsection
20685+ .pushsection .altinstructions, "a"
20686+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
20687+ .popsection
20688+ mov %ss,%edi
20689+ cmp $__UDEREF_KERNEL_DS,%edi
20690+ jnz 111f
20691+ GET_CR3_INTO_RDI
20692+ cmp $0,%dil
20693+ jz 112f
20694+ ud2
20695+112: add $4097,%rdi
20696+ bts $63,%rdi
20697+ SET_RDI_INTO_CR3
20698+ mov $__KERNEL_DS,%edi
20699+ mov %edi,%ss
20700+111:
20701+#endif
20702+
20703+#ifdef CONFIG_PARAVIRT
20704+ PV_RESTORE_REGS(CLBR_RDI);
20705+#endif
20706+
20707+ popq %rdi
20708+ pax_force_retaddr
20709+ retq
20710+
20711+#ifdef CONFIG_PAX_KERNEXEC
20712+2: GET_CR0_INTO_RDI
20713+ btr $16,%rdi
20714+ jnc 4f
20715+ ljmpq __KERNEL_CS,3f
20716+3: SET_RDI_INTO_CR0
20717+ jmp 1b
20718+4: ud2
20719+ jmp 4b
20720+#endif
20721+ENDPROC(pax_exit_kernel)
20722+#endif
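
The 661/662 alternative in both routines above NOPs out a jump when the CPU has PCID, turning the UDEREF world switch into pure CR3 arithmetic: under the layout this patch assumes, the kernel and user page directories are adjacent pages and the PCID field in the low bits of CR3 is 0 for the kernel view and 1 for the user view, so sub $4097 moves one page down and drops the PCID in a single step, bts $63 sets the no-flush bit so the switch does not invalidate the TLB, and %ss is reloaded with __UDEREF_KERNEL_DS to mark the state. A model of that arithmetic, under the same assumed layout:

#include <stdint.h>

#define CR3_NOFLUSH (1ull << 63)        /* bts $63,%rdi */
#define PGD_STRIDE  4096ull             /* user pgd sits one page above */

/* Model of "sub $4097,%rdi; bts $63,%rdi": go from the user CR3
 * (PCID 1, user pgd) to the kernel CR3 (PCID 0, kernel pgd) without a
 * TLB flush; the exit path's "add $4097" performs the reverse switch. */
static uint64_t uderef_user_to_kernel_cr3(uint64_t cr3)
{
        return (cr3 - PGD_STRIDE - 1) | CR3_NOFLUSH;
}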
20723+
20724+ .macro pax_enter_kernel_user
20725+ pax_set_fptr_mask
20726+#ifdef CONFIG_PAX_MEMORY_UDEREF
20727+ call pax_enter_kernel_user
20728+#endif
20729+ .endm
20730+
20731+ .macro pax_exit_kernel_user
20732+#ifdef CONFIG_PAX_MEMORY_UDEREF
20733+ call pax_exit_kernel_user
20734+#endif
20735+#ifdef CONFIG_PAX_RANDKSTACK
20736+ pushq %rax
20737+ pushq %r11
20738+ call pax_randomize_kstack
20739+ popq %r11
20740+ popq %rax
20741+#endif
20742+ .endm
20743+
20744+#ifdef CONFIG_PAX_MEMORY_UDEREF
20745+ENTRY(pax_enter_kernel_user)
20746+ pushq %rdi
20747+ pushq %rbx
20748+
20749+#ifdef CONFIG_PARAVIRT
20750+ PV_SAVE_REGS(CLBR_RDI)
20751+#endif
20752+
20753+ 661: jmp 111f
20754+ .pushsection .altinstr_replacement, "a"
20755+ 662: ASM_NOP2
20756+ .popsection
20757+ .pushsection .altinstructions, "a"
20758+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
20759+ .popsection
20760+ GET_CR3_INTO_RDI
20761+ cmp $1,%dil
20762+ jnz 4f
20763+ sub $4097,%rdi
20764+ bts $63,%rdi
20765+ SET_RDI_INTO_CR3
20766+ jmp 3f
20767+111:
20768+
20769+ GET_CR3_INTO_RDI
20770+ mov %rdi,%rbx
20771+ add $__START_KERNEL_map,%rbx
20772+ sub phys_base(%rip),%rbx
20773+
20774+#ifdef CONFIG_PARAVIRT
20775+ cmpl $0, pv_info+PARAVIRT_enabled
20776+ jz 1f
20777+ pushq %rdi
20778+ i = 0
20779+ .rept USER_PGD_PTRS
20780+ mov i*8(%rbx),%rsi
20781+ mov $0,%sil
20782+ lea i*8(%rbx),%rdi
20783+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
20784+ i = i + 1
20785+ .endr
20786+ popq %rdi
20787+ jmp 2f
20788+1:
20789+#endif
20790+
20791+ i = 0
20792+ .rept USER_PGD_PTRS
20793+ movb $0,i*8(%rbx)
20794+ i = i + 1
20795+ .endr
20796+
20797+2: SET_RDI_INTO_CR3
20798+
20799+#ifdef CONFIG_PAX_KERNEXEC
20800+ GET_CR0_INTO_RDI
20801+ bts $16,%rdi
20802+ SET_RDI_INTO_CR0
20803+#endif
20804+
20805+3:
20806+
20807+#ifdef CONFIG_PARAVIRT
20808+ PV_RESTORE_REGS(CLBR_RDI)
20809+#endif
20810+
20811+ popq %rbx
20812+ popq %rdi
20813+ pax_force_retaddr
20814+ retq
20815+4: ud2
20816+ENDPROC(pax_enter_kernel_user)
20817+
20818+ENTRY(pax_exit_kernel_user)
20819+ pushq %rdi
20820+ pushq %rbx
20821+
20822+#ifdef CONFIG_PARAVIRT
20823+ PV_SAVE_REGS(CLBR_RDI)
20824+#endif
20825+
20826+ GET_CR3_INTO_RDI
20827+ 661: jmp 1f
20828+ .pushsection .altinstr_replacement, "a"
20829+ 662: ASM_NOP2
20830+ .popsection
20831+ .pushsection .altinstructions, "a"
20832+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
20833+ .popsection
20834+ cmp $0,%dil
20835+ jnz 3f
20836+ add $4097,%rdi
20837+ bts $63,%rdi
20838+ SET_RDI_INTO_CR3
20839+ jmp 2f
20840+1:
20841+ mov %rdi,%rbx
20842+ add $__START_KERNEL_map,%rbx
20843+ sub phys_base(%rip),%rbx
20844+
20845+#ifdef CONFIG_PARAVIRT
20846+ cmpl $0, pv_info+PARAVIRT_enabled
20847+ jz 1f
20848+ pushq %rdi
20849+ i = 0
20850+ .rept USER_PGD_PTRS
20851+ mov i*8(%rbx),%rsi
20852+ mov $0x67,%sil
20853+ lea i*8(%rbx),%rdi
20854+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
20855+ i = i + 1
20856+ .endr
20857+ popq %rdi
20858+ jmp 2f
20859+1:
20860+#endif
20861+
20862+#ifdef CONFIG_PAX_KERNEXEC
20863+ GET_CR0_INTO_RDI
20864+ btr $16,%rdi
20865+ jnc 3f
20866+ SET_RDI_INTO_CR0
20867+#endif
20868+
20869+ i = 0
20870+ .rept USER_PGD_PTRS
20871+ movb $0x67,i*8(%rbx)
20872+ i = i + 1
20873+ .endr
20874+2:
20875+
20876+#ifdef CONFIG_PARAVIRT
20877+ PV_RESTORE_REGS(CLBR_RDI)
20878+#endif
20879+
20880+ popq %rbx
20881+ popq %rdi
20882+ pax_force_retaddr
20883+ retq
20884+3: ud2
20885+ENDPROC(pax_exit_kernel_user)
20886+#endif
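
Without PCID, pax_enter_kernel_user and pax_exit_kernel_user fall through to the .rept loops, which implement UDEREF by byte-patching the shared page directory in place: on entry the low flag byte of every userland PGD entry is zeroed, clearing Present so no user page is reachable from kernel mode, and on exit it is rewritten as 0x67 (Present | RW | User | Accessed | Dirty). The paravirt branch does the same thing through pv_mmu_ops.set_pgd_batched. A model of the two loops; the USER_PGD_PTRS value is an assumption:

#include <stdint.h>
#include <stddef.h>

#define USER_PGD_PTRS  256              /* assumed: the userland half */
#define PGD_USER_FLAGS 0x67ull          /* P|RW|USER|ACCESSED|DIRTY */

/* "movb $0,i*8(%rbx)": clear the low flag byte, unmapping userland. */
static void uderef_close_userland(uint64_t *pgd)
{
        for (size_t i = 0; i < USER_PGD_PTRS; i++)
                pgd[i] &= ~0xffull;
}

/* "movb $0x67,i*8(%rbx)": restore the flag byte on return to user. */
static void uderef_open_userland(uint64_t *pgd)
{
        for (size_t i = 0; i < USER_PGD_PTRS; i++)
                pgd[i] = (pgd[i] & ~0xffull) | PGD_USER_FLAGS;
}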
20887+
20888+ .macro pax_enter_kernel_nmi
20889+ pax_set_fptr_mask
20890+
20891+#ifdef CONFIG_PAX_KERNEXEC
20892+ GET_CR0_INTO_RDI
20893+ bts $16,%rdi
20894+ jc 110f
20895+ SET_RDI_INTO_CR0
20896+ or $2,%ebx
20897+110:
20898+#endif
20899+
20900+#ifdef CONFIG_PAX_MEMORY_UDEREF
20901+ 661: jmp 111f
20902+ .pushsection .altinstr_replacement, "a"
20903+ 662: ASM_NOP2
20904+ .popsection
20905+ .pushsection .altinstructions, "a"
20906+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
20907+ .popsection
20908+ GET_CR3_INTO_RDI
20909+ cmp $0,%dil
20910+ jz 111f
20911+ sub $4097,%rdi
20912+ or $4,%ebx
20913+ bts $63,%rdi
20914+ SET_RDI_INTO_CR3
20915+ mov $__UDEREF_KERNEL_DS,%edi
20916+ mov %edi,%ss
20917+111:
20918+#endif
20919+ .endm
20920+
20921+ .macro pax_exit_kernel_nmi
20922+#ifdef CONFIG_PAX_KERNEXEC
20923+ btr $1,%ebx
20924+ jnc 110f
20925+ GET_CR0_INTO_RDI
20926+ btr $16,%rdi
20927+ SET_RDI_INTO_CR0
20928+110:
20929+#endif
20930+
20931+#ifdef CONFIG_PAX_MEMORY_UDEREF
20932+ btr $2,%ebx
20933+ jnc 111f
20934+ GET_CR3_INTO_RDI
20935+ add $4097,%rdi
20936+ bts $63,%rdi
20937+ SET_RDI_INTO_CR3
20938+ mov $__KERNEL_DS,%edi
20939+ mov %edi,%ss
20940+111:
20941+#endif
20942+ .endm
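
Because an NMI can interrupt the kernel in either world, pax_enter_kernel_nmi cannot blindly undo anything on exit; it records in spare bits of the callee-saved %ebx exactly what it changed (bit 1 when it had to set CR0.WP, bit 2 when it had to switch CR3 back to the kernel view), and pax_exit_kernel_nmi tests those bits and reverses only what was actually toggled. This is also why the exit paths further down now test $1,%ebx instead of the whole register: bit 0 keeps its old swapgs meaning while bits 1 and 2 carry the PaX state. A model of the bookkeeping, with the same bit assignments:

#include <stdbool.h>

#define NMI_TOGGLED_WP  (1u << 1)       /* the asm's "or $2,%ebx" */
#define NMI_TOGGLED_CR3 (1u << 2)       /* the asm's "or $4,%ebx" */

/* Enter path: note what we changed so the exit path can be exact. */
static unsigned nmi_enter_flags(bool wp_was_clear, bool cr3_was_user)
{
        unsigned flags = 0;

        if (wp_was_clear)               /* bts $16 reported WP clear */
                flags |= NMI_TOGGLED_WP;
        if (cr3_was_user)               /* CR3's PCID byte was nonzero */
                flags |= NMI_TOGGLED_CR3;
        return flags;
}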
20943+
20944+ .macro pax_erase_kstack
20945+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
20946+ call pax_erase_kstack
20947+#endif
20948+ .endm
20949+
20950+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
20951+ENTRY(pax_erase_kstack)
20952+ pushq %rdi
20953+ pushq %rcx
20954+ pushq %rax
20955+ pushq %r11
20956+
20957+ GET_THREAD_INFO(%r11)
20958+ mov TI_lowest_stack(%r11), %rdi
20959+ mov $-0xBEEF, %rax
20960+ std
20961+
20962+1: mov %edi, %ecx
20963+ and $THREAD_SIZE_asm - 1, %ecx
20964+ shr $3, %ecx
20965+ repne scasq
20966+ jecxz 2f
20967+
20968+ cmp $2*8, %ecx
20969+ jc 2f
20970+
20971+ mov $2*8, %ecx
20972+ repe scasq
20973+ jecxz 2f
20974+ jne 1b
20975+
20976+2: cld
20977+ mov %esp, %ecx
20978+ sub %edi, %ecx
20979+
20980+ cmp $THREAD_SIZE_asm, %rcx
20981+ jb 3f
20982+ ud2
20983+3:
20984+
20985+ shr $3, %ecx
20986+ rep stosq
20987+
20988+ mov TI_task_thread_sp0(%r11), %rdi
20989+ sub $256, %rdi
20990+ mov %rdi, TI_lowest_stack(%r11)
20991+
20992+ popq %r11
20993+ popq %rax
20994+ popq %rcx
20995+ popq %rdi
20996+ pax_force_retaddr
20997+ ret
20998+ENDPROC(pax_erase_kstack)
20999+#endif
21000
21001 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
21002 #ifdef CONFIG_TRACE_IRQFLAGS
21003@@ -375,8 +808,8 @@ ENDPROC(native_usergs_sysret64)
21004 .endm
21005
21006 .macro UNFAKE_STACK_FRAME
21007- addq $8*6, %rsp
21008- CFI_ADJUST_CFA_OFFSET -(6*8)
21009+ addq $8*6 + ARG_SKIP, %rsp
21010+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
21011 .endm
21012
21013 /*
21014@@ -463,7 +896,7 @@ ENDPROC(native_usergs_sysret64)
21015 movq %rsp, %rsi
21016
21017 leaq -RBP(%rsp),%rdi /* arg1 for handler */
21018- testl $3, CS-RBP(%rsi)
21019+ testb $3, CS-RBP(%rsi)
21020 je 1f
21021 SWAPGS
21022 /*
21023@@ -498,9 +931,10 @@ ENTRY(save_rest)
21024 movq_cfi r15, R15+16
21025 movq %r11, 8(%rsp) /* return address */
21026 FIXUP_TOP_OF_STACK %r11, 16
21027+ pax_force_retaddr
21028 ret
21029 CFI_ENDPROC
21030-END(save_rest)
21031+ENDPROC(save_rest)
21032
21033 /* save complete stack frame */
21034 .pushsection .kprobes.text, "ax"
21035@@ -529,9 +963,10 @@ ENTRY(save_paranoid)
21036 js 1f /* negative -> in kernel */
21037 SWAPGS
21038 xorl %ebx,%ebx
21039-1: ret
21040+1: pax_force_retaddr_bts
21041+ ret
21042 CFI_ENDPROC
21043-END(save_paranoid)
21044+ENDPROC(save_paranoid)
21045 .popsection
21046
21047 /*
21048@@ -553,7 +988,7 @@ ENTRY(ret_from_fork)
21049
21050 RESTORE_REST
21051
21052- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
21053+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
21054 jz 1f
21055
21056 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
21057@@ -571,7 +1006,7 @@ ENTRY(ret_from_fork)
21058 RESTORE_REST
21059 jmp int_ret_from_sys_call
21060 CFI_ENDPROC
21061-END(ret_from_fork)
21062+ENDPROC(ret_from_fork)
21063
21064 /*
21065 * System call entry. Up to 6 arguments in registers are supported.
21066@@ -608,7 +1043,7 @@ END(ret_from_fork)
21067 ENTRY(system_call)
21068 CFI_STARTPROC simple
21069 CFI_SIGNAL_FRAME
21070- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
21071+ CFI_DEF_CFA rsp,0
21072 CFI_REGISTER rip,rcx
21073 /*CFI_REGISTER rflags,r11*/
21074 SWAPGS_UNSAFE_STACK
21075@@ -621,16 +1056,23 @@ GLOBAL(system_call_after_swapgs)
21076
21077 movq %rsp,PER_CPU_VAR(old_rsp)
21078 movq PER_CPU_VAR(kernel_stack),%rsp
21079+ SAVE_ARGS 8*6,0
21080+ pax_enter_kernel_user
21081+
21082+#ifdef CONFIG_PAX_RANDKSTACK
21083+ pax_erase_kstack
21084+#endif
21085+
21086 /*
21087 * No need to follow this irqs off/on section - it's straight
21088 * and short:
21089 */
21090 ENABLE_INTERRUPTS(CLBR_NONE)
21091- SAVE_ARGS 8,0
21092 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
21093 movq %rcx,RIP-ARGOFFSET(%rsp)
21094 CFI_REL_OFFSET rip,RIP-ARGOFFSET
21095- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
21096+ GET_THREAD_INFO(%rcx)
21097+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
21098 jnz tracesys
21099 system_call_fastpath:
21100 #if __SYSCALL_MASK == ~0
21101@@ -640,7 +1082,7 @@ system_call_fastpath:
21102 cmpl $__NR_syscall_max,%eax
21103 #endif
21104 ja badsys
21105- movq %r10,%rcx
21106+ movq R10-ARGOFFSET(%rsp),%rcx
21107 call *sys_call_table(,%rax,8) # XXX: rip relative
21108 movq %rax,RAX-ARGOFFSET(%rsp)
21109 /*
21110@@ -654,10 +1096,13 @@ sysret_check:
21111 LOCKDEP_SYS_EXIT
21112 DISABLE_INTERRUPTS(CLBR_NONE)
21113 TRACE_IRQS_OFF
21114- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
21115+ GET_THREAD_INFO(%rcx)
21116+ movl TI_flags(%rcx),%edx
21117 andl %edi,%edx
21118 jnz sysret_careful
21119 CFI_REMEMBER_STATE
21120+ pax_exit_kernel_user
21121+ pax_erase_kstack
21122 /*
21123 * sysretq will re-enable interrupts:
21124 */
21125@@ -709,14 +1154,18 @@ badsys:
21126 * jump back to the normal fast path.
21127 */
21128 auditsys:
21129- movq %r10,%r9 /* 6th arg: 4th syscall arg */
21130+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
21131 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
21132 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
21133 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
21134 movq %rax,%rsi /* 2nd arg: syscall number */
21135 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
21136 call __audit_syscall_entry
21137+
21138+ pax_erase_kstack
21139+
21140 LOAD_ARGS 0 /* reload call-clobbered registers */
21141+ pax_set_fptr_mask
21142 jmp system_call_fastpath
21143
21144 /*
21145@@ -737,7 +1186,7 @@ sysret_audit:
21146 /* Do syscall tracing */
21147 tracesys:
21148 #ifdef CONFIG_AUDITSYSCALL
21149- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
21150+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
21151 jz auditsys
21152 #endif
21153 SAVE_REST
21154@@ -745,12 +1194,16 @@ tracesys:
21155 FIXUP_TOP_OF_STACK %rdi
21156 movq %rsp,%rdi
21157 call syscall_trace_enter
21158+
21159+ pax_erase_kstack
21160+
21161 /*
21162 * Reload arg registers from stack in case ptrace changed them.
21163 * We don't reload %rax because syscall_trace_enter() returned
21164 * the value it wants us to use in the table lookup.
21165 */
21166 LOAD_ARGS ARGOFFSET, 1
21167+ pax_set_fptr_mask
21168 RESTORE_REST
21169 #if __SYSCALL_MASK == ~0
21170 cmpq $__NR_syscall_max,%rax
21171@@ -759,7 +1212,7 @@ tracesys:
21172 cmpl $__NR_syscall_max,%eax
21173 #endif
21174 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
21175- movq %r10,%rcx /* fixup for C */
21176+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
21177 call *sys_call_table(,%rax,8)
21178 movq %rax,RAX-ARGOFFSET(%rsp)
21179 /* Use IRET because user could have changed frame */
21180@@ -780,7 +1233,9 @@ GLOBAL(int_with_check)
21181 andl %edi,%edx
21182 jnz int_careful
21183 andl $~TS_COMPAT,TI_status(%rcx)
21184- jmp retint_swapgs
21185+ pax_exit_kernel_user
21186+ pax_erase_kstack
21187+ jmp retint_swapgs_pax
21188
21189 /* Either reschedule or signal or syscall exit tracking needed. */
21190 /* First do a reschedule test. */
21191@@ -826,7 +1281,7 @@ int_restore_rest:
21192 TRACE_IRQS_OFF
21193 jmp int_with_check
21194 CFI_ENDPROC
21195-END(system_call)
21196+ENDPROC(system_call)
21197
21198 .macro FORK_LIKE func
21199 ENTRY(stub_\func)
21200@@ -839,9 +1294,10 @@ ENTRY(stub_\func)
21201 DEFAULT_FRAME 0 8 /* offset 8: return address */
21202 call sys_\func
21203 RESTORE_TOP_OF_STACK %r11, 8
21204+ pax_force_retaddr
21205 ret $REST_SKIP /* pop extended registers */
21206 CFI_ENDPROC
21207-END(stub_\func)
21208+ENDPROC(stub_\func)
21209 .endm
21210
21211 .macro FIXED_FRAME label,func
21212@@ -851,9 +1307,10 @@ ENTRY(\label)
21213 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
21214 call \func
21215 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
21216+ pax_force_retaddr
21217 ret
21218 CFI_ENDPROC
21219-END(\label)
21220+ENDPROC(\label)
21221 .endm
21222
21223 FORK_LIKE clone
21224@@ -870,9 +1327,10 @@ ENTRY(ptregscall_common)
21225 movq_cfi_restore R12+8, r12
21226 movq_cfi_restore RBP+8, rbp
21227 movq_cfi_restore RBX+8, rbx
21228+ pax_force_retaddr
21229 ret $REST_SKIP /* pop extended registers */
21230 CFI_ENDPROC
21231-END(ptregscall_common)
21232+ENDPROC(ptregscall_common)
21233
21234 ENTRY(stub_execve)
21235 CFI_STARTPROC
21236@@ -885,7 +1343,7 @@ ENTRY(stub_execve)
21237 RESTORE_REST
21238 jmp int_ret_from_sys_call
21239 CFI_ENDPROC
21240-END(stub_execve)
21241+ENDPROC(stub_execve)
21242
21243 /*
21244 * sigreturn is special because it needs to restore all registers on return.
21245@@ -902,7 +1360,7 @@ ENTRY(stub_rt_sigreturn)
21246 RESTORE_REST
21247 jmp int_ret_from_sys_call
21248 CFI_ENDPROC
21249-END(stub_rt_sigreturn)
21250+ENDPROC(stub_rt_sigreturn)
21251
21252 #ifdef CONFIG_X86_X32_ABI
21253 ENTRY(stub_x32_rt_sigreturn)
21254@@ -916,7 +1374,7 @@ ENTRY(stub_x32_rt_sigreturn)
21255 RESTORE_REST
21256 jmp int_ret_from_sys_call
21257 CFI_ENDPROC
21258-END(stub_x32_rt_sigreturn)
21259+ENDPROC(stub_x32_rt_sigreturn)
21260
21261 ENTRY(stub_x32_execve)
21262 CFI_STARTPROC
21263@@ -930,7 +1388,7 @@ ENTRY(stub_x32_execve)
21264 RESTORE_REST
21265 jmp int_ret_from_sys_call
21266 CFI_ENDPROC
21267-END(stub_x32_execve)
21268+ENDPROC(stub_x32_execve)
21269
21270 #endif
21271
21272@@ -967,7 +1425,7 @@ vector=vector+1
21273 2: jmp common_interrupt
21274 .endr
21275 CFI_ENDPROC
21276-END(irq_entries_start)
21277+ENDPROC(irq_entries_start)
21278
21279 .previous
21280 END(interrupt)
21281@@ -987,6 +1445,16 @@ END(interrupt)
21282 subq $ORIG_RAX-RBP, %rsp
21283 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
21284 SAVE_ARGS_IRQ
21285+#ifdef CONFIG_PAX_MEMORY_UDEREF
21286+ testb $3, CS(%rdi)
21287+ jnz 1f
21288+ pax_enter_kernel
21289+ jmp 2f
21290+1: pax_enter_kernel_user
21291+2:
21292+#else
21293+ pax_enter_kernel
21294+#endif
21295 call \func
21296 .endm
21297
21298@@ -1019,7 +1487,7 @@ ret_from_intr:
21299
21300 exit_intr:
21301 GET_THREAD_INFO(%rcx)
21302- testl $3,CS-ARGOFFSET(%rsp)
21303+ testb $3,CS-ARGOFFSET(%rsp)
21304 je retint_kernel
21305
21306 /* Interrupt came from user space */
21307@@ -1041,12 +1509,16 @@ retint_swapgs: /* return to user-space */
21308 * The iretq could re-enable interrupts:
21309 */
21310 DISABLE_INTERRUPTS(CLBR_ANY)
21311+ pax_exit_kernel_user
21312+retint_swapgs_pax:
21313 TRACE_IRQS_IRETQ
21314 SWAPGS
21315 jmp restore_args
21316
21317 retint_restore_args: /* return to kernel space */
21318 DISABLE_INTERRUPTS(CLBR_ANY)
21319+ pax_exit_kernel
21320+ pax_force_retaddr (RIP-ARGOFFSET)
21321 /*
21322 * The iretq could re-enable interrupts:
21323 */
21324@@ -1129,7 +1601,7 @@ ENTRY(retint_kernel)
21325 #endif
21326
21327 CFI_ENDPROC
21328-END(common_interrupt)
21329+ENDPROC(common_interrupt)
21330 /*
21331 * End of kprobes section
21332 */
21333@@ -1147,7 +1619,7 @@ ENTRY(\sym)
21334 interrupt \do_sym
21335 jmp ret_from_intr
21336 CFI_ENDPROC
21337-END(\sym)
21338+ENDPROC(\sym)
21339 .endm
21340
21341 #ifdef CONFIG_SMP
21342@@ -1208,12 +1680,22 @@ ENTRY(\sym)
21343 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
21344 call error_entry
21345 DEFAULT_FRAME 0
21346+#ifdef CONFIG_PAX_MEMORY_UDEREF
21347+ testb $3, CS(%rsp)
21348+ jnz 1f
21349+ pax_enter_kernel
21350+ jmp 2f
21351+1: pax_enter_kernel_user
21352+2:
21353+#else
21354+ pax_enter_kernel
21355+#endif
21356 movq %rsp,%rdi /* pt_regs pointer */
21357 xorl %esi,%esi /* no error code */
21358 call \do_sym
21359 jmp error_exit /* %ebx: no swapgs flag */
21360 CFI_ENDPROC
21361-END(\sym)
21362+ENDPROC(\sym)
21363 .endm
21364
21365 .macro paranoidzeroentry sym do_sym
21366@@ -1226,15 +1708,25 @@ ENTRY(\sym)
21367 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
21368 call save_paranoid
21369 TRACE_IRQS_OFF
21370+#ifdef CONFIG_PAX_MEMORY_UDEREF
21371+ testb $3, CS(%rsp)
21372+ jnz 1f
21373+ pax_enter_kernel
21374+ jmp 2f
21375+1: pax_enter_kernel_user
21376+2:
21377+#else
21378+ pax_enter_kernel
21379+#endif
21380 movq %rsp,%rdi /* pt_regs pointer */
21381 xorl %esi,%esi /* no error code */
21382 call \do_sym
21383 jmp paranoid_exit /* %ebx: no swapgs flag */
21384 CFI_ENDPROC
21385-END(\sym)
21386+ENDPROC(\sym)
21387 .endm
21388
21389-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
21390+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
21391 .macro paranoidzeroentry_ist sym do_sym ist
21392 ENTRY(\sym)
21393 INTR_FRAME
21394@@ -1245,14 +1737,30 @@ ENTRY(\sym)
21395 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
21396 call save_paranoid
21397 TRACE_IRQS_OFF_DEBUG
21398+#ifdef CONFIG_PAX_MEMORY_UDEREF
21399+ testb $3, CS(%rsp)
21400+ jnz 1f
21401+ pax_enter_kernel
21402+ jmp 2f
21403+1: pax_enter_kernel_user
21404+2:
21405+#else
21406+ pax_enter_kernel
21407+#endif
21408 movq %rsp,%rdi /* pt_regs pointer */
21409 xorl %esi,%esi /* no error code */
21410+#ifdef CONFIG_SMP
21411+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
21412+ lea init_tss(%r12), %r12
21413+#else
21414+ lea init_tss(%rip), %r12
21415+#endif
21416 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
21417 call \do_sym
21418 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
21419 jmp paranoid_exit /* %ebx: no swapgs flag */
21420 CFI_ENDPROC
21421-END(\sym)
21422+ENDPROC(\sym)
21423 .endm
21424
21425 .macro errorentry sym do_sym
21426@@ -1264,13 +1772,23 @@ ENTRY(\sym)
21427 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
21428 call error_entry
21429 DEFAULT_FRAME 0
21430+#ifdef CONFIG_PAX_MEMORY_UDEREF
21431+ testb $3, CS(%rsp)
21432+ jnz 1f
21433+ pax_enter_kernel
21434+ jmp 2f
21435+1: pax_enter_kernel_user
21436+2:
21437+#else
21438+ pax_enter_kernel
21439+#endif
21440 movq %rsp,%rdi /* pt_regs pointer */
21441 movq ORIG_RAX(%rsp),%rsi /* get error code */
21442 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
21443 call \do_sym
21444 jmp error_exit /* %ebx: no swapgs flag */
21445 CFI_ENDPROC
21446-END(\sym)
21447+ENDPROC(\sym)
21448 .endm
21449
21450 /* error code is on the stack already */
21451@@ -1284,13 +1802,23 @@ ENTRY(\sym)
21452 call save_paranoid
21453 DEFAULT_FRAME 0
21454 TRACE_IRQS_OFF
21455+#ifdef CONFIG_PAX_MEMORY_UDEREF
21456+ testb $3, CS(%rsp)
21457+ jnz 1f
21458+ pax_enter_kernel
21459+ jmp 2f
21460+1: pax_enter_kernel_user
21461+2:
21462+#else
21463+ pax_enter_kernel
21464+#endif
21465 movq %rsp,%rdi /* pt_regs pointer */
21466 movq ORIG_RAX(%rsp),%rsi /* get error code */
21467 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
21468 call \do_sym
21469 jmp paranoid_exit /* %ebx: no swapgs flag */
21470 CFI_ENDPROC
21471-END(\sym)
21472+ENDPROC(\sym)
21473 .endm
21474
21475 zeroentry divide_error do_divide_error
21476@@ -1320,9 +1848,10 @@ gs_change:
21477 2: mfence /* workaround */
21478 SWAPGS
21479 popfq_cfi
21480+ pax_force_retaddr
21481 ret
21482 CFI_ENDPROC
21483-END(native_load_gs_index)
21484+ENDPROC(native_load_gs_index)
21485
21486 _ASM_EXTABLE(gs_change,bad_gs)
21487 .section .fixup,"ax"
21488@@ -1350,9 +1879,10 @@ ENTRY(call_softirq)
21489 CFI_DEF_CFA_REGISTER rsp
21490 CFI_ADJUST_CFA_OFFSET -8
21491 decl PER_CPU_VAR(irq_count)
21492+ pax_force_retaddr
21493 ret
21494 CFI_ENDPROC
21495-END(call_softirq)
21496+ENDPROC(call_softirq)
21497
21498 #ifdef CONFIG_XEN
21499 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
21500@@ -1390,7 +1920,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
21501 decl PER_CPU_VAR(irq_count)
21502 jmp error_exit
21503 CFI_ENDPROC
21504-END(xen_do_hypervisor_callback)
21505+ENDPROC(xen_do_hypervisor_callback)
21506
21507 /*
21508 * Hypervisor uses this for application faults while it executes.
21509@@ -1449,7 +1979,7 @@ ENTRY(xen_failsafe_callback)
21510 SAVE_ALL
21511 jmp error_exit
21512 CFI_ENDPROC
21513-END(xen_failsafe_callback)
21514+ENDPROC(xen_failsafe_callback)
21515
21516 apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
21517 xen_hvm_callback_vector xen_evtchn_do_upcall
21518@@ -1501,18 +2031,33 @@ ENTRY(paranoid_exit)
21519 DEFAULT_FRAME
21520 DISABLE_INTERRUPTS(CLBR_NONE)
21521 TRACE_IRQS_OFF_DEBUG
21522- testl %ebx,%ebx /* swapgs needed? */
21523+ testl $1,%ebx /* swapgs needed? */
21524 jnz paranoid_restore
21525- testl $3,CS(%rsp)
21526+ testb $3,CS(%rsp)
21527 jnz paranoid_userspace
21528+#ifdef CONFIG_PAX_MEMORY_UDEREF
21529+ pax_exit_kernel
21530+ TRACE_IRQS_IRETQ 0
21531+ SWAPGS_UNSAFE_STACK
21532+ RESTORE_ALL 8
21533+ pax_force_retaddr_bts
21534+ jmp irq_return
21535+#endif
21536 paranoid_swapgs:
21537+#ifdef CONFIG_PAX_MEMORY_UDEREF
21538+ pax_exit_kernel_user
21539+#else
21540+ pax_exit_kernel
21541+#endif
21542 TRACE_IRQS_IRETQ 0
21543 SWAPGS_UNSAFE_STACK
21544 RESTORE_ALL 8
21545 jmp irq_return
21546 paranoid_restore:
21547+ pax_exit_kernel
21548 TRACE_IRQS_IRETQ_DEBUG 0
21549 RESTORE_ALL 8
21550+ pax_force_retaddr_bts
21551 jmp irq_return
21552 paranoid_userspace:
21553 GET_THREAD_INFO(%rcx)
21554@@ -1541,7 +2086,7 @@ paranoid_schedule:
21555 TRACE_IRQS_OFF
21556 jmp paranoid_userspace
21557 CFI_ENDPROC
21558-END(paranoid_exit)
21559+ENDPROC(paranoid_exit)
21560
21561 /*
21562 * Exception entry point. This expects an error code/orig_rax on the stack.
21563@@ -1568,12 +2113,13 @@ ENTRY(error_entry)
21564 movq_cfi r14, R14+8
21565 movq_cfi r15, R15+8
21566 xorl %ebx,%ebx
21567- testl $3,CS+8(%rsp)
21568+ testb $3,CS+8(%rsp)
21569 je error_kernelspace
21570 error_swapgs:
21571 SWAPGS
21572 error_sti:
21573 TRACE_IRQS_OFF
21574+ pax_force_retaddr_bts
21575 ret
21576
21577 /*
21578@@ -1600,7 +2146,7 @@ bstep_iret:
21579 movq %rcx,RIP+8(%rsp)
21580 jmp error_swapgs
21581 CFI_ENDPROC
21582-END(error_entry)
21583+ENDPROC(error_entry)
21584
21585
21586 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
21587@@ -1611,7 +2157,7 @@ ENTRY(error_exit)
21588 DISABLE_INTERRUPTS(CLBR_NONE)
21589 TRACE_IRQS_OFF
21590 GET_THREAD_INFO(%rcx)
21591- testl %eax,%eax
21592+ testl $1,%eax
21593 jne retint_kernel
21594 LOCKDEP_SYS_EXIT_IRQ
21595 movl TI_flags(%rcx),%edx
21596@@ -1620,7 +2166,7 @@ ENTRY(error_exit)
21597 jnz retint_careful
21598 jmp retint_swapgs
21599 CFI_ENDPROC
21600-END(error_exit)
21601+ENDPROC(error_exit)
21602
21603 /*
21604 * Test if a given stack is an NMI stack or not.
21605@@ -1678,9 +2224,11 @@ ENTRY(nmi)
21606 * If %cs was not the kernel segment, then the NMI triggered in user
21607 * space, which means it is definitely not nested.
21608 */
21609+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
21610+ je 1f
21611 cmpl $__KERNEL_CS, 16(%rsp)
21612 jne first_nmi
21613-
21614+1:
21615 /*
21616 * Check the special variable on the stack to see if NMIs are
21617 * executing.
21618@@ -1714,8 +2262,7 @@ nested_nmi:
21619
21620 1:
21621 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
21622- leaq -1*8(%rsp), %rdx
21623- movq %rdx, %rsp
21624+ subq $8, %rsp
21625 CFI_ADJUST_CFA_OFFSET 1*8
21626 leaq -10*8(%rsp), %rdx
21627 pushq_cfi $__KERNEL_DS
21628@@ -1733,6 +2280,7 @@ nested_nmi_out:
21629 CFI_RESTORE rdx
21630
21631 /* No need to check faults here */
21632+# pax_force_retaddr_bts
21633 INTERRUPT_RETURN
21634
21635 CFI_RESTORE_STATE
21636@@ -1849,6 +2397,8 @@ end_repeat_nmi:
21637 */
21638 movq %cr2, %r12
21639
21640+ pax_enter_kernel_nmi
21641+
21642 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
21643 movq %rsp,%rdi
21644 movq $-1,%rsi
21645@@ -1861,26 +2411,31 @@ end_repeat_nmi:
21646 movq %r12, %cr2
21647 1:
21648
21649- testl %ebx,%ebx /* swapgs needed? */
21650+ testl $1,%ebx /* swapgs needed? */
21651 jnz nmi_restore
21652 nmi_swapgs:
21653 SWAPGS_UNSAFE_STACK
21654 nmi_restore:
21655+ pax_exit_kernel_nmi
21656 /* Pop the extra iret frame at once */
21657 RESTORE_ALL 6*8
21658+ testb $3, 8(%rsp)
21659+ jnz 1f
21660+ pax_force_retaddr_bts
21661+1:
21662
21663 /* Clear the NMI executing stack variable */
21664 movq $0, 5*8(%rsp)
21665 jmp irq_return
21666 CFI_ENDPROC
21667-END(nmi)
21668+ENDPROC(nmi)
21669
21670 ENTRY(ignore_sysret)
21671 CFI_STARTPROC
21672 mov $-ENOSYS,%eax
21673 sysret
21674 CFI_ENDPROC
21675-END(ignore_sysret)
21676+ENDPROC(ignore_sysret)
21677
21678 /*
21679 * End of kprobes section
21680diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
21681index 42a392a..fbbd930 100644
21682--- a/arch/x86/kernel/ftrace.c
21683+++ b/arch/x86/kernel/ftrace.c
21684@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
21685 {
21686 unsigned char replaced[MCOUNT_INSN_SIZE];
21687
21688+ ip = ktla_ktva(ip);
21689+
21690 /*
21691 * Note: Due to modules and __init, code can
21692 * disappear and change, we need to protect against faulting
21693@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
21694 unsigned char old[MCOUNT_INSN_SIZE], *new;
21695 int ret;
21696
21697- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
21698+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
21699 new = ftrace_call_replace(ip, (unsigned long)func);
21700
21701 /* See comment above by declaration of modifying_ftrace_code */
21702@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
21703 /* Also update the regs callback function */
21704 if (!ret) {
21705 ip = (unsigned long)(&ftrace_regs_call);
21706- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
21707+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
21708 new = ftrace_call_replace(ip, (unsigned long)func);
21709 ret = ftrace_modify_code(ip, old, new);
21710 }
21711@@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
21712 * kernel identity mapping to modify code.
21713 */
21714 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
21715- ip = (unsigned long)__va(__pa_symbol(ip));
21716+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
21717
21718 return probe_kernel_write((void *)ip, val, size);
21719 }
21720@@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
21721 unsigned char replaced[MCOUNT_INSN_SIZE];
21722 unsigned char brk = BREAKPOINT_INSTRUCTION;
21723
21724- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
21725+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
21726 return -EFAULT;
21727
21728 /* Make sure it is what we expect it to be */
21729@@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
21730 return ret;
21731
21732 fail_update:
21733- probe_kernel_write((void *)ip, &old_code[0], 1);
21734+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
21735 goto out;
21736 }
21737
21738@@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
21739 {
21740 unsigned char code[MCOUNT_INSN_SIZE];
21741
21742+ ip = ktla_ktva(ip);
21743+
21744 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
21745 return -EFAULT;
21746
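
All of the ftrace.c changes are the same adjustment: under 32-bit KERNEXEC the kernel's code segment gets a non-zero base, so the linear address of a text symbol and the address the data segment must use to read or patch its bytes differ by a constant, and every probe_kernel_read/write of instruction bytes has to be routed through ktla_ktva(). A sketch of the shape of that conversion; the offset value is purely illustrative, and on configurations without the shifted code segment the macro is presumably an identity:

/* Illustrative only: the real __KERNEL_TEXT_OFFSET comes from the
 * KERNEXEC segment layout defined elsewhere in this patch. */
#define __KERNEL_TEXT_OFFSET_MODEL 0x10000000UL

static unsigned long ktla_ktva_model(unsigned long addr)
{
        /* kernel text linear address -> kernel text virtual address */
        return addr + __KERNEL_TEXT_OFFSET_MODEL;
}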
21747diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
21748index 55b6761..a6456fc 100644
21749--- a/arch/x86/kernel/head64.c
21750+++ b/arch/x86/kernel/head64.c
21751@@ -67,12 +67,12 @@ again:
21752 pgd = *pgd_p;
21753
21754 /*
21755- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
21756- * critical -- __PAGE_OFFSET would point us back into the dynamic
21757+ * The use of __early_va rather than __va here is critical:
21758+ * __va would point us back into the dynamic
21759 * range and we might end up looping forever...
21760 */
21761 if (pgd)
21762- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
21763+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
21764 else {
21765 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
21766 reset_early_page_tables();
21767@@ -82,13 +82,13 @@ again:
21768 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
21769 for (i = 0; i < PTRS_PER_PUD; i++)
21770 pud_p[i] = 0;
21771- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
21772+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
21773 }
21774 pud_p += pud_index(address);
21775 pud = *pud_p;
21776
21777 if (pud)
21778- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
21779+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
21780 else {
21781 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
21782 reset_early_page_tables();
21783@@ -98,7 +98,7 @@ again:
21784 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
21785 for (i = 0; i < PTRS_PER_PMD; i++)
21786 pmd_p[i] = 0;
21787- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
21788+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
21789 }
21790 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
21791 pmd_p[pmd_index(address)] = pmd;
21792@@ -175,7 +175,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
21793 if (console_loglevel == 10)
21794 early_printk("Kernel alive\n");
21795
21796- clear_page(init_level4_pgt);
21797 /* set init_level4_pgt kernel high mapping*/
21798 init_level4_pgt[511] = early_level4_pgt[511];
21799
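
The head64.c hunks replace the open-coded phys-to-virt arithmetic with an __early_va() helper; the original comment's point still stands either way: plain __va() would point back into the dynamic mapping, which is not set up yet this early in boot. Both spellings compute roughly the same thing, modeled below with the stock x86_64 constants and phys_base as the boot-time relocation offset:

#include <stdint.h>

#define PTE_PFN_MASK_MODEL 0x000ffffffffff000ull
#define START_KERNEL_MAP   0xffffffff80000000ull  /* __START_KERNEL_map */

static uint64_t phys_base_model;        /* boot-time relocation offset */

/* Turn the physical address held in a pgd/pud entry back into a pointer
 * that is valid in the kernel-high mapping used during early boot. */
static void *early_va_model(uint64_t entry)
{
        return (void *)(uintptr_t)((entry & PTE_PFN_MASK_MODEL)
                                   + START_KERNEL_MAP - phys_base_model);
}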
21800diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
21801index 73afd11..0ef46f2 100644
21802--- a/arch/x86/kernel/head_32.S
21803+++ b/arch/x86/kernel/head_32.S
21804@@ -26,6 +26,12 @@
21805 /* Physical address */
21806 #define pa(X) ((X) - __PAGE_OFFSET)
21807
21808+#ifdef CONFIG_PAX_KERNEXEC
21809+#define ta(X) (X)
21810+#else
21811+#define ta(X) ((X) - __PAGE_OFFSET)
21812+#endif
21813+
21814 /*
21815 * References to members of the new_cpu_data structure.
21816 */
21817@@ -55,11 +61,7 @@
21818 * and small than max_low_pfn, otherwise will waste some page table entries
21819 */
21820
21821-#if PTRS_PER_PMD > 1
21822-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
21823-#else
21824-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
21825-#endif
21826+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
21827
21828 /* Number of possible pages in the lowmem region */
21829 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
21830@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
21831 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
21832
21833 /*
21834+ * Real beginning of normal "text" segment
21835+ */
21836+ENTRY(stext)
21837+ENTRY(_stext)
21838+
21839+/*
21840 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
21841 * %esi points to the real-mode code as a 32-bit pointer.
21842 * CS and DS must be 4 GB flat segments, but we don't depend on
21843@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
21844 * can.
21845 */
21846 __HEAD
21847+
21848+#ifdef CONFIG_PAX_KERNEXEC
21849+ jmp startup_32
21850+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
21851+.fill PAGE_SIZE-5,1,0xcc
21852+#endif
21853+
21854 ENTRY(startup_32)
21855 movl pa(stack_start),%ecx
21856
21857@@ -106,6 +121,59 @@ ENTRY(startup_32)
21858 2:
21859 leal -__PAGE_OFFSET(%ecx),%esp
21860
21861+#ifdef CONFIG_SMP
21862+ movl $pa(cpu_gdt_table),%edi
21863+ movl $__per_cpu_load,%eax
21864+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
21865+ rorl $16,%eax
21866+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
21867+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
21868+ movl $__per_cpu_end - 1,%eax
21869+ subl $__per_cpu_start,%eax
21870+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
21871+#endif
21872+
21873+#ifdef CONFIG_PAX_MEMORY_UDEREF
21874+ movl $NR_CPUS,%ecx
21875+ movl $pa(cpu_gdt_table),%edi
21876+1:
21877+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
21878+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
21879+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
21880+ addl $PAGE_SIZE_asm,%edi
21881+ loop 1b
21882+#endif
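
This loop is where 32-bit UDEREF is wired into the GDT: for every per-CPU GDT page it rewrites the high dword of three descriptors so their limits pivot at __PAGE_OFFSET. The kernel data descriptor (0x00c09700) is an expand-down segment, so a limit of __PAGE_OFFSET-1 makes only offsets at or above __PAGE_OFFSET valid, leaving userland unreachable through kernel %ds/%es; the user code and data descriptors (0x00c0fb00 and 0x00c0f300) are ordinary expand-up segments with the same limit, so they reach only userland. Bits 16..19 of the limit live in bits 16..19 of the high dword, which is what the (((__PAGE_OFFSET-1) & 0xf0000000) >> 12) term supplies. A model of that dword, assuming the usual 3/1 split:

#include <stdint.h>

#define PAGE_OFFSET_MODEL 0xc0000000u   /* assumed 3GB/1GB split */

/* type_flags is the constant part of the high dword, e.g. 0x00c09700 for
 * the expand-down kernel data segment. The added term sets limit[19:16]
 * to 0xb; combined with the 0xffff already in the descriptor's low dword
 * that encodes a 4K-granular limit of exactly PAGE_OFFSET - 1. */
static uint32_t uderef_desc_hi(uint32_t type_flags)
{
        return (((PAGE_OFFSET_MODEL - 1) & 0xf0000000u) >> 12) | type_flags;
}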
21883+
21884+#ifdef CONFIG_PAX_KERNEXEC
21885+ movl $pa(boot_gdt),%edi
21886+ movl $__LOAD_PHYSICAL_ADDR,%eax
21887+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
21888+ rorl $16,%eax
21889+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
21890+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
21891+ rorl $16,%eax
21892+
21893+ ljmp $(__BOOT_CS),$1f
21894+1:
21895+
21896+ movl $NR_CPUS,%ecx
21897+ movl $pa(cpu_gdt_table),%edi
21898+ addl $__PAGE_OFFSET,%eax
21899+1:
21900+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
21901+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
21902+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
21903+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
21904+ rorl $16,%eax
21905+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
21906+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
21907+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
21908+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
21909+ rorl $16,%eax
21910+ addl $PAGE_SIZE_asm,%edi
21911+ loop 1b
21912+#endif
21913+
21914 /*
21915 * Clear BSS first so that there are no surprises...
21916 */
21917@@ -201,8 +269,11 @@ ENTRY(startup_32)
21918 movl %eax, pa(max_pfn_mapped)
21919
21920 /* Do early initialization of the fixmap area */
21921- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
21922- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
21923+#ifdef CONFIG_COMPAT_VDSO
21924+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
21925+#else
21926+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
21927+#endif
21928 #else /* Not PAE */
21929
21930 page_pde_offset = (__PAGE_OFFSET >> 20);
21931@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
21932 movl %eax, pa(max_pfn_mapped)
21933
21934 /* Do early initialization of the fixmap area */
21935- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
21936- movl %eax,pa(initial_page_table+0xffc)
21937+#ifdef CONFIG_COMPAT_VDSO
21938+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
21939+#else
21940+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
21941+#endif
21942 #endif
21943
21944 #ifdef CONFIG_PARAVIRT
21945@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
21946 cmpl $num_subarch_entries, %eax
21947 jae bad_subarch
21948
21949- movl pa(subarch_entries)(,%eax,4), %eax
21950- subl $__PAGE_OFFSET, %eax
21951- jmp *%eax
21952+ jmp *pa(subarch_entries)(,%eax,4)
21953
21954 bad_subarch:
21955 WEAK(lguest_entry)
21956@@ -261,10 +333,10 @@ WEAK(xen_entry)
21957 __INITDATA
21958
21959 subarch_entries:
21960- .long default_entry /* normal x86/PC */
21961- .long lguest_entry /* lguest hypervisor */
21962- .long xen_entry /* Xen hypervisor */
21963- .long default_entry /* Moorestown MID */
21964+ .long ta(default_entry) /* normal x86/PC */
21965+ .long ta(lguest_entry) /* lguest hypervisor */
21966+ .long ta(xen_entry) /* Xen hypervisor */
21967+ .long ta(default_entry) /* Moorestown MID */
21968 num_subarch_entries = (. - subarch_entries) / 4
21969 .previous
21970 #else
21971@@ -355,6 +427,7 @@ default_entry:
21972 movl pa(mmu_cr4_features),%eax
21973 movl %eax,%cr4
21974
21975+#ifdef CONFIG_X86_PAE
21976 testb $X86_CR4_PAE, %al # check if PAE is enabled
21977 jz enable_paging
21978
21979@@ -383,6 +456,9 @@ default_entry:
21980 /* Make changes effective */
21981 wrmsr
21982
21983+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
21984+#endif
21985+
21986 enable_paging:
21987
21988 /*
21989@@ -451,14 +527,20 @@ is486:
21990 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
21991 movl %eax,%ss # after changing gdt.
21992
21993- movl $(__USER_DS),%eax # DS/ES contains default USER segment
21994+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
21995 movl %eax,%ds
21996 movl %eax,%es
21997
21998 movl $(__KERNEL_PERCPU), %eax
21999 movl %eax,%fs # set this cpu's percpu
22000
22001+#ifdef CONFIG_CC_STACKPROTECTOR
22002 movl $(__KERNEL_STACK_CANARY),%eax
22003+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22004+ movl $(__USER_DS),%eax
22005+#else
22006+ xorl %eax,%eax
22007+#endif
22008 movl %eax,%gs
22009
22010 xorl %eax,%eax # Clear LDT
22011@@ -534,8 +616,11 @@ setup_once:
22012 * relocation. Manually set base address in stack canary
22013 * segment descriptor.
22014 */
22015- movl $gdt_page,%eax
22016+ movl $cpu_gdt_table,%eax
22017 movl $stack_canary,%ecx
22018+#ifdef CONFIG_SMP
22019+ addl $__per_cpu_load,%ecx
22020+#endif
22021 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
22022 shrl $16, %ecx
22023 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
22024@@ -566,7 +651,7 @@ ENDPROC(early_idt_handlers)
22025 /* This is global to keep gas from relaxing the jumps */
22026 ENTRY(early_idt_handler)
22027 cld
22028- cmpl $2,%ss:early_recursion_flag
22029+ cmpl $1,%ss:early_recursion_flag
22030 je hlt_loop
22031 incl %ss:early_recursion_flag
22032
22033@@ -604,8 +689,8 @@ ENTRY(early_idt_handler)
22034 pushl (20+6*4)(%esp) /* trapno */
22035 pushl $fault_msg
22036 call printk
22037-#endif
22038 call dump_stack
22039+#endif
22040 hlt_loop:
22041 hlt
22042 jmp hlt_loop
22043@@ -624,8 +709,11 @@ ENDPROC(early_idt_handler)
22044 /* This is the default interrupt "handler" :-) */
22045 ALIGN
22046 ignore_int:
22047- cld
22048 #ifdef CONFIG_PRINTK
22049+ cmpl $2,%ss:early_recursion_flag
22050+ je hlt_loop
22051+ incl %ss:early_recursion_flag
22052+ cld
22053 pushl %eax
22054 pushl %ecx
22055 pushl %edx
22056@@ -634,9 +722,6 @@ ignore_int:
22057 movl $(__KERNEL_DS),%eax
22058 movl %eax,%ds
22059 movl %eax,%es
22060- cmpl $2,early_recursion_flag
22061- je hlt_loop
22062- incl early_recursion_flag
22063 pushl 16(%esp)
22064 pushl 24(%esp)
22065 pushl 32(%esp)
22066@@ -670,29 +755,43 @@ ENTRY(setup_once_ref)
22067 /*
22068 * BSS section
22069 */
22070-__PAGE_ALIGNED_BSS
22071- .align PAGE_SIZE
22072 #ifdef CONFIG_X86_PAE
22073+.section .initial_pg_pmd,"a",@progbits
22074 initial_pg_pmd:
22075 .fill 1024*KPMDS,4,0
22076 #else
22077+.section .initial_page_table,"a",@progbits
22078 ENTRY(initial_page_table)
22079 .fill 1024,4,0
22080 #endif
22081+.section .initial_pg_fixmap,"a",@progbits
22082 initial_pg_fixmap:
22083 .fill 1024,4,0
22084+.section .empty_zero_page,"a",@progbits
22085 ENTRY(empty_zero_page)
22086 .fill 4096,1,0
22087+.section .swapper_pg_dir,"a",@progbits
22088 ENTRY(swapper_pg_dir)
22089+#ifdef CONFIG_X86_PAE
22090+ .fill 4,8,0
22091+#else
22092 .fill 1024,4,0
22093+#endif
22094+
22095+/*
22096+ * The IDT has to be page-aligned to simplify the Pentium
22097+ * F0 0F bug workaround.. We have a special link segment
22098+ * for this.
22099+ */
22100+.section .idt,"a",@progbits
22101+ENTRY(idt_table)
22102+ .fill 256,8,0
22103
22104 /*
22105 * This starts the data section.
22106 */
22107 #ifdef CONFIG_X86_PAE
22108-__PAGE_ALIGNED_DATA
22109- /* Page-aligned for the benefit of paravirt? */
22110- .align PAGE_SIZE
22111+.section .initial_page_table,"a",@progbits
22112 ENTRY(initial_page_table)
22113 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
22114 # if KPMDS == 3
22115@@ -711,12 +810,20 @@ ENTRY(initial_page_table)
22116 # error "Kernel PMDs should be 1, 2 or 3"
22117 # endif
22118 .align PAGE_SIZE /* needs to be page-sized too */
22119+
22120+#ifdef CONFIG_PAX_PER_CPU_PGD
22121+ENTRY(cpu_pgd)
22122+ .rept 2*NR_CPUS
22123+ .fill 4,8,0
22124+ .endr
22125+#endif
22126+
22127 #endif
22128
22129 .data
22130 .balign 4
22131 ENTRY(stack_start)
22132- .long init_thread_union+THREAD_SIZE
22133+ .long init_thread_union+THREAD_SIZE-8
22134
22135 __INITRODATA
22136 int_msg:
22137@@ -744,7 +851,7 @@ fault_msg:
22138 * segment size, and 32-bit linear address value:
22139 */
22140
22141- .data
22142+.section .rodata,"a",@progbits
22143 .globl boot_gdt_descr
22144 .globl idt_descr
22145
22146@@ -753,7 +860,7 @@ fault_msg:
22147 .word 0 # 32 bit align gdt_desc.address
22148 boot_gdt_descr:
22149 .word __BOOT_DS+7
22150- .long boot_gdt - __PAGE_OFFSET
22151+ .long pa(boot_gdt)
22152
22153 .word 0 # 32-bit align idt_desc.address
22154 idt_descr:
22155@@ -764,7 +871,7 @@ idt_descr:
22156 .word 0 # 32 bit align gdt_desc.address
22157 ENTRY(early_gdt_descr)
22158 .word GDT_ENTRIES*8-1
22159- .long gdt_page /* Overwritten for secondary CPUs */
22160+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
22161
22162 /*
22163 * The boot_gdt must mirror the equivalent in setup.S and is
22164@@ -773,5 +880,65 @@ ENTRY(early_gdt_descr)
22165 .align L1_CACHE_BYTES
22166 ENTRY(boot_gdt)
22167 .fill GDT_ENTRY_BOOT_CS,8,0
22168- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
22169- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
22170+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
22171+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
22172+
22173+ .align PAGE_SIZE_asm
22174+ENTRY(cpu_gdt_table)
22175+ .rept NR_CPUS
22176+ .quad 0x0000000000000000 /* NULL descriptor */
22177+ .quad 0x0000000000000000 /* 0x0b reserved */
22178+ .quad 0x0000000000000000 /* 0x13 reserved */
22179+ .quad 0x0000000000000000 /* 0x1b reserved */
22180+
22181+#ifdef CONFIG_PAX_KERNEXEC
22182+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
22183+#else
22184+ .quad 0x0000000000000000 /* 0x20 unused */
22185+#endif
22186+
22187+ .quad 0x0000000000000000 /* 0x28 unused */
22188+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
22189+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
22190+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
22191+ .quad 0x0000000000000000 /* 0x4b reserved */
22192+ .quad 0x0000000000000000 /* 0x53 reserved */
22193+ .quad 0x0000000000000000 /* 0x5b reserved */
22194+
22195+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
22196+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
22197+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
22198+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
22199+
22200+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
22201+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
22202+
22203+ /*
22204+ * Segments used for calling PnP BIOS have byte granularity.
22205+ * The code segments and data segments have fixed 64k limits,
22206+ * the transfer segment sizes are set at run time.
22207+ */
22208+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
22209+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
22210+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
22211+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
22212+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
22213+
22214+ /*
22215+ * The APM segments have byte granularity and their bases
22216+ * are set at run time. All have 64k limits.
22217+ */
22218+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
22219+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
22220+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
22221+
22222+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
22223+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
22224+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
22225+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
22226+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
22227+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
22228+
22229+ /* Be sure this is zeroed to avoid false validations in Xen */
22230+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
22231+ .endr
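
The hand-built cpu_gdt_table above is just an array of raw 8-byte x86 segment descriptors. A minimal userspace sketch that decodes the quads used in the hunk (field layout per the x86 architecture; only the fields printed here are decoded, and note the patch's change from 0x...9a/0x...92 to 0x...9b/0x...93 simply pre-sets the accessed bit):

#include <stdio.h>
#include <stdint.h>

static void decode_desc(uint64_t d)
{
    uint32_t limit = (d & 0xffff) | ((d >> 32) & 0xf0000);
    uint32_t base  = ((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24);
    uint8_t access = (d >> 40) & 0xff;   /* P, DPL, S, type bits        */
    uint8_t flags  = (d >> 52) & 0xf;    /* G, D/B, L, AVL              */

    if (flags & 0x8)                     /* G bit: limit counts 4K pages */
        limit = (limit << 12) | 0xfff;

    printf("%016llx: base=%#x limit=%#x dpl=%u %s%s\n",
           (unsigned long long)d, base, limit, (access >> 5) & 3,
           (access & 0x08) ? "code" : "data",
           (access & 0x01) ? " (accessed)" : "");
}

int main(void)
{
    decode_desc(0x00cf9b000000ffffULL); /* kernel 4GB code, accessed set */
    decode_desc(0x00cf93000000ffffULL); /* kernel 4GB data, accessed set */
    decode_desc(0x00cffb000000ffffULL); /* user 4GB code, DPL 3          */
    return 0;
}
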
22232diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
22233index a836860..1b5c665 100644
22234--- a/arch/x86/kernel/head_64.S
22235+++ b/arch/x86/kernel/head_64.S
22236@@ -20,6 +20,8 @@
22237 #include <asm/processor-flags.h>
22238 #include <asm/percpu.h>
22239 #include <asm/nops.h>
22240+#include <asm/cpufeature.h>
22241+#include <asm/alternative-asm.h>
22242
22243 #ifdef CONFIG_PARAVIRT
22244 #include <asm/asm-offsets.h>
22245@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
22246 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
22247 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
22248 L3_START_KERNEL = pud_index(__START_KERNEL_map)
22249+L4_VMALLOC_START = pgd_index(VMALLOC_START)
22250+L3_VMALLOC_START = pud_index(VMALLOC_START)
22251+L4_VMALLOC_END = pgd_index(VMALLOC_END)
22252+L3_VMALLOC_END = pud_index(VMALLOC_END)
22253+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
22254+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
22255
22256 .text
22257 __HEAD
22258@@ -89,11 +97,23 @@ startup_64:
22259 * Fixup the physical addresses in the page table
22260 */
22261 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
22262+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
22263+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
22264+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
22265+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
22266+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
22267
22268- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
22269- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
22270+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
22271+#ifndef CONFIG_XEN
22272+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
22273+#endif
22274
22275- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
22276+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
22277+
22278+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
22279+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
22280+
22281+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
22282
22283 /*
22284 * Set up the identity mapping for the switchover. These
22285@@ -177,8 +197,8 @@ ENTRY(secondary_startup_64)
22286 movq $(init_level4_pgt - __START_KERNEL_map), %rax
22287 1:
22288
22289- /* Enable PAE mode and PGE */
22290- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
22291+ /* Enable PAE mode and PSE/PGE */
22292+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
22293 movq %rcx, %cr4
22294
22295 /* Setup early boot stage 4 level pagetables. */
22296@@ -199,10 +219,18 @@ ENTRY(secondary_startup_64)
22297 movl $MSR_EFER, %ecx
22298 rdmsr
22299 btsl $_EFER_SCE, %eax /* Enable System Call */
22300- btl $20,%edi /* No Execute supported? */
22301+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
22302 jnc 1f
22303 btsl $_EFER_NX, %eax
22304 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
22305+ leaq init_level4_pgt(%rip), %rdi
22306+#ifndef CONFIG_EFI
22307+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
22308+#endif
22309+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
22310+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
22311+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
22312+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
22313 1: wrmsr /* Make changes effective */
22314
22315 /* Setup cr0 */
22316@@ -282,6 +310,7 @@ ENTRY(secondary_startup_64)
22317 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
22318 * address given in m16:64.
22319 */
22320+ pax_set_fptr_mask
22321 movq initial_code(%rip),%rax
22322 pushq $0 # fake return address to stop unwinder
22323 pushq $__KERNEL_CS # set correct cs
22324@@ -388,7 +417,7 @@ ENTRY(early_idt_handler)
22325 call dump_stack
22326 #ifdef CONFIG_KALLSYMS
22327 leaq early_idt_ripmsg(%rip),%rdi
22328- movq 40(%rsp),%rsi # %rip again
22329+ movq 88(%rsp),%rsi # %rip again
22330 call __print_symbol
22331 #endif
22332 #endif /* EARLY_PRINTK */
22333@@ -416,6 +445,7 @@ ENDPROC(early_idt_handler)
22334 early_recursion_flag:
22335 .long 0
22336
22337+ .section .rodata,"a",@progbits
22338 #ifdef CONFIG_EARLY_PRINTK
22339 early_idt_msg:
22340 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
22341@@ -443,29 +473,52 @@ NEXT_PAGE(early_level4_pgt)
22342 NEXT_PAGE(early_dynamic_pgts)
22343 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
22344
22345- .data
22346+ .section .rodata,"a",@progbits
22347
22348-#ifndef CONFIG_XEN
22349 NEXT_PAGE(init_level4_pgt)
22350- .fill 512,8,0
22351-#else
22352-NEXT_PAGE(init_level4_pgt)
22353- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
22354 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
22355 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
22356+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
22357+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
22358+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
22359+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
22360+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
22361+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
22362 .org init_level4_pgt + L4_START_KERNEL*8, 0
22363 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
22364 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
22365
22366+#ifdef CONFIG_PAX_PER_CPU_PGD
22367+NEXT_PAGE(cpu_pgd)
22368+ .rept 2*NR_CPUS
22369+ .fill 512,8,0
22370+ .endr
22371+#endif
22372+
22373 NEXT_PAGE(level3_ident_pgt)
22374 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
22375+#ifdef CONFIG_XEN
22376 .fill 511, 8, 0
22377+#else
22378+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
22379+ .fill 510,8,0
22380+#endif
22381+
22382+NEXT_PAGE(level3_vmalloc_start_pgt)
22383+ .fill 512,8,0
22384+
22385+NEXT_PAGE(level3_vmalloc_end_pgt)
22386+ .fill 512,8,0
22387+
22388+NEXT_PAGE(level3_vmemmap_pgt)
22389+ .fill L3_VMEMMAP_START,8,0
22390+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
22391+
22392 NEXT_PAGE(level2_ident_pgt)
22393- /* Since I easily can, map the first 1G.
22394+ /* Since I easily can, map the first 2G.
22395 * Don't set NX because code runs from these pages.
22396 */
22397- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
22398-#endif
22399+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
22400
22401 NEXT_PAGE(level3_kernel_pgt)
22402 .fill L3_START_KERNEL,8,0
22403@@ -473,6 +526,9 @@ NEXT_PAGE(level3_kernel_pgt)
22404 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
22405 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
22406
22407+NEXT_PAGE(level2_vmemmap_pgt)
22408+ .fill 512,8,0
22409+
22410 NEXT_PAGE(level2_kernel_pgt)
22411 /*
22412 * 512 MB kernel mapping. We spend a full page on this pagetable
22413@@ -488,39 +544,70 @@ NEXT_PAGE(level2_kernel_pgt)
22414 KERNEL_IMAGE_SIZE/PMD_SIZE)
22415
22416 NEXT_PAGE(level2_fixmap_pgt)
22417- .fill 506,8,0
22418- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
22419- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
22420- .fill 5,8,0
22421+ .fill 507,8,0
22422+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
22423+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
22424+ .fill 4,8,0
22425
22426-NEXT_PAGE(level1_fixmap_pgt)
22427+NEXT_PAGE(level1_vsyscall_pgt)
22428 .fill 512,8,0
22429
22430 #undef PMDS
22431
22432- .data
22433+ .align PAGE_SIZE
22434+ENTRY(cpu_gdt_table)
22435+ .rept NR_CPUS
22436+ .quad 0x0000000000000000 /* NULL descriptor */
22437+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
22438+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
22439+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
22440+ .quad 0x00cffb000000ffff /* __USER32_CS */
22441+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
22442+ .quad 0x00affb000000ffff /* __USER_CS */
22443+
22444+#ifdef CONFIG_PAX_KERNEXEC
22445+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
22446+#else
22447+ .quad 0x0 /* unused */
22448+#endif
22449+
22450+ .quad 0,0 /* TSS */
22451+ .quad 0,0 /* LDT */
22452+ .quad 0,0,0 /* three TLS descriptors */
22453+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
22454+ /* asm/segment.h:GDT_ENTRIES must match this */
22455+
22456+#ifdef CONFIG_PAX_MEMORY_UDEREF
22457+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
22458+#else
22459+ .quad 0x0 /* unused */
22460+#endif
22461+
22462+ /* zero the remaining page */
22463+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
22464+ .endr
22465+
22466 .align 16
22467 .globl early_gdt_descr
22468 early_gdt_descr:
22469 .word GDT_ENTRIES*8-1
22470 early_gdt_descr_base:
22471- .quad INIT_PER_CPU_VAR(gdt_page)
22472+ .quad cpu_gdt_table
22473
22474 ENTRY(phys_base)
22475 /* This must match the first entry in level2_kernel_pgt */
22476 .quad 0x0000000000000000
22477
22478 #include "../../x86/xen/xen-head.S"
22479-
22480- .section .bss, "aw", @nobits
22481+
22482+ .section .rodata,"a",@progbits
22483+NEXT_PAGE(empty_zero_page)
22484+ .skip PAGE_SIZE
22485+
22486 .align PAGE_SIZE
22487 ENTRY(idt_table)
22488- .skip IDT_ENTRIES * 16
22489+ .fill 512,8,0
22490
22491 .align L1_CACHE_BYTES
22492 ENTRY(nmi_idt_table)
22493- .skip IDT_ENTRIES * 16
22494-
22495- __PAGE_ALIGNED_BSS
22496-NEXT_PAGE(empty_zero_page)
22497- .skip PAGE_SIZE
22498+ .fill 512,8,0
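
The L4_/L3_ symbols added above are compile-time page-table indices: with 4-level x86-64 paging, each level's index is a 9-bit slice of the virtual address. A small sketch of the arithmetic, using the classic 3.x x86-64 memory-map bases (illustrative constants for a non-relocated kernel, not values taken from this patch):

#include <stdio.h>

#define PGDIR_SHIFT 39
#define PUD_SHIFT   30
#define PTRS_PER    512

/* the L4_ and L3_ symbols above are exactly these computations */
static unsigned pgd_index(unsigned long long a) { return (a >> PGDIR_SHIFT) & (PTRS_PER - 1); }
static unsigned pud_index(unsigned long long a) { return (a >> PUD_SHIFT) & (PTRS_PER - 1); }

int main(void)
{
    unsigned long long page_offset = 0xffff880000000000ULL; /* direct map         */
    unsigned long long vmalloc     = 0xffffc90000000000ULL; /* VMALLOC_START      */
    unsigned long long vmemmap     = 0xffffea0000000000ULL; /* VMEMMAP_START      */
    unsigned long long start_kern  = 0xffffffff80000000ULL; /* __START_KERNEL_map */

    printf("L4_PAGE_OFFSET   = %u\n", pgd_index(page_offset));
    printf("L4_VMALLOC_START = %u\n", pgd_index(vmalloc));
    printf("L4_VMEMMAP_START = %u\n", pgd_index(vmemmap));
    printf("L4_START_KERNEL  = %u  (the \"511\" in the comment above)\n",
           pgd_index(start_kern));
    printf("L3_START_KERNEL  = %u\n", pud_index(start_kern));
    return 0;
}
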
22499diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
22500index 0fa6912..37fce70 100644
22501--- a/arch/x86/kernel/i386_ksyms_32.c
22502+++ b/arch/x86/kernel/i386_ksyms_32.c
22503@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
22504 EXPORT_SYMBOL(cmpxchg8b_emu);
22505 #endif
22506
22507+EXPORT_SYMBOL_GPL(cpu_gdt_table);
22508+
22509 /* Networking helper routines. */
22510 EXPORT_SYMBOL(csum_partial_copy_generic);
22511+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
22512+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
22513
22514 EXPORT_SYMBOL(__get_user_1);
22515 EXPORT_SYMBOL(__get_user_2);
22516@@ -37,3 +41,7 @@ EXPORT_SYMBOL(strstr);
22517
22518 EXPORT_SYMBOL(csum_partial);
22519 EXPORT_SYMBOL(empty_zero_page);
22520+
22521+#ifdef CONFIG_PAX_KERNEXEC
22522+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
22523+#endif
22524diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
22525index f7ea30d..6318acc 100644
22526--- a/arch/x86/kernel/i387.c
22527+++ b/arch/x86/kernel/i387.c
22528@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
22529 static inline bool interrupted_user_mode(void)
22530 {
22531 struct pt_regs *regs = get_irq_regs();
22532- return regs && user_mode_vm(regs);
22533+ return regs && user_mode(regs);
22534 }
22535
22536 /*
22537diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
22538index 9a5c460..84868423 100644
22539--- a/arch/x86/kernel/i8259.c
22540+++ b/arch/x86/kernel/i8259.c
22541@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
22542 static void make_8259A_irq(unsigned int irq)
22543 {
22544 disable_irq_nosync(irq);
22545- io_apic_irqs &= ~(1<<irq);
22546+ io_apic_irqs &= ~(1UL<<irq);
22547 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
22548 i8259A_chip.name);
22549 enable_irq(irq);
22550@@ -209,7 +209,7 @@ spurious_8259A_irq:
22551 "spurious 8259A interrupt: IRQ%d.\n", irq);
22552 spurious_irq_mask |= irqmask;
22553 }
22554- atomic_inc(&irq_err_count);
22555+ atomic_inc_unchecked(&irq_err_count);
22556 /*
22557 * Theoretically we do not have to handle this IRQ,
22558 * but in Linux this does not cause problems and is
22559@@ -333,14 +333,16 @@ static void init_8259A(int auto_eoi)
22560 /* (slave's support for AEOI in flat mode is to be investigated) */
22561 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
22562
22563+ pax_open_kernel();
22564 if (auto_eoi)
22565 /*
22566 * In AEOI mode we just have to mask the interrupt
22567 * when acking.
22568 */
22569- i8259A_chip.irq_mask_ack = disable_8259A_irq;
22570+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
22571 else
22572- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
22573+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
22574+ pax_close_kernel();
22575
22576 udelay(100); /* wait for 8259A to initialize */
22577
22578diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
22579index a979b5b..1d6db75 100644
22580--- a/arch/x86/kernel/io_delay.c
22581+++ b/arch/x86/kernel/io_delay.c
22582@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
22583 * Quirk table for systems that misbehave (lock up, etc.) if port
22584 * 0x80 is used:
22585 */
22586-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
22587+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
22588 {
22589 .callback = dmi_io_delay_0xed_port,
22590 .ident = "Compaq Presario V6000",
22591diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
22592index 4ddaf66..6292f4e 100644
22593--- a/arch/x86/kernel/ioport.c
22594+++ b/arch/x86/kernel/ioport.c
22595@@ -6,6 +6,7 @@
22596 #include <linux/sched.h>
22597 #include <linux/kernel.h>
22598 #include <linux/capability.h>
22599+#include <linux/security.h>
22600 #include <linux/errno.h>
22601 #include <linux/types.h>
22602 #include <linux/ioport.h>
22603@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
22604
22605 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
22606 return -EINVAL;
22607+#ifdef CONFIG_GRKERNSEC_IO
22608+ if (turn_on && grsec_disable_privio) {
22609+ gr_handle_ioperm();
22610+ return -EPERM;
22611+ }
22612+#endif
22613 if (turn_on && !capable(CAP_SYS_RAWIO))
22614 return -EPERM;
22615
22616@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
22617 * because the ->io_bitmap_max value must match the bitmap
22618 * contents:
22619 */
22620- tss = &per_cpu(init_tss, get_cpu());
22621+ tss = init_tss + get_cpu();
22622
22623 if (turn_on)
22624 bitmap_clear(t->io_bitmap_ptr, from, num);
22625@@ -103,6 +110,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
22626 return -EINVAL;
22627 /* Trying to gain more privileges? */
22628 if (level > old) {
22629+#ifdef CONFIG_GRKERNSEC_IO
22630+ if (grsec_disable_privio) {
22631+ gr_handle_iopl();
22632+ return -EPERM;
22633+ }
22634+#endif
22635 if (!capable(CAP_SYS_RAWIO))
22636 return -EPERM;
22637 }
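
The GRKERNSEC_IO hunks above reject port-I/O privilege requests before the capability check is even reached. A userspace sketch of what that looks like from the caller's side (x86 Linux, run as root; on a grsecurity kernel with privileged I/O disabled both calls should fail with EPERM even for CAP_SYS_RAWIO holders):

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/io.h>

int main(void)
{
    if (iopl(3) == 0) {                      /* raise I/O privilege level */
        printf("iopl(3) granted\n");
        iopl(0);
    } else {
        printf("iopl(3) failed: %s\n", strerror(errno));
    }

    if (ioperm(0x80, 1, 1) == 0) {           /* per-port bitmap grant */
        printf("ioperm(0x80) granted\n");
        ioperm(0x80, 1, 0);
    } else {
        printf("ioperm(0x80) failed: %s\n", strerror(errno));
    }
    return 0;
}
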
22638diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
22639index ac0631d..ff7cb62 100644
22640--- a/arch/x86/kernel/irq.c
22641+++ b/arch/x86/kernel/irq.c
22642@@ -18,7 +18,7 @@
22643 #include <asm/mce.h>
22644 #include <asm/hw_irq.h>
22645
22646-atomic_t irq_err_count;
22647+atomic_unchecked_t irq_err_count;
22648
22649 /* Function pointer for generic interrupt vector handling */
22650 void (*x86_platform_ipi_callback)(void) = NULL;
22651@@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
22652 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
22653 seq_printf(p, " Machine check polls\n");
22654 #endif
22655- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
22656+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
22657 #if defined(CONFIG_X86_IO_APIC)
22658- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
22659+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
22660 #endif
22661 return 0;
22662 }
22663@@ -164,7 +164,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
22664
22665 u64 arch_irq_stat(void)
22666 {
22667- u64 sum = atomic_read(&irq_err_count);
22668+ u64 sum = atomic_read_unchecked(&irq_err_count);
22669 return sum;
22670 }
22671
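
irq_err_count above is downgraded to atomic_unchecked_t: under PaX's refcount protection, plain atomic increments trap on overflow so reference-count wraps cannot be exploited, and pure statistics counters where wrapping is harmless opt out via the *_unchecked variants. A single-threaded model of the distinction (the trap is simulated with abort(); the real mechanism is an architecture-level overflow check):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

static int checked_inc(int *v)      /* models a protected refcount */
{
    int out;
    if (__builtin_add_overflow(*v, 1, &out))
        abort();                    /* simulated overflow trap */
    return *v = out;
}

static int unchecked_inc(int *v)    /* models a free-running stats counter */
{
    return *v = (int)((unsigned)*v + 1u);   /* wraps silently */
}

int main(void)
{
    int stat = INT_MAX;
    printf("unchecked counter wraps to %d\n", unchecked_inc(&stat));

    int ref = INT_MAX;
    checked_inc(&ref);              /* aborts here */
    return 0;
}
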
22672diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
22673index 344faf8..355f60d 100644
22674--- a/arch/x86/kernel/irq_32.c
22675+++ b/arch/x86/kernel/irq_32.c
22676@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
22677 __asm__ __volatile__("andl %%esp,%0" :
22678 "=r" (sp) : "0" (THREAD_SIZE - 1));
22679
22680- return sp < (sizeof(struct thread_info) + STACK_WARN);
22681+ return sp < STACK_WARN;
22682 }
22683
22684 static void print_stack_overflow(void)
22685@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
22686 * per-CPU IRQ handling contexts (thread information and stack)
22687 */
22688 union irq_ctx {
22689- struct thread_info tinfo;
22690- u32 stack[THREAD_SIZE/sizeof(u32)];
22691+ unsigned long previous_esp;
22692+ u32 stack[THREAD_SIZE/sizeof(u32)];
22693 } __attribute__((aligned(THREAD_SIZE)));
22694
22695 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
22696@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
22697 static inline int
22698 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
22699 {
22700- union irq_ctx *curctx, *irqctx;
22701+ union irq_ctx *irqctx;
22702 u32 *isp, arg1, arg2;
22703
22704- curctx = (union irq_ctx *) current_thread_info();
22705 irqctx = __this_cpu_read(hardirq_ctx);
22706
22707 /*
22708@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
22709 * handler) we can't do that and just have to keep using the
22710 * current stack (which is the irq stack already after all)
22711 */
22712- if (unlikely(curctx == irqctx))
22713+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
22714 return 0;
22715
22716 /* build the stack frame on the IRQ stack */
22717- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
22718- irqctx->tinfo.task = curctx->tinfo.task;
22719- irqctx->tinfo.previous_esp = current_stack_pointer;
22720+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
22721+ irqctx->previous_esp = current_stack_pointer;
22722
22723- /* Copy the preempt_count so that the [soft]irq checks work. */
22724- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
22725+#ifdef CONFIG_PAX_MEMORY_UDEREF
22726+ __set_fs(MAKE_MM_SEG(0));
22727+#endif
22728
22729 if (unlikely(overflow))
22730 call_on_stack(print_stack_overflow, isp);
22731@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
22732 : "0" (irq), "1" (desc), "2" (isp),
22733 "D" (desc->handle_irq)
22734 : "memory", "cc", "ecx");
22735+
22736+#ifdef CONFIG_PAX_MEMORY_UDEREF
22737+ __set_fs(current_thread_info()->addr_limit);
22738+#endif
22739+
22740 return 1;
22741 }
22742
22743@@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
22744 */
22745 void __cpuinit irq_ctx_init(int cpu)
22746 {
22747- union irq_ctx *irqctx;
22748-
22749 if (per_cpu(hardirq_ctx, cpu))
22750 return;
22751
22752- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
22753- THREADINFO_GFP,
22754- THREAD_SIZE_ORDER));
22755- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
22756- irqctx->tinfo.cpu = cpu;
22757- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
22758- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
22759-
22760- per_cpu(hardirq_ctx, cpu) = irqctx;
22761-
22762- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
22763- THREADINFO_GFP,
22764- THREAD_SIZE_ORDER));
22765- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
22766- irqctx->tinfo.cpu = cpu;
22767- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
22768-
22769- per_cpu(softirq_ctx, cpu) = irqctx;
22770+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
22771+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
22772+
22773+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
22774+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
22775
22776 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
22777 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
22778@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
22779 asmlinkage void do_softirq(void)
22780 {
22781 unsigned long flags;
22782- struct thread_info *curctx;
22783 union irq_ctx *irqctx;
22784 u32 *isp;
22785
22786@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
22787 local_irq_save(flags);
22788
22789 if (local_softirq_pending()) {
22790- curctx = current_thread_info();
22791 irqctx = __this_cpu_read(softirq_ctx);
22792- irqctx->tinfo.task = curctx->task;
22793- irqctx->tinfo.previous_esp = current_stack_pointer;
22794+ irqctx->previous_esp = current_stack_pointer;
22795
22796 /* build the stack frame on the softirq stack */
22797- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
22798+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
22799+
22800+#ifdef CONFIG_PAX_MEMORY_UDEREF
22801+ __set_fs(MAKE_MM_SEG(0));
22802+#endif
22803
22804 call_on_stack(__do_softirq, isp);
22805+
22806+#ifdef CONFIG_PAX_MEMORY_UDEREF
22807+ __set_fs(current_thread_info()->addr_limit);
22808+#endif
22809+
22810 /*
22811 * Shouldn't happen, we returned above if in_interrupt():
22812 */
22813@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
22814 if (unlikely(!desc))
22815 return false;
22816
22817- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
22818+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
22819 if (unlikely(overflow))
22820 print_stack_overflow();
22821 desc->handle_irq(irq, desc);
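
The rewritten "already on the IRQ stack?" test above replaces the thread_info pointer comparison with pointer distance: stacks are THREAD_SIZE-aligned, so "sp - base < THREAD_SIZE" identifies the owning stack without needing a thread_info at its bottom. A sketch of the check (THREAD_SIZE value assumed):

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192UL          /* two pages, the usual i386 value */

static int on_stack(uintptr_t sp, uintptr_t stack_base)
{
    /* unsigned subtraction also rejects sp below the base */
    return sp - stack_base < THREAD_SIZE;
}

int main(void)
{
    uintptr_t irq_stack = 0x10000;  /* THREAD_SIZE-aligned base */

    printf("%d\n", on_stack(irq_stack + 0x100, irq_stack)); /* 1: inside      */
    printf("%d\n", on_stack(0x30000, irq_stack));           /* 0: other stack */
    return 0;
}
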
22822diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
22823index d04d3ec..ea4b374 100644
22824--- a/arch/x86/kernel/irq_64.c
22825+++ b/arch/x86/kernel/irq_64.c
22826@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
22827 u64 estack_top, estack_bottom;
22828 u64 curbase = (u64)task_stack_page(current);
22829
22830- if (user_mode_vm(regs))
22831+ if (user_mode(regs))
22832 return;
22833
22834 if (regs->sp >= curbase + sizeof(struct thread_info) +
22835diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
22836index dc1404b..bbc43e7 100644
22837--- a/arch/x86/kernel/kdebugfs.c
22838+++ b/arch/x86/kernel/kdebugfs.c
22839@@ -27,7 +27,7 @@ struct setup_data_node {
22840 u32 len;
22841 };
22842
22843-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
22844+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
22845 size_t count, loff_t *ppos)
22846 {
22847 struct setup_data_node *node = file->private_data;
22848diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
22849index 836f832..a8bda67 100644
22850--- a/arch/x86/kernel/kgdb.c
22851+++ b/arch/x86/kernel/kgdb.c
22852@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
22853 #ifdef CONFIG_X86_32
22854 switch (regno) {
22855 case GDB_SS:
22856- if (!user_mode_vm(regs))
22857+ if (!user_mode(regs))
22858 *(unsigned long *)mem = __KERNEL_DS;
22859 break;
22860 case GDB_SP:
22861- if (!user_mode_vm(regs))
22862+ if (!user_mode(regs))
22863 *(unsigned long *)mem = kernel_stack_pointer(regs);
22864 break;
22865 case GDB_GS:
22866@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
22867 bp->attr.bp_addr = breakinfo[breakno].addr;
22868 bp->attr.bp_len = breakinfo[breakno].len;
22869 bp->attr.bp_type = breakinfo[breakno].type;
22870- info->address = breakinfo[breakno].addr;
22871+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
22872+ info->address = ktla_ktva(breakinfo[breakno].addr);
22873+ else
22874+ info->address = breakinfo[breakno].addr;
22875 info->len = breakinfo[breakno].len;
22876 info->type = breakinfo[breakno].type;
22877 val = arch_install_hw_breakpoint(bp);
22878@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
22879 case 'k':
22880 /* clear the trace bit */
22881 linux_regs->flags &= ~X86_EFLAGS_TF;
22882- atomic_set(&kgdb_cpu_doing_single_step, -1);
22883+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
22884
22885 /* set the trace bit if we're stepping */
22886 if (remcomInBuffer[0] == 's') {
22887 linux_regs->flags |= X86_EFLAGS_TF;
22888- atomic_set(&kgdb_cpu_doing_single_step,
22889+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
22890 raw_smp_processor_id());
22891 }
22892
22893@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
22894
22895 switch (cmd) {
22896 case DIE_DEBUG:
22897- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
22898+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
22899 if (user_mode(regs))
22900 return single_step_cont(regs, args);
22901 break;
22902@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
22903 #endif /* CONFIG_DEBUG_RODATA */
22904
22905 bpt->type = BP_BREAKPOINT;
22906- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
22907+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
22908 BREAK_INSTR_SIZE);
22909 if (err)
22910 return err;
22911- err = probe_kernel_write((char *)bpt->bpt_addr,
22912+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
22913 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
22914 #ifdef CONFIG_DEBUG_RODATA
22915 if (!err)
22916@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
22917 return -EBUSY;
22918 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
22919 BREAK_INSTR_SIZE);
22920- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
22921+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
22922 if (err)
22923 return err;
22924 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
22925@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
22926 if (mutex_is_locked(&text_mutex))
22927 goto knl_write;
22928 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
22929- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
22930+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
22931 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
22932 goto knl_write;
22933 return err;
22934 knl_write:
22935 #endif /* CONFIG_DEBUG_RODATA */
22936- return probe_kernel_write((char *)bpt->bpt_addr,
22937+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
22938 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
22939 }
22940
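
ktla_ktva()/ktva_ktla(), used throughout these hunks, translate between the linear address of kernel text and the address it executes at: under PAX_KERNEXEC on i386 the two differ by a constant because the kernel code segment is given a non-zero base. A sketch of the round trip only; the offset and addresses below are stand-ins, not values from this patch:

#include <stdio.h>

#define __KERNEL_TEXT_OFFSET 0x10000000UL   /* hypothetical offset */

#define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET)
#define ktva_ktla(addr) ((addr) - __KERNEL_TEXT_OFFSET)

int main(void)
{
    unsigned long linear = 0xc1000000UL;    /* hypothetical _text address */
    unsigned long va = ktla_ktva(linear);

    printf("linear %#lx -> va %#lx -> linear %#lx\n",
           linear, va, ktva_ktla(va));
    return 0;
}
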
22941diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
22942index 211bce4..6e2580a 100644
22943--- a/arch/x86/kernel/kprobes/core.c
22944+++ b/arch/x86/kernel/kprobes/core.c
22945@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
22946 s32 raddr;
22947 } __packed *insn;
22948
22949- insn = (struct __arch_relative_insn *)from;
22950+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
22951+
22952+ pax_open_kernel();
22953 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
22954 insn->op = op;
22955+ pax_close_kernel();
22956 }
22957
22958 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
22959@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
22960 kprobe_opcode_t opcode;
22961 kprobe_opcode_t *orig_opcodes = opcodes;
22962
22963- if (search_exception_tables((unsigned long)opcodes))
22964+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
22965 return 0; /* Page fault may occur on this address. */
22966
22967 retry:
22968@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
22969 * for the first byte, we can recover the original instruction
22970 * from it and kp->opcode.
22971 */
22972- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
22973+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
22974 buf[0] = kp->opcode;
22975- return (unsigned long)buf;
22976+ return ktva_ktla((unsigned long)buf);
22977 }
22978
22979 /*
22980@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
22981 /* Another subsystem puts a breakpoint, failed to recover */
22982 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
22983 return 0;
22984+ pax_open_kernel();
22985 memcpy(dest, insn.kaddr, insn.length);
22986+ pax_close_kernel();
22987
22988 #ifdef CONFIG_X86_64
22989 if (insn_rip_relative(&insn)) {
22990@@ -359,7 +364,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
22991 return 0;
22992 }
22993 disp = (u8 *) dest + insn_offset_displacement(&insn);
22994+ pax_open_kernel();
22995 *(s32 *) disp = (s32) newdisp;
22996+ pax_close_kernel();
22997 }
22998 #endif
22999 return insn.length;
23000@@ -498,7 +505,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
23001 * nor set current_kprobe, because it doesn't use single
23002 * stepping.
23003 */
23004- regs->ip = (unsigned long)p->ainsn.insn;
23005+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
23006 preempt_enable_no_resched();
23007 return;
23008 }
23009@@ -515,9 +522,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
23010 regs->flags &= ~X86_EFLAGS_IF;
23011 /* single step inline if the instruction is an int3 */
23012 if (p->opcode == BREAKPOINT_INSTRUCTION)
23013- regs->ip = (unsigned long)p->addr;
23014+ regs->ip = ktla_ktva((unsigned long)p->addr);
23015 else
23016- regs->ip = (unsigned long)p->ainsn.insn;
23017+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
23018 }
23019
23020 /*
23021@@ -596,7 +603,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
23022 setup_singlestep(p, regs, kcb, 0);
23023 return 1;
23024 }
23025- } else if (*addr != BREAKPOINT_INSTRUCTION) {
23026+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
23027 /*
23028 * The breakpoint instruction was removed right
23029 * after we hit it. Another cpu has removed
23030@@ -642,6 +649,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
23031 " movq %rax, 152(%rsp)\n"
23032 RESTORE_REGS_STRING
23033 " popfq\n"
23034+#ifdef KERNEXEC_PLUGIN
23035+ " btsq $63,(%rsp)\n"
23036+#endif
23037 #else
23038 " pushf\n"
23039 SAVE_REGS_STRING
23040@@ -779,7 +789,7 @@ static void __kprobes
23041 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
23042 {
23043 unsigned long *tos = stack_addr(regs);
23044- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
23045+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
23046 unsigned long orig_ip = (unsigned long)p->addr;
23047 kprobe_opcode_t *insn = p->ainsn.insn;
23048
23049@@ -961,7 +971,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
23050 struct die_args *args = data;
23051 int ret = NOTIFY_DONE;
23052
23053- if (args->regs && user_mode_vm(args->regs))
23054+ if (args->regs && user_mode(args->regs))
23055 return ret;
23056
23057 switch (val) {
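
The insn-slot fixups above all hinge on x86 rel32 encoding: the displacement in a 5-byte jump or call is relative to the end of the instruction, hence the "+ 5" in __synthesize_relative_insn(). A sketch of the same synthesis in userspace:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void synthesize_reljump(uint8_t buf[5], uint64_t from, uint64_t to)
{
    int32_t raddr = (int32_t)(to - (from + 5)); /* end-of-insn relative */

    buf[0] = 0xe9;                /* JMP rel32 opcode */
    memcpy(buf + 1, &raddr, 4);   /* little-endian displacement */
}

int main(void)
{
    uint8_t insn[5];

    synthesize_reljump(insn, 0x1000, 0x2000);
    printf("jmp at 0x1000 to 0x2000: %02x %02x %02x %02x %02x\n",
           insn[0], insn[1], insn[2], insn[3], insn[4]);
    return 0;
}
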
23058diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
23059index 76dc6f0..66bdfc3 100644
23060--- a/arch/x86/kernel/kprobes/opt.c
23061+++ b/arch/x86/kernel/kprobes/opt.c
23062@@ -79,6 +79,7 @@ found:
23063 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
23064 static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
23065 {
23066+ pax_open_kernel();
23067 #ifdef CONFIG_X86_64
23068 *addr++ = 0x48;
23069 *addr++ = 0xbf;
23070@@ -86,6 +87,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v
23071 *addr++ = 0xb8;
23072 #endif
23073 *(unsigned long *)addr = val;
23074+ pax_close_kernel();
23075 }
23076
23077 static void __used __kprobes kprobes_optinsn_template_holder(void)
23078@@ -338,7 +340,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
23079 * Verify if the address gap is in 2GB range, because this uses
23080 * a relative jump.
23081 */
23082- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
23083+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
23084 if (abs(rel) > 0x7fffffff)
23085 return -ERANGE;
23086
23087@@ -353,16 +355,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
23088 op->optinsn.size = ret;
23089
23090 /* Copy arch-dep-instance from template */
23091- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
23092+ pax_open_kernel();
23093+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
23094+ pax_close_kernel();
23095
23096 /* Set probe information */
23097 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
23098
23099 /* Set probe function call */
23100- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
23101+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
23102
23103 /* Set returning jmp instruction at the tail of out-of-line buffer */
23104- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
23105+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
23106 (u8 *)op->kp.addr + op->optinsn.size);
23107
23108 flush_icache_range((unsigned long) buf,
23109@@ -385,7 +389,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
23110 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
23111
23112 /* Backup instructions which will be replaced by jump address */
23113- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
23114+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
23115 RELATIVE_ADDR_SIZE);
23116
23117 insn_buf[0] = RELATIVEJUMP_OPCODE;
23118@@ -483,7 +487,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
23119 /* This kprobe is really able to run optimized path. */
23120 op = container_of(p, struct optimized_kprobe, kp);
23121 /* Detour through copied instructions */
23122- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
23123+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
23124 if (!reenter)
23125 reset_current_kprobe();
23126 preempt_enable_no_resched();
23127diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
23128index cd6d9a5..16245a4 100644
23129--- a/arch/x86/kernel/kvm.c
23130+++ b/arch/x86/kernel/kvm.c
23131@@ -455,7 +455,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
23132 return NOTIFY_OK;
23133 }
23134
23135-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
23136+static struct notifier_block kvm_cpu_notifier = {
23137 .notifier_call = kvm_cpu_notify,
23138 };
23139 #endif
23140diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
23141index ebc9873..1b9724b 100644
23142--- a/arch/x86/kernel/ldt.c
23143+++ b/arch/x86/kernel/ldt.c
23144@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
23145 if (reload) {
23146 #ifdef CONFIG_SMP
23147 preempt_disable();
23148- load_LDT(pc);
23149+ load_LDT_nolock(pc);
23150 if (!cpumask_equal(mm_cpumask(current->mm),
23151 cpumask_of(smp_processor_id())))
23152 smp_call_function(flush_ldt, current->mm, 1);
23153 preempt_enable();
23154 #else
23155- load_LDT(pc);
23156+ load_LDT_nolock(pc);
23157 #endif
23158 }
23159 if (oldsize) {
23160@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
23161 return err;
23162
23163 for (i = 0; i < old->size; i++)
23164- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
23165+ write_ldt_entry(new->ldt, i, old->ldt + i);
23166 return 0;
23167 }
23168
23169@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
23170 retval = copy_ldt(&mm->context, &old_mm->context);
23171 mutex_unlock(&old_mm->context.lock);
23172 }
23173+
23174+ if (tsk == current) {
23175+ mm->context.vdso = 0;
23176+
23177+#ifdef CONFIG_X86_32
23178+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23179+ mm->context.user_cs_base = 0UL;
23180+ mm->context.user_cs_limit = ~0UL;
23181+
23182+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
23183+ cpus_clear(mm->context.cpu_user_cs_mask);
23184+#endif
23185+
23186+#endif
23187+#endif
23188+
23189+ }
23190+
23191 return retval;
23192 }
23193
23194@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
23195 }
23196 }
23197
23198+#ifdef CONFIG_PAX_SEGMEXEC
23199+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
23200+ error = -EINVAL;
23201+ goto out_unlock;
23202+ }
23203+#endif
23204+
23205 fill_ldt(&ldt, &ldt_info);
23206 if (oldmode)
23207 ldt.avl = 0;
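
The SEGMEXEC hunk above makes write_ldt() refuse executable descriptors for tasks running with segmentation-based NX emulation. A userspace sketch of the kind of call it rejects (installs fine on a stock kernel, returns -EINVAL for a SEGMEXEC task):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>

int main(void)
{
    struct user_desc d;

    memset(&d, 0, sizeof(d));
    d.entry_number   = 0;
    d.base_addr      = 0;
    d.limit          = 0xfffff;
    d.seg_32bit      = 1;
    d.limit_in_pages = 1;
    d.contents       = MODIFY_LDT_CONTENTS_CODE;  /* executable segment */

    if (syscall(SYS_modify_ldt, 1, &d, sizeof(d)) < 0)   /* 1 = write */
        printf("modify_ldt: %s\n", strerror(errno));
    else
        printf("code LDT entry installed\n");
    return 0;
}
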
23208diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
23209index 5b19e4d..6476a76 100644
23210--- a/arch/x86/kernel/machine_kexec_32.c
23211+++ b/arch/x86/kernel/machine_kexec_32.c
23212@@ -26,7 +26,7 @@
23213 #include <asm/cacheflush.h>
23214 #include <asm/debugreg.h>
23215
23216-static void set_idt(void *newidt, __u16 limit)
23217+static void set_idt(struct desc_struct *newidt, __u16 limit)
23218 {
23219 struct desc_ptr curidt;
23220
23221@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
23222 }
23223
23224
23225-static void set_gdt(void *newgdt, __u16 limit)
23226+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
23227 {
23228 struct desc_ptr curgdt;
23229
23230@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
23231 }
23232
23233 control_page = page_address(image->control_code_page);
23234- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
23235+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
23236
23237 relocate_kernel_ptr = control_page;
23238 page_list[PA_CONTROL_PAGE] = __pa(control_page);
23239diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
23240index 22db92b..d546bec 100644
23241--- a/arch/x86/kernel/microcode_core.c
23242+++ b/arch/x86/kernel/microcode_core.c
23243@@ -513,7 +513,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
23244 return NOTIFY_OK;
23245 }
23246
23247-static struct notifier_block __refdata mc_cpu_notifier = {
23248+static struct notifier_block mc_cpu_notifier = {
23249 .notifier_call = mc_cpu_callback,
23250 };
23251
23252diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
23253index 5fb2ceb..3ae90bb 100644
23254--- a/arch/x86/kernel/microcode_intel.c
23255+++ b/arch/x86/kernel/microcode_intel.c
23256@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
23257
23258 static int get_ucode_user(void *to, const void *from, size_t n)
23259 {
23260- return copy_from_user(to, from, n);
23261+ return copy_from_user(to, (const void __force_user *)from, n);
23262 }
23263
23264 static enum ucode_state
23265 request_microcode_user(int cpu, const void __user *buf, size_t size)
23266 {
23267- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
23268+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
23269 }
23270
23271 static void microcode_fini_cpu(int cpu)
23272diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
23273index 216a4d7..228255a 100644
23274--- a/arch/x86/kernel/module.c
23275+++ b/arch/x86/kernel/module.c
23276@@ -43,15 +43,60 @@ do { \
23277 } while (0)
23278 #endif
23279
23280-void *module_alloc(unsigned long size)
23281+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
23282 {
23283- if (PAGE_ALIGN(size) > MODULES_LEN)
23284+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
23285 return NULL;
23286 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
23287- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
23288+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
23289 -1, __builtin_return_address(0));
23290 }
23291
23292+void *module_alloc(unsigned long size)
23293+{
23294+
23295+#ifdef CONFIG_PAX_KERNEXEC
23296+ return __module_alloc(size, PAGE_KERNEL);
23297+#else
23298+ return __module_alloc(size, PAGE_KERNEL_EXEC);
23299+#endif
23300+
23301+}
23302+
23303+#ifdef CONFIG_PAX_KERNEXEC
23304+#ifdef CONFIG_X86_32
23305+void *module_alloc_exec(unsigned long size)
23306+{
23307+ struct vm_struct *area;
23308+
23309+ if (size == 0)
23310+ return NULL;
23311+
23312+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
23313+ return area ? area->addr : NULL;
23314+}
23315+EXPORT_SYMBOL(module_alloc_exec);
23316+
23317+void module_free_exec(struct module *mod, void *module_region)
23318+{
23319+ vunmap(module_region);
23320+}
23321+EXPORT_SYMBOL(module_free_exec);
23322+#else
23323+void module_free_exec(struct module *mod, void *module_region)
23324+{
23325+ module_free(mod, module_region);
23326+}
23327+EXPORT_SYMBOL(module_free_exec);
23328+
23329+void *module_alloc_exec(unsigned long size)
23330+{
23331+ return __module_alloc(size, PAGE_KERNEL_RX);
23332+}
23333+EXPORT_SYMBOL(module_alloc_exec);
23334+#endif
23335+#endif
23336+
23337 #ifdef CONFIG_X86_32
23338 int apply_relocate(Elf32_Shdr *sechdrs,
23339 const char *strtab,
23340@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
23341 unsigned int i;
23342 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
23343 Elf32_Sym *sym;
23344- uint32_t *location;
23345+ uint32_t *plocation, location;
23346
23347 DEBUGP("Applying relocate section %u to %u\n",
23348 relsec, sechdrs[relsec].sh_info);
23349 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
23350 /* This is where to make the change */
23351- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
23352- + rel[i].r_offset;
23353+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
23354+ location = (uint32_t)plocation;
23355+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
23356+ plocation = ktla_ktva((void *)plocation);
23357 /* This is the symbol it is referring to. Note that all
23358 undefined symbols have been resolved. */
23359 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
23360@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
23361 switch (ELF32_R_TYPE(rel[i].r_info)) {
23362 case R_386_32:
23363 /* We add the value into the location given */
23364- *location += sym->st_value;
23365+ pax_open_kernel();
23366+ *plocation += sym->st_value;
23367+ pax_close_kernel();
23368 break;
23369 case R_386_PC32:
23370 /* Add the value, subtract its position */
23371- *location += sym->st_value - (uint32_t)location;
23372+ pax_open_kernel();
23373+ *plocation += sym->st_value - location;
23374+ pax_close_kernel();
23375 break;
23376 default:
23377 pr_err("%s: Unknown relocation: %u\n",
23378@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
23379 case R_X86_64_NONE:
23380 break;
23381 case R_X86_64_64:
23382+ pax_open_kernel();
23383 *(u64 *)loc = val;
23384+ pax_close_kernel();
23385 break;
23386 case R_X86_64_32:
23387+ pax_open_kernel();
23388 *(u32 *)loc = val;
23389+ pax_close_kernel();
23390 if (val != *(u32 *)loc)
23391 goto overflow;
23392 break;
23393 case R_X86_64_32S:
23394+ pax_open_kernel();
23395 *(s32 *)loc = val;
23396+ pax_close_kernel();
23397 if ((s64)val != *(s32 *)loc)
23398 goto overflow;
23399 break;
23400 case R_X86_64_PC32:
23401 val -= (u64)loc;
23402+ pax_open_kernel();
23403 *(u32 *)loc = val;
23404+ pax_close_kernel();
23405+
23406 #if 0
23407 if ((s64)val != *(s32 *)loc)
23408 goto overflow;
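
The relocation arithmetic being wrapped in pax_open_kernel()/pax_close_kernel() above is the standard ELF computation from the i386 psABI: R_386_32 is S + A (absolute), R_386_PC32 is S + A - P (PC-relative), with the addend A stored in the location itself. A sketch with made-up addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t location = 0x08049000;  /* P: where the fixup is applied */
    uint32_t sym      = 0x08051234;  /* S: resolved symbol value      */
    uint32_t word     = 0;           /* A: implicit addend in place   */

    uint32_t r_386_32   = word + sym;             /* absolute    */
    uint32_t r_386_pc32 = word + sym - location;  /* PC-relative */

    printf("R_386_32   -> %#x\n", r_386_32);
    printf("R_386_PC32 -> %#x\n", r_386_pc32);
    return 0;
}
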
23409diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
23410index ce13049..e2e9c3c 100644
23411--- a/arch/x86/kernel/msr.c
23412+++ b/arch/x86/kernel/msr.c
23413@@ -233,7 +233,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
23414 return notifier_from_errno(err);
23415 }
23416
23417-static struct notifier_block __refdata msr_class_cpu_notifier = {
23418+static struct notifier_block msr_class_cpu_notifier = {
23419 .notifier_call = msr_class_cpu_callback,
23420 };
23421
23422diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
23423index 6030805..2d33f21 100644
23424--- a/arch/x86/kernel/nmi.c
23425+++ b/arch/x86/kernel/nmi.c
23426@@ -105,7 +105,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
23427 return handled;
23428 }
23429
23430-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
23431+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
23432 {
23433 struct nmi_desc *desc = nmi_to_desc(type);
23434 unsigned long flags;
23435@@ -129,9 +129,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
23436 * event confuses some handlers (kdump uses this flag)
23437 */
23438 if (action->flags & NMI_FLAG_FIRST)
23439- list_add_rcu(&action->list, &desc->head);
23440+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
23441 else
23442- list_add_tail_rcu(&action->list, &desc->head);
23443+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
23444
23445 spin_unlock_irqrestore(&desc->lock, flags);
23446 return 0;
23447@@ -154,7 +154,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
23448 if (!strcmp(n->name, name)) {
23449 WARN(in_nmi(),
23450 "Trying to free NMI (%s) from NMI context!\n", n->name);
23451- list_del_rcu(&n->list);
23452+ pax_list_del_rcu((struct list_head *)&n->list);
23453 break;
23454 }
23455 }
23456@@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
23457 dotraplinkage notrace __kprobes void
23458 do_nmi(struct pt_regs *regs, long error_code)
23459 {
23460+
23461+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23462+ if (!user_mode(regs)) {
23463+ unsigned long cs = regs->cs & 0xFFFF;
23464+ unsigned long ip = ktva_ktla(regs->ip);
23465+
23466+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
23467+ regs->ip = ip;
23468+ }
23469+#endif
23470+
23471 nmi_nesting_preprocess(regs);
23472
23473 nmi_enter();
23474diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
23475index 6d9582e..f746287 100644
23476--- a/arch/x86/kernel/nmi_selftest.c
23477+++ b/arch/x86/kernel/nmi_selftest.c
23478@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
23479 {
23480 /* trap all the unknown NMIs we may generate */
23481 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
23482- __initdata);
23483+ __initconst);
23484 }
23485
23486 static void __init cleanup_nmi_testsuite(void)
23487@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
23488 unsigned long timeout;
23489
23490 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
23491- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
23492+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
23493 nmi_fail = FAILURE;
23494 return;
23495 }
23496diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
23497index 676b8c7..870ba04 100644
23498--- a/arch/x86/kernel/paravirt-spinlocks.c
23499+++ b/arch/x86/kernel/paravirt-spinlocks.c
23500@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
23501 arch_spin_lock(lock);
23502 }
23503
23504-struct pv_lock_ops pv_lock_ops = {
23505+struct pv_lock_ops pv_lock_ops __read_only = {
23506 #ifdef CONFIG_SMP
23507 .spin_is_locked = __ticket_spin_is_locked,
23508 .spin_is_contended = __ticket_spin_is_contended,
23509diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
23510index cd6de64..27c6af0 100644
23511--- a/arch/x86/kernel/paravirt.c
23512+++ b/arch/x86/kernel/paravirt.c
23513@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
23514 {
23515 return x;
23516 }
23517+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23518+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
23519+#endif
23520
23521 void __init default_banner(void)
23522 {
23523@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
23524 if (opfunc == NULL)
23525 /* If there's no function, patch it with a ud2a (BUG) */
23526 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
23527- else if (opfunc == _paravirt_nop)
23528+ else if (opfunc == (void *)_paravirt_nop)
23529 /* If the operation is a nop, then nop the callsite */
23530 ret = paravirt_patch_nop();
23531
23532 /* identity functions just return their single argument */
23533- else if (opfunc == _paravirt_ident_32)
23534+ else if (opfunc == (void *)_paravirt_ident_32)
23535 ret = paravirt_patch_ident_32(insnbuf, len);
23536- else if (opfunc == _paravirt_ident_64)
23537+ else if (opfunc == (void *)_paravirt_ident_64)
23538 ret = paravirt_patch_ident_64(insnbuf, len);
23539+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23540+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
23541+ ret = paravirt_patch_ident_64(insnbuf, len);
23542+#endif
23543
23544 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
23545 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
23546@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
23547 if (insn_len > len || start == NULL)
23548 insn_len = len;
23549 else
23550- memcpy(insnbuf, start, insn_len);
23551+ memcpy(insnbuf, ktla_ktva(start), insn_len);
23552
23553 return insn_len;
23554 }
23555@@ -304,7 +311,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
23556 return this_cpu_read(paravirt_lazy_mode);
23557 }
23558
23559-struct pv_info pv_info = {
23560+struct pv_info pv_info __read_only = {
23561 .name = "bare hardware",
23562 .paravirt_enabled = 0,
23563 .kernel_rpl = 0,
23564@@ -315,16 +322,16 @@ struct pv_info pv_info = {
23565 #endif
23566 };
23567
23568-struct pv_init_ops pv_init_ops = {
23569+struct pv_init_ops pv_init_ops __read_only = {
23570 .patch = native_patch,
23571 };
23572
23573-struct pv_time_ops pv_time_ops = {
23574+struct pv_time_ops pv_time_ops __read_only = {
23575 .sched_clock = native_sched_clock,
23576 .steal_clock = native_steal_clock,
23577 };
23578
23579-struct pv_irq_ops pv_irq_ops = {
23580+struct pv_irq_ops pv_irq_ops __read_only = {
23581 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
23582 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
23583 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
23584@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
23585 #endif
23586 };
23587
23588-struct pv_cpu_ops pv_cpu_ops = {
23589+struct pv_cpu_ops pv_cpu_ops __read_only = {
23590 .cpuid = native_cpuid,
23591 .get_debugreg = native_get_debugreg,
23592 .set_debugreg = native_set_debugreg,
23593@@ -394,21 +401,26 @@ struct pv_cpu_ops pv_cpu_ops = {
23594 .end_context_switch = paravirt_nop,
23595 };
23596
23597-struct pv_apic_ops pv_apic_ops = {
23598+struct pv_apic_ops pv_apic_ops __read_only= {
23599 #ifdef CONFIG_X86_LOCAL_APIC
23600 .startup_ipi_hook = paravirt_nop,
23601 #endif
23602 };
23603
23604-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
23605+#ifdef CONFIG_X86_32
23606+#ifdef CONFIG_X86_PAE
23607+/* 64-bit pagetable entries */
23608+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
23609+#else
23610 /* 32-bit pagetable entries */
23611 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
23612+#endif
23613 #else
23614 /* 64-bit pagetable entries */
23615 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
23616 #endif
23617
23618-struct pv_mmu_ops pv_mmu_ops = {
23619+struct pv_mmu_ops pv_mmu_ops __read_only = {
23620
23621 .read_cr2 = native_read_cr2,
23622 .write_cr2 = native_write_cr2,
23623@@ -458,6 +470,7 @@ struct pv_mmu_ops pv_mmu_ops = {
23624 .make_pud = PTE_IDENT,
23625
23626 .set_pgd = native_set_pgd,
23627+ .set_pgd_batched = native_set_pgd_batched,
23628 #endif
23629 #endif /* PAGETABLE_LEVELS >= 3 */
23630
23631@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
23632 },
23633
23634 .set_fixmap = native_set_fixmap,
23635+
23636+#ifdef CONFIG_PAX_KERNEXEC
23637+ .pax_open_kernel = native_pax_open_kernel,
23638+ .pax_close_kernel = native_pax_close_kernel,
23639+#endif
23640+
23641 };
23642
23643 EXPORT_SYMBOL_GPL(pv_time_ops);
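The hunks above tag each pv_*_ops structure __read_only, a PaX/grsecurity section that KERNEXEC write-protects once boot is done; any later legitimate update (the smpboot.c hunk further down does this around early_gdt_descr) has to be bracketed by pax_open_kernel()/pax_close_kernel(). A simplified sketch of the x86 mechanism, clearing CR0.WP so supervisor writes bypass read-only PTEs; this is a hedged illustration, and the real native_pax_open_kernel()/native_pax_close_kernel() are more careful than this:

#include <linux/preempt.h>
#include <asm/special_insns.h>		/* read_cr0()/write_cr0() */
#include <asm/processor-flags.h>	/* X86_CR0_WP */

/* Hypothetical simplified open/close pair for writing __read_only data. */
static inline unsigned long demo_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	cr0 = read_cr0();
	write_cr0(cr0 & ~X86_CR0_WP);	/* writes now ignore RO page bits */
	return cr0;
}

static inline void demo_close_kernel(unsigned long cr0)
{
	write_cr0(cr0);			/* restore write protection */
	preempt_enable();
}

Between the two calls a deliberate write to an ops slot succeeds, while a stray write outside such a bracket (the corruption KERNEXEC defends against) faults.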
23644diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
23645index 299d493..2ccb0ee 100644
23646--- a/arch/x86/kernel/pci-calgary_64.c
23647+++ b/arch/x86/kernel/pci-calgary_64.c
23648@@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_tar(void)
23649 tce_space = be64_to_cpu(readq(target));
23650 tce_space = tce_space & TAR_SW_BITS;
23651
23652- tce_space = tce_space & (~specified_table_size);
23653+ tce_space = tce_space & (~(unsigned long)specified_table_size);
23654 info->tce_space = (u64 *)__va(tce_space);
23655 }
23656 }
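The one-cast pci-calgary_64.c change fixes a C integer-promotion pitfall: specified_table_size is a 32-bit quantity, so ~specified_table_size is complemented at 32 bits and then zero-extended, and the AND silently wipes the upper half of the 64-bit tce_space. Widening before the complement keeps the high mask bits set. A stand-alone demonstration with hypothetical values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tce_space = 0x123456789000ULL;	/* hypothetical table address */
	unsigned int tbl = 0x7;			/* low size bits to mask off */

	/* ~tbl happens in 32 bits, then zero-extends: the mask's upper
	 * 32 bits are 0, and the AND destroys the top of the address. */
	printf("buggy: %#llx\n", (unsigned long long)(tce_space & ~tbl));

	/* Widen first, then complement: upper mask bits stay set. */
	printf("fixed: %#llx\n",
	       (unsigned long long)(tce_space & ~(uint64_t)tbl));
	return 0;
}

This prints 0x56789000 for the buggy form and the intact 0x123456789000 for the fixed one.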
23657diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
23658index 35ccf75..7a15747 100644
23659--- a/arch/x86/kernel/pci-iommu_table.c
23660+++ b/arch/x86/kernel/pci-iommu_table.c
23661@@ -2,7 +2,7 @@
23662 #include <asm/iommu_table.h>
23663 #include <linux/string.h>
23664 #include <linux/kallsyms.h>
23665-
23666+#include <linux/sched.h>
23667
23668 #define DEBUG 1
23669
23670diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
23671index 6c483ba..d10ce2f 100644
23672--- a/arch/x86/kernel/pci-swiotlb.c
23673+++ b/arch/x86/kernel/pci-swiotlb.c
23674@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
23675 void *vaddr, dma_addr_t dma_addr,
23676 struct dma_attrs *attrs)
23677 {
23678- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
23679+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
23680 }
23681
23682 static struct dma_map_ops swiotlb_dma_ops = {
23683diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
23684index 81a5f5e..20f8b58 100644
23685--- a/arch/x86/kernel/process.c
23686+++ b/arch/x86/kernel/process.c
23687@@ -36,7 +36,8 @@
23688 * section. Since TSS's are completely CPU-local, we want them
23689 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
23690 */
23691-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
23692+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
23693+EXPORT_SYMBOL(init_tss);
23694
23695 #ifdef CONFIG_X86_64
23696 static DEFINE_PER_CPU(unsigned char, is_idle);
23697@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
23698 task_xstate_cachep =
23699 kmem_cache_create("task_xstate", xstate_size,
23700 __alignof__(union thread_xstate),
23701- SLAB_PANIC | SLAB_NOTRACK, NULL);
23702+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
23703 }
23704
23705 /*
23706@@ -105,7 +106,7 @@ void exit_thread(void)
23707 unsigned long *bp = t->io_bitmap_ptr;
23708
23709 if (bp) {
23710- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
23711+ struct tss_struct *tss = init_tss + get_cpu();
23712
23713 t->io_bitmap_ptr = NULL;
23714 clear_thread_flag(TIF_IO_BITMAP);
23715@@ -125,6 +126,9 @@ void flush_thread(void)
23716 {
23717 struct task_struct *tsk = current;
23718
23719+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
23720+ loadsegment(gs, 0);
23721+#endif
23722 flush_ptrace_hw_breakpoint(tsk);
23723 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
23724 drop_init_fpu(tsk);
23725@@ -271,7 +275,7 @@ static void __exit_idle(void)
23726 void exit_idle(void)
23727 {
23728 /* idle loop has pid 0 */
23729- if (current->pid)
23730+ if (task_pid_nr(current))
23731 return;
23732 __exit_idle();
23733 }
23734@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
23735 return ret;
23736 }
23737 #endif
23738-void stop_this_cpu(void *dummy)
23739+__noreturn void stop_this_cpu(void *dummy)
23740 {
23741 local_irq_disable();
23742 /*
23743@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
23744 }
23745 early_param("idle", idle_setup);
23746
23747-unsigned long arch_align_stack(unsigned long sp)
23748+#ifdef CONFIG_PAX_RANDKSTACK
23749+void pax_randomize_kstack(struct pt_regs *regs)
23750 {
23751- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
23752- sp -= get_random_int() % 8192;
23753- return sp & ~0xf;
23754-}
23755+ struct thread_struct *thread = &current->thread;
23756+ unsigned long time;
23757
23758-unsigned long arch_randomize_brk(struct mm_struct *mm)
23759-{
23760- unsigned long range_end = mm->brk + 0x02000000;
23761- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
23762-}
23763+ if (!randomize_va_space)
23764+ return;
23765+
23766+ if (v8086_mode(regs))
23767+ return;
23768
23769+ rdtscl(time);
23770+
23771+ /* P4 seems to return a 0 LSB, ignore it */
23772+#ifdef CONFIG_MPENTIUM4
23773+ time &= 0x3EUL;
23774+ time <<= 2;
23775+#elif defined(CONFIG_X86_64)
23776+ time &= 0xFUL;
23777+ time <<= 4;
23778+#else
23779+ time &= 0x1FUL;
23780+ time <<= 3;
23781+#endif
23782+
23783+ thread->sp0 ^= time;
23784+ load_sp0(init_tss + smp_processor_id(), thread);
23785+
23786+#ifdef CONFIG_X86_64
23787+ this_cpu_write(kernel_stack, thread->sp0);
23788+#endif
23789+}
23790+#endif
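pax_randomize_kstack() supplants the removed arch_align_stack()/arch_randomize_brk() pair: rather than randomizing userland addresses, it perturbs the kernel stack top (thread.sp0) with a few low TSC bits, scaled so stack alignment is preserved (16-byte steps on x86-64, 8-byte steps on i386, skipping the always-zero LSB on P4). A userspace sketch of the 64-bit bit selection; the sp0 value is made up:

#include <stdio.h>
#include <x86intrin.h>	/* __rdtsc() */

int main(void)
{
	unsigned long sp0 = 0xffff880012346000UL; /* hypothetical thread.sp0 */

	for (int i = 0; i < 4; i++) {
		unsigned long time = (unsigned long)__rdtsc();

		time &= 0xFUL;	/* keep 4 low TSC bits as entropy */
		time <<= 4;	/* scale to 16-byte units, range 0..240 */
		printf("sp0 ^ %3lu = %#lx\n", time, sp0 ^ time);
	}
	return 0;
}

Because only bits 4..7 are flipped, the XOR never disturbs the 16-byte alignment of sp0.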
23791diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
23792index 7305f7d..22f73d6 100644
23793--- a/arch/x86/kernel/process_32.c
23794+++ b/arch/x86/kernel/process_32.c
23795@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
23796 unsigned long thread_saved_pc(struct task_struct *tsk)
23797 {
23798 return ((unsigned long *)tsk->thread.sp)[3];
23799+//XXX return tsk->thread.eip;
23800 }
23801
23802 void __show_regs(struct pt_regs *regs, int all)
23803@@ -74,19 +75,18 @@ void __show_regs(struct pt_regs *regs, int all)
23804 unsigned long sp;
23805 unsigned short ss, gs;
23806
23807- if (user_mode_vm(regs)) {
23808+ if (user_mode(regs)) {
23809 sp = regs->sp;
23810 ss = regs->ss & 0xffff;
23811- gs = get_user_gs(regs);
23812 } else {
23813 sp = kernel_stack_pointer(regs);
23814 savesegment(ss, ss);
23815- savesegment(gs, gs);
23816 }
23817+ gs = get_user_gs(regs);
23818
23819 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
23820 (u16)regs->cs, regs->ip, regs->flags,
23821- smp_processor_id());
23822+ raw_smp_processor_id());
23823 print_symbol("EIP is at %s\n", regs->ip);
23824
23825 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
23826@@ -128,20 +128,21 @@ void release_thread(struct task_struct *dead_task)
23827 int copy_thread(unsigned long clone_flags, unsigned long sp,
23828 unsigned long arg, struct task_struct *p)
23829 {
23830- struct pt_regs *childregs = task_pt_regs(p);
23831+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
23832 struct task_struct *tsk;
23833 int err;
23834
23835 p->thread.sp = (unsigned long) childregs;
23836 p->thread.sp0 = (unsigned long) (childregs+1);
23837+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
23838
23839 if (unlikely(p->flags & PF_KTHREAD)) {
23840 /* kernel thread */
23841 memset(childregs, 0, sizeof(struct pt_regs));
23842 p->thread.ip = (unsigned long) ret_from_kernel_thread;
23843- task_user_gs(p) = __KERNEL_STACK_CANARY;
23844- childregs->ds = __USER_DS;
23845- childregs->es = __USER_DS;
23846+ savesegment(gs, childregs->gs);
23847+ childregs->ds = __KERNEL_DS;
23848+ childregs->es = __KERNEL_DS;
23849 childregs->fs = __KERNEL_PERCPU;
23850 childregs->bx = sp; /* function */
23851 childregs->bp = arg;
23852@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23853 struct thread_struct *prev = &prev_p->thread,
23854 *next = &next_p->thread;
23855 int cpu = smp_processor_id();
23856- struct tss_struct *tss = &per_cpu(init_tss, cpu);
23857+ struct tss_struct *tss = init_tss + cpu;
23858 fpu_switch_t fpu;
23859
23860 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
23861@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23862 */
23863 lazy_save_gs(prev->gs);
23864
23865+#ifdef CONFIG_PAX_MEMORY_UDEREF
23866+ __set_fs(task_thread_info(next_p)->addr_limit);
23867+#endif
23868+
23869 /*
23870 * Load the per-thread Thread-Local Storage descriptor.
23871 */
23872@@ -302,6 +307,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23873 */
23874 arch_end_context_switch(next_p);
23875
23876+ this_cpu_write(current_task, next_p);
23877+ this_cpu_write(current_tinfo, &next_p->tinfo);
23878+
23879 /*
23880 * Restore %gs if needed (which is common)
23881 */
23882@@ -310,8 +318,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23883
23884 switch_fpu_finish(next_p, fpu);
23885
23886- this_cpu_write(current_task, next_p);
23887-
23888 return prev_p;
23889 }
23890
23891@@ -341,4 +347,3 @@ unsigned long get_wchan(struct task_struct *p)
23892 } while (count++ < 16);
23893 return 0;
23894 }
23895-
23896diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
23897index 355ae06..560fbbe 100644
23898--- a/arch/x86/kernel/process_64.c
23899+++ b/arch/x86/kernel/process_64.c
23900@@ -151,10 +151,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
23901 struct pt_regs *childregs;
23902 struct task_struct *me = current;
23903
23904- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
23905+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
23906 childregs = task_pt_regs(p);
23907 p->thread.sp = (unsigned long) childregs;
23908 p->thread.usersp = me->thread.usersp;
23909+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
23910 set_tsk_thread_flag(p, TIF_FORK);
23911 p->fpu_counter = 0;
23912 p->thread.io_bitmap_ptr = NULL;
23913@@ -165,6 +166,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
23914 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
23915 savesegment(es, p->thread.es);
23916 savesegment(ds, p->thread.ds);
23917+ savesegment(ss, p->thread.ss);
23918+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
23919 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
23920
23921 if (unlikely(p->flags & PF_KTHREAD)) {
23922@@ -273,7 +276,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23923 struct thread_struct *prev = &prev_p->thread;
23924 struct thread_struct *next = &next_p->thread;
23925 int cpu = smp_processor_id();
23926- struct tss_struct *tss = &per_cpu(init_tss, cpu);
23927+ struct tss_struct *tss = init_tss + cpu;
23928 unsigned fsindex, gsindex;
23929 fpu_switch_t fpu;
23930
23931@@ -296,6 +299,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23932 if (unlikely(next->ds | prev->ds))
23933 loadsegment(ds, next->ds);
23934
23935+ savesegment(ss, prev->ss);
23936+ if (unlikely(next->ss != prev->ss))
23937+ loadsegment(ss, next->ss);
23938
23939 /* We must save %fs and %gs before load_TLS() because
23940 * %fs and %gs may be cleared by load_TLS().
23941@@ -355,10 +361,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23942 prev->usersp = this_cpu_read(old_rsp);
23943 this_cpu_write(old_rsp, next->usersp);
23944 this_cpu_write(current_task, next_p);
23945+ this_cpu_write(current_tinfo, &next_p->tinfo);
23946
23947- this_cpu_write(kernel_stack,
23948- (unsigned long)task_stack_page(next_p) +
23949- THREAD_SIZE - KERNEL_STACK_OFFSET);
23950+ this_cpu_write(kernel_stack, next->sp0);
23951
23952 /*
23953 * Now maybe reload the debug registers and handle I/O bitmaps
23954@@ -427,12 +432,11 @@ unsigned long get_wchan(struct task_struct *p)
23955 if (!p || p == current || p->state == TASK_RUNNING)
23956 return 0;
23957 stack = (unsigned long)task_stack_page(p);
23958- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
23959+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
23960 return 0;
23961 fp = *(u64 *)(p->thread.sp);
23962 do {
23963- if (fp < (unsigned long)stack ||
23964- fp >= (unsigned long)stack+THREAD_SIZE)
23965+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
23966 return 0;
23967 ip = *(u64 *)(fp+8);
23968 if (!in_sched_functions(ip))
23969diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
23970index 29a8120..a50b5ee 100644
23971--- a/arch/x86/kernel/ptrace.c
23972+++ b/arch/x86/kernel/ptrace.c
23973@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
23974 {
23975 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
23976 unsigned long sp = (unsigned long)&regs->sp;
23977- struct thread_info *tinfo;
23978
23979- if (context == (sp & ~(THREAD_SIZE - 1)))
23980+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
23981 return sp;
23982
23983- tinfo = (struct thread_info *)context;
23984- if (tinfo->previous_esp)
23985- return tinfo->previous_esp;
23986+ sp = *(unsigned long *)context;
23987+ if (sp)
23988+ return sp;
23989
23990 return (unsigned long)regs;
23991 }
23992@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
23993 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
23994 {
23995 int i;
23996- int dr7 = 0;
23997+ unsigned long dr7 = 0;
23998 struct arch_hw_breakpoint *info;
23999
24000 for (i = 0; i < HBP_NUM; i++) {
24001@@ -856,7 +855,7 @@ long arch_ptrace(struct task_struct *child, long request,
24002 unsigned long addr, unsigned long data)
24003 {
24004 int ret;
24005- unsigned long __user *datap = (unsigned long __user *)data;
24006+ unsigned long __user *datap = (__force unsigned long __user *)data;
24007
24008 switch (request) {
24009 /* read the word at location addr in the USER area. */
24010@@ -941,14 +940,14 @@ long arch_ptrace(struct task_struct *child, long request,
24011 if ((int) addr < 0)
24012 return -EIO;
24013 ret = do_get_thread_area(child, addr,
24014- (struct user_desc __user *)data);
24015+ (__force struct user_desc __user *) data);
24016 break;
24017
24018 case PTRACE_SET_THREAD_AREA:
24019 if ((int) addr < 0)
24020 return -EIO;
24021 ret = do_set_thread_area(child, addr,
24022- (struct user_desc __user *)data, 0);
24023+ (__force struct user_desc __user *) data, 0);
24024 break;
24025 #endif
24026
24027@@ -1326,7 +1325,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
24028
24029 #ifdef CONFIG_X86_64
24030
24031-static struct user_regset x86_64_regsets[] __read_mostly = {
24032+static user_regset_no_const x86_64_regsets[] __read_only = {
24033 [REGSET_GENERAL] = {
24034 .core_note_type = NT_PRSTATUS,
24035 .n = sizeof(struct user_regs_struct) / sizeof(long),
24036@@ -1367,7 +1366,7 @@ static const struct user_regset_view user_x86_64_view = {
24037 #endif /* CONFIG_X86_64 */
24038
24039 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
24040-static struct user_regset x86_32_regsets[] __read_mostly = {
24041+static user_regset_no_const x86_32_regsets[] __read_only = {
24042 [REGSET_GENERAL] = {
24043 .core_note_type = NT_PRSTATUS,
24044 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
24045@@ -1420,7 +1419,7 @@ static const struct user_regset_view user_x86_32_view = {
24046 */
24047 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
24048
24049-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
24050+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
24051 {
24052 #ifdef CONFIG_X86_64
24053 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
24054@@ -1455,7 +1454,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
24055 memset(info, 0, sizeof(*info));
24056 info->si_signo = SIGTRAP;
24057 info->si_code = si_code;
24058- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
24059+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
24060 }
24061
24062 void user_single_step_siginfo(struct task_struct *tsk,
24063@@ -1484,6 +1483,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
24064 # define IS_IA32 0
24065 #endif
24066
24067+#ifdef CONFIG_GRKERNSEC_SETXID
24068+extern void gr_delayed_cred_worker(void);
24069+#endif
24070+
24071 /*
24072 * We must return the syscall number to actually look up in the table.
24073 * This can be -1L to skip running any syscall at all.
24074@@ -1494,6 +1497,11 @@ long syscall_trace_enter(struct pt_regs *regs)
24075
24076 user_exit();
24077
24078+#ifdef CONFIG_GRKERNSEC_SETXID
24079+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
24080+ gr_delayed_cred_worker();
24081+#endif
24082+
24083 /*
24084 * If we stepped into a sysenter/syscall insn, it trapped in
24085 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
24086@@ -1549,6 +1557,11 @@ void syscall_trace_leave(struct pt_regs *regs)
24087 */
24088 user_exit();
24089
24090+#ifdef CONFIG_GRKERNSEC_SETXID
24091+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
24092+ gr_delayed_cred_worker();
24093+#endif
24094+
24095 audit_syscall_exit(regs);
24096
24097 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
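The __force casts scattered through arch_ptrace() are sparse annotations, not runtime changes: data arrives as a plain integer, and turning it into a __user pointer crosses sparse's address-space checking unless the cast is marked deliberate. The kernel defines the markers roughly as below (active only under __CHECKER__, i.e. when sparse runs); a minimal compilable sketch with a hypothetical helper name:

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif

struct user_desc;	/* layout irrelevant to the cast itself */

static struct user_desc __user *datap_from_ptrace(unsigned long data)
{
	/* Without __force, sparse warns that the cast adds an
	 * address space to the expression. */
	return (__force struct user_desc __user *)data;
}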
24098diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
24099index 2cb9470..ff1fd80 100644
24100--- a/arch/x86/kernel/pvclock.c
24101+++ b/arch/x86/kernel/pvclock.c
24102@@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
24103 return pv_tsc_khz;
24104 }
24105
24106-static atomic64_t last_value = ATOMIC64_INIT(0);
24107+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
24108
24109 void pvclock_resume(void)
24110 {
24111- atomic64_set(&last_value, 0);
24112+ atomic64_set_unchecked(&last_value, 0);
24113 }
24114
24115 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
24116@@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
24117 * updating at the same time, and one of them could be slightly behind,
24118 * making the assumption that last_value always go forward fail to hold.
24119 */
24120- last = atomic64_read(&last_value);
24121+ last = atomic64_read_unchecked(&last_value);
24122 do {
24123 if (ret < last)
24124 return last;
24125- last = atomic64_cmpxchg(&last_value, last, ret);
24126+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
24127 } while (unlikely(last != ret));
24128
24129 return ret;
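PaX's REFCOUNT feature instruments atomic_t/atomic64_t with overflow detection to catch reference-count wraps; values that are not refcounts opt out through the *_unchecked variants, which is all this hunk does for pvclock's last_value. The surrounding loop is a lock-free monotonic clamp; a userspace rendition with C11 atomics, illustrative only:

#include <stdio.h>
#include <stdint.h>
#include <stdatomic.h>

static _Atomic uint64_t last_value;

/* Return max(ret, anything any CPU has already returned). */
static uint64_t monotonic(uint64_t ret)
{
	uint64_t last = atomic_load(&last_value);

	do {
		if (ret < last)
			return last;	/* someone already saw a later time */
	} while (!atomic_compare_exchange_weak(&last_value, &last, ret));
	return ret;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)monotonic(100));	/* 100 */
	printf("%llu\n", (unsigned long long)monotonic(90));	/* clamped: 100 */
	return 0;
}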
24130diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
24131index 76fa1e9..abf09ea 100644
24132--- a/arch/x86/kernel/reboot.c
24133+++ b/arch/x86/kernel/reboot.c
24134@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
24135 EXPORT_SYMBOL(pm_power_off);
24136
24137 static const struct desc_ptr no_idt = {};
24138-static int reboot_mode;
24139+static unsigned short reboot_mode;
24140 enum reboot_type reboot_type = BOOT_ACPI;
24141 int reboot_force;
24142
24143@@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
24144
24145 void __noreturn machine_real_restart(unsigned int type)
24146 {
24147+
24148+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
24149+ struct desc_struct *gdt;
24150+#endif
24151+
24152 local_irq_disable();
24153
24154 /*
24155@@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
24156
24157 /* Jump to the identity-mapped low memory code */
24158 #ifdef CONFIG_X86_32
24159- asm volatile("jmpl *%0" : :
24160+
24161+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
24162+ gdt = get_cpu_gdt_table(smp_processor_id());
24163+ pax_open_kernel();
24164+#ifdef CONFIG_PAX_MEMORY_UDEREF
24165+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
24166+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
24167+ loadsegment(ds, __KERNEL_DS);
24168+ loadsegment(es, __KERNEL_DS);
24169+ loadsegment(ss, __KERNEL_DS);
24170+#endif
24171+#ifdef CONFIG_PAX_KERNEXEC
24172+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
24173+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
24174+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
24175+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
24176+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
24177+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
24178+#endif
24179+ pax_close_kernel();
24180+#endif
24181+
24182+ asm volatile("ljmpl *%0" : :
24183 "rm" (real_mode_header->machine_real_restart_asm),
24184 "a" (type));
24185 #else
24186@@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
24187 * try to force a triple fault and then cycle between hitting the keyboard
24188 * controller and doing that
24189 */
24190-static void native_machine_emergency_restart(void)
24191+static void __noreturn native_machine_emergency_restart(void)
24192 {
24193 int i;
24194 int attempt = 0;
24195@@ -654,13 +681,13 @@ void native_machine_shutdown(void)
24196 #endif
24197 }
24198
24199-static void __machine_emergency_restart(int emergency)
24200+static void __noreturn __machine_emergency_restart(int emergency)
24201 {
24202 reboot_emergency = emergency;
24203 machine_ops.emergency_restart();
24204 }
24205
24206-static void native_machine_restart(char *__unused)
24207+static void __noreturn native_machine_restart(char *__unused)
24208 {
24209 pr_notice("machine restart\n");
24210
24211@@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
24212 __machine_emergency_restart(0);
24213 }
24214
24215-static void native_machine_halt(void)
24216+static void __noreturn native_machine_halt(void)
24217 {
24218 /* Stop other cpus and apics */
24219 machine_shutdown();
24220@@ -679,7 +706,7 @@ static void native_machine_halt(void)
24221 stop_this_cpu(NULL);
24222 }
24223
24224-static void native_machine_power_off(void)
24225+static void __noreturn native_machine_power_off(void)
24226 {
24227 if (pm_power_off) {
24228 if (!reboot_force)
24229@@ -688,9 +715,10 @@ static void native_machine_power_off(void)
24230 }
24231 /* A fallback in case there is no PM info available */
24232 tboot_shutdown(TB_SHUTDOWN_HALT);
24233+ unreachable();
24234 }
24235
24236-struct machine_ops machine_ops = {
24237+struct machine_ops machine_ops __read_only = {
24238 .power_off = native_machine_power_off,
24239 .shutdown = native_machine_shutdown,
24240 .emergency_restart = native_machine_emergency_restart,
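The reboot.c hunks annotate the halt/restart/power-off paths __noreturn and add an explicit unreachable() after the tboot fallback, letting the compiler verify that these functions cannot fall out of the bottom and prune dead code behind their call sites. The pattern in stand-alone form:

#include <stdio.h>
#include <stdlib.h>

/* Telling the compiler a function never returns. */
static void __attribute__((noreturn)) demo_power_off(void)
{
	exit(0);
	__builtin_unreachable();	/* mirrors the added unreachable() */
}

int main(void)
{
	demo_power_off();
	/* No return needed: the compiler knows this point is dead. */
}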
24241diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
24242index c8e41e9..64049ef 100644
24243--- a/arch/x86/kernel/reboot_fixups_32.c
24244+++ b/arch/x86/kernel/reboot_fixups_32.c
24245@@ -57,7 +57,7 @@ struct device_fixup {
24246 unsigned int vendor;
24247 unsigned int device;
24248 void (*reboot_fixup)(struct pci_dev *);
24249-};
24250+} __do_const;
24251
24252 /*
24253 * PCI ids solely used for fixups_table go here
24254diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
24255index f2bb9c9..bed145d7 100644
24256--- a/arch/x86/kernel/relocate_kernel_64.S
24257+++ b/arch/x86/kernel/relocate_kernel_64.S
24258@@ -11,6 +11,7 @@
24259 #include <asm/kexec.h>
24260 #include <asm/processor-flags.h>
24261 #include <asm/pgtable_types.h>
24262+#include <asm/alternative-asm.h>
24263
24264 /*
24265 * Must be relocatable PIC code callable as a C function
24266@@ -167,6 +168,7 @@ identity_mapped:
24267 xorq %r14, %r14
24268 xorq %r15, %r15
24269
24270+ pax_force_retaddr 0, 1
24271 ret
24272
24273 1:
24274diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
24275index 56f7fcf..3b88ad1 100644
24276--- a/arch/x86/kernel/setup.c
24277+++ b/arch/x86/kernel/setup.c
24278@@ -110,6 +110,7 @@
24279 #include <asm/mce.h>
24280 #include <asm/alternative.h>
24281 #include <asm/prom.h>
24282+#include <asm/boot.h>
24283
24284 /*
24285 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
24286@@ -205,10 +206,12 @@ EXPORT_SYMBOL(boot_cpu_data);
24287 #endif
24288
24289
24290-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
24291-unsigned long mmu_cr4_features;
24292+#ifdef CONFIG_X86_64
24293+unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
24294+#elif defined(CONFIG_X86_PAE)
24295+unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
24296 #else
24297-unsigned long mmu_cr4_features = X86_CR4_PAE;
24298+unsigned long mmu_cr4_features __read_only;
24299 #endif
24300
24301 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
24302@@ -444,7 +447,7 @@ static void __init parse_setup_data(void)
24303
24304 switch (data->type) {
24305 case SETUP_E820_EXT:
24306- parse_e820_ext(data);
24307+ parse_e820_ext((struct setup_data __force_kernel *)data);
24308 break;
24309 case SETUP_DTB:
24310 add_dtb(pa_data);
24311@@ -771,7 +774,7 @@ static void __init trim_bios_range(void)
24312 * area (640->1Mb) as ram even though it is not.
24313 * take them out.
24314 */
24315- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
24316+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
24317
24318 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
24319 }
24320@@ -779,7 +782,7 @@ static void __init trim_bios_range(void)
24321 /* called before trim_bios_range() to spare extra sanitize */
24322 static void __init e820_add_kernel_range(void)
24323 {
24324- u64 start = __pa_symbol(_text);
24325+ u64 start = __pa_symbol(ktla_ktva(_text));
24326 u64 size = __pa_symbol(_end) - start;
24327
24328 /*
24329@@ -841,8 +844,12 @@ static void __init trim_low_memory_range(void)
24330
24331 void __init setup_arch(char **cmdline_p)
24332 {
24333+#ifdef CONFIG_X86_32
24334+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
24335+#else
24336 memblock_reserve(__pa_symbol(_text),
24337 (unsigned long)__bss_stop - (unsigned long)_text);
24338+#endif
24339
24340 early_reserve_initrd();
24341
24342@@ -934,14 +941,14 @@ void __init setup_arch(char **cmdline_p)
24343
24344 if (!boot_params.hdr.root_flags)
24345 root_mountflags &= ~MS_RDONLY;
24346- init_mm.start_code = (unsigned long) _text;
24347- init_mm.end_code = (unsigned long) _etext;
24348+ init_mm.start_code = ktla_ktva((unsigned long) _text);
24349+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
24350 init_mm.end_data = (unsigned long) _edata;
24351 init_mm.brk = _brk_end;
24352
24353- code_resource.start = __pa_symbol(_text);
24354- code_resource.end = __pa_symbol(_etext)-1;
24355- data_resource.start = __pa_symbol(_etext);
24356+ code_resource.start = __pa_symbol(ktla_ktva(_text));
24357+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
24358+ data_resource.start = __pa_symbol(_sdata);
24359 data_resource.end = __pa_symbol(_edata)-1;
24360 bss_resource.start = __pa_symbol(__bss_start);
24361 bss_resource.end = __pa_symbol(__bss_stop)-1;
24362diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
24363index 5cdff03..80fa283 100644
24364--- a/arch/x86/kernel/setup_percpu.c
24365+++ b/arch/x86/kernel/setup_percpu.c
24366@@ -21,19 +21,17 @@
24367 #include <asm/cpu.h>
24368 #include <asm/stackprotector.h>
24369
24370-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
24371+#ifdef CONFIG_SMP
24372+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
24373 EXPORT_PER_CPU_SYMBOL(cpu_number);
24374+#endif
24375
24376-#ifdef CONFIG_X86_64
24377 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
24378-#else
24379-#define BOOT_PERCPU_OFFSET 0
24380-#endif
24381
24382 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
24383 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
24384
24385-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
24386+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
24387 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
24388 };
24389 EXPORT_SYMBOL(__per_cpu_offset);
24390@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
24391 {
24392 #ifdef CONFIG_NEED_MULTIPLE_NODES
24393 pg_data_t *last = NULL;
24394- unsigned int cpu;
24395+ int cpu;
24396
24397 for_each_possible_cpu(cpu) {
24398 int node = early_cpu_to_node(cpu);
24399@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
24400 {
24401 #ifdef CONFIG_X86_32
24402 struct desc_struct gdt;
24403+ unsigned long base = per_cpu_offset(cpu);
24404
24405- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
24406- 0x2 | DESCTYPE_S, 0x8);
24407- gdt.s = 1;
24408+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
24409+ 0x83 | DESCTYPE_S, 0xC);
24410 write_gdt_entry(get_cpu_gdt_table(cpu),
24411 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
24412 #endif
24413@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
24414 /* alrighty, percpu areas up and running */
24415 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
24416 for_each_possible_cpu(cpu) {
24417+#ifdef CONFIG_CC_STACKPROTECTOR
24418+#ifdef CONFIG_X86_32
24419+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
24420+#endif
24421+#endif
24422 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
24423 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
24424 per_cpu(cpu_number, cpu) = cpu;
24425@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
24426 */
24427 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
24428 #endif
24429+#ifdef CONFIG_CC_STACKPROTECTOR
24430+#ifdef CONFIG_X86_32
24431+ if (!cpu)
24432+ per_cpu(stack_canary.canary, cpu) = canary;
24433+#endif
24434+#endif
24435 /*
24436 * Up to this point, the boot CPU has been using .init.data
24437 * area. Reload any changed state for the boot CPU.
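setup_percpu_segment() used to pack a flat page-granular segment (limit 0xFFFFF, i.e. 4 GiB) for the 32-bit per-CPU area; the hunk shrinks it so the segment ends at VMALLOC_END, meaning a wild per-CPU-relative access beyond the vmalloc region now faults on the segment limit. With page granularity the descriptor's limit field holds "pages minus one", which is what the (VMALLOC_END - base - 1) >> PAGE_SHIFT expression computes. A quick check of the arithmetic with hypothetical addresses:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* Made-up 32-bit layout: per-CPU base and VMALLOC_END. */
	unsigned long base = 0xc1a00000UL, vmalloc_end = 0xf7fe0000UL;

	/* Page-granular limit field covering [base, vmalloc_end). */
	unsigned long limit = (vmalloc_end - base - 1) >> PAGE_SHIFT;

	printf("limit field = %#lx (%lu pages)\n", limit, limit + 1);
	return 0;
}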
24438diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
24439index 6956299..18126ec4 100644
24440--- a/arch/x86/kernel/signal.c
24441+++ b/arch/x86/kernel/signal.c
24442@@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
24443 * Align the stack pointer according to the i386 ABI,
24444 * i.e. so that on function entry ((sp + 4) & 15) == 0.
24445 */
24446- sp = ((sp + 4) & -16ul) - 4;
24447+ sp = ((sp - 12) & -16ul) - 4;
24448 #else /* !CONFIG_X86_32 */
24449 sp = round_down(sp, 16) - 8;
24450 #endif
24451@@ -304,9 +304,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
24452 }
24453
24454 if (current->mm->context.vdso)
24455- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
24456+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
24457 else
24458- restorer = &frame->retcode;
24459+ restorer = (void __user *)&frame->retcode;
24460 if (ksig->ka.sa.sa_flags & SA_RESTORER)
24461 restorer = ksig->ka.sa.sa_restorer;
24462
24463@@ -320,7 +320,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
24464 * reasons and because gdb uses it as a signature to notice
24465 * signal handler stack frames.
24466 */
24467- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
24468+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
24469
24470 if (err)
24471 return -EFAULT;
24472@@ -364,10 +364,13 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
24473 else
24474 put_user_ex(0, &frame->uc.uc_flags);
24475 put_user_ex(0, &frame->uc.uc_link);
24476- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
24477+ __save_altstack_ex(&frame->uc.uc_stack, regs->sp);
24478
24479 /* Set up to return from userspace. */
24480- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
24481+ if (current->mm->context.vdso)
24482+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
24483+ else
24484+ restorer = (void __user *)&frame->retcode;
24485 if (ksig->ka.sa.sa_flags & SA_RESTORER)
24486 restorer = ksig->ka.sa.sa_restorer;
24487 put_user_ex(restorer, &frame->pretcode);
24488@@ -379,7 +382,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
24489 * reasons and because gdb uses it as a signature to notice
24490 * signal handler stack frames.
24491 */
24492- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
24493+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
24494 } put_user_catch(err);
24495
24496 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
24497@@ -429,7 +432,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
24498 else
24499 put_user_ex(0, &frame->uc.uc_flags);
24500 put_user_ex(0, &frame->uc.uc_link);
24501- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
24502+ __save_altstack_ex(&frame->uc.uc_stack, regs->sp);
24503
24504 /* Set up to return from userspace. If provided, use a stub
24505 already in userspace. */
24506@@ -615,7 +618,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
24507 {
24508 int usig = signr_convert(ksig->sig);
24509 sigset_t *set = sigmask_to_save();
24510- compat_sigset_t *cset = (compat_sigset_t *) set;
24511+ sigset_t sigcopy;
24512+ compat_sigset_t *cset;
24513+
24514+ sigcopy = *set;
24515+
24516+ cset = (compat_sigset_t *) &sigcopy;
24517
24518 /* Set up the stack frame */
24519 if (is_ia32_frame()) {
24520@@ -626,7 +634,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
24521 } else if (is_x32_frame()) {
24522 return x32_setup_rt_frame(ksig, cset, regs);
24523 } else {
24524- return __setup_rt_frame(ksig->sig, ksig, set, regs);
24525+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
24526 }
24527 }
24528
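The align_sigframe() change preserves the i386 ABI invariant, ((sp + 4) & 15) == 0 at handler entry, but rounds from sp - 12 instead of sp + 4; the new expression is exactly 16 bytes lower, so the frame start can never coincide with the incoming sp (the old form returns sp itself whenever sp is 12 mod 16). Both variants over one 16-byte window:

#include <stdio.h>

int main(void)
{
	for (unsigned long sp = 0x1000; sp < 0x1010; sp++) {
		unsigned long oldv = ((sp + 4) & -16ul) - 4;
		unsigned long newv = ((sp - 12) & -16ul) - 4;

		printf("sp=%#lx old=%#lx new=%#lx align(old)=%lu align(new)=%lu\n",
		       sp, oldv, newv, (oldv + 4) & 15, (newv + 4) & 15);
	}
	return 0;
}

Both alignment columns print 0 throughout; at sp = 0x100c the old result equals sp while the new one sits 16 bytes lower.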
24529diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
24530index 48d2b7d..90d328a 100644
24531--- a/arch/x86/kernel/smp.c
24532+++ b/arch/x86/kernel/smp.c
24533@@ -285,7 +285,7 @@ static int __init nonmi_ipi_setup(char *str)
24534
24535 __setup("nonmi_ipi", nonmi_ipi_setup);
24536
24537-struct smp_ops smp_ops = {
24538+struct smp_ops smp_ops __read_only = {
24539 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
24540 .smp_prepare_cpus = native_smp_prepare_cpus,
24541 .smp_cpus_done = native_smp_cpus_done,
24542diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
24543index bfd348e..914f323 100644
24544--- a/arch/x86/kernel/smpboot.c
24545+++ b/arch/x86/kernel/smpboot.c
24546@@ -251,14 +251,18 @@ notrace static void __cpuinit start_secondary(void *unused)
24547
24548 enable_start_cpu0 = 0;
24549
24550-#ifdef CONFIG_X86_32
24551- /* switch away from the initial page table */
24552- load_cr3(swapper_pg_dir);
24553- __flush_tlb_all();
24554-#endif
24555-
24556 /* otherwise gcc will move up smp_processor_id before the cpu_init */
24557 barrier();
24558+
24559+ /* switch away from the initial page table */
24560+#ifdef CONFIG_PAX_PER_CPU_PGD
24561+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
24562+ __flush_tlb_all();
24563+#elif defined(CONFIG_X86_32)
24564+ load_cr3(swapper_pg_dir);
24565+ __flush_tlb_all();
24566+#endif
24567+
24568 /*
24569 * Check TSC synchronization with the BP:
24570 */
24571@@ -748,6 +752,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
24572 idle->thread.sp = (unsigned long) (((struct pt_regs *)
24573 (THREAD_SIZE + task_stack_page(idle))) - 1);
24574 per_cpu(current_task, cpu) = idle;
24575+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
24576
24577 #ifdef CONFIG_X86_32
24578 /* Stack for startup_32 can be just as for start_secondary onwards */
24579@@ -755,11 +760,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
24580 #else
24581 clear_tsk_thread_flag(idle, TIF_FORK);
24582 initial_gs = per_cpu_offset(cpu);
24583- per_cpu(kernel_stack, cpu) =
24584- (unsigned long)task_stack_page(idle) -
24585- KERNEL_STACK_OFFSET + THREAD_SIZE;
24586+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24587 #endif
24588+
24589+ pax_open_kernel();
24590 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
24591+ pax_close_kernel();
24592+
24593 initial_code = (unsigned long)start_secondary;
24594 stack_start = idle->thread.sp;
24595
24596@@ -908,6 +915,15 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
24597 /* the FPU context is blank, nobody can own it */
24598 __cpu_disable_lazy_restore(cpu);
24599
24600+#ifdef CONFIG_PAX_PER_CPU_PGD
24601+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
24602+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24603+ KERNEL_PGD_PTRS);
24604+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
24605+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24606+ KERNEL_PGD_PTRS);
24607+#endif
24608+
24609 err = do_boot_cpu(apicid, cpu, tidle);
24610 if (err) {
24611 pr_debug("do_boot_cpu failed %d\n", err);
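Under PAX_PER_CPU_PGD each CPU runs on its own top-level page table, so before an AP boots, the kernel-half PGD entries are copied from swapper_pg_dir into both the kernel and user flavors of that CPU's PGD (get_cpu_pgd() is a PaX accessor, not mainline). clone_pgd_range() itself is nothing exotic; mainline defines it essentially as the typed memcpy below:

/* arch/x86/include/asm/pgtable.h: copy count top-level (PGD) slots. */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

Copying only from KERNEL_PGD_BOUNDARY upward shares the kernel mappings while leaving the user half of every per-CPU PGD independent.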
24612diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
24613index 9b4d51d..5d28b58 100644
24614--- a/arch/x86/kernel/step.c
24615+++ b/arch/x86/kernel/step.c
24616@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
24617 struct desc_struct *desc;
24618 unsigned long base;
24619
24620- seg &= ~7UL;
24621+ seg >>= 3;
24622
24623 mutex_lock(&child->mm->context.lock);
24624- if (unlikely((seg >> 3) >= child->mm->context.size))
24625+ if (unlikely(seg >= child->mm->context.size))
24626 addr = -1L; /* bogus selector, access would fault */
24627 else {
24628 desc = child->mm->context.ldt + seg;
24629@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
24630 addr += base;
24631 }
24632 mutex_unlock(&child->mm->context.lock);
24633- }
24634+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
24635+ addr = ktla_ktva(addr);
24636
24637 return addr;
24638 }
24639@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
24640 unsigned char opcode[15];
24641 unsigned long addr = convert_ip_to_linear(child, regs);
24642
24643+ if (addr == -EINVAL)
24644+ return 0;
24645+
24646 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
24647 for (i = 0; i < copied; i++) {
24648 switch (opcode[i]) {
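convert_ip_to_linear() now shifts the selector right by 3 up front instead of masking with ~7 and shifting later: an x86 segment selector keeps the descriptor index in bits 3..15, above the table-indicator bit (TI, bit 2) and the requested privilege level (RPL, bits 0..1), so seg >> 3 yields the index directly and the context.size bound check is unchanged in effect. Decoding a selector stand-alone (0x7b is i386's __USER_DS: GDT index 15, RPL 3):

#include <stdio.h>

int main(void)
{
	unsigned short sel = 0x7b;

	unsigned idx = sel >> 3;	/* descriptor table index */
	unsigned ti  = (sel >> 2) & 1;	/* 0 = GDT, 1 = LDT */
	unsigned rpl = sel & 3;		/* requested privilege level */

	printf("index=%u ti=%u rpl=%u\n", idx, ti, rpl);
	return 0;
}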
24649diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
24650new file mode 100644
24651index 0000000..5877189
24652--- /dev/null
24653+++ b/arch/x86/kernel/sys_i386_32.c
24654@@ -0,0 +1,189 @@
24655+/*
24656+ * This file contains various random system calls that
24657+ * have a non-standard calling sequence on the Linux/i386
24658+ * platform.
24659+ */
24660+
24661+#include <linux/errno.h>
24662+#include <linux/sched.h>
24663+#include <linux/mm.h>
24664+#include <linux/fs.h>
24665+#include <linux/smp.h>
24666+#include <linux/sem.h>
24667+#include <linux/msg.h>
24668+#include <linux/shm.h>
24669+#include <linux/stat.h>
24670+#include <linux/syscalls.h>
24671+#include <linux/mman.h>
24672+#include <linux/file.h>
24673+#include <linux/utsname.h>
24674+#include <linux/ipc.h>
24675+#include <linux/elf.h>
24676+
24677+#include <linux/uaccess.h>
24678+#include <linux/unistd.h>
24679+
24680+#include <asm/syscalls.h>
24681+
24682+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
24683+{
24684+ unsigned long pax_task_size = TASK_SIZE;
24685+
24686+#ifdef CONFIG_PAX_SEGMEXEC
24687+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
24688+ pax_task_size = SEGMEXEC_TASK_SIZE;
24689+#endif
24690+
24691+ if (flags & MAP_FIXED)
24692+ if (len > pax_task_size || addr > pax_task_size - len)
24693+ return -EINVAL;
24694+
24695+ return 0;
24696+}
24697+
24698+/*
24699+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
24700+ */
24701+static unsigned long get_align_mask(void)
24702+{
24703+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
24704+ return 0;
24705+
24706+ if (!(current->flags & PF_RANDOMIZE))
24707+ return 0;
24708+
24709+ return va_align.mask;
24710+}
24711+
24712+unsigned long
24713+arch_get_unmapped_area(struct file *filp, unsigned long addr,
24714+ unsigned long len, unsigned long pgoff, unsigned long flags)
24715+{
24716+ struct mm_struct *mm = current->mm;
24717+ struct vm_area_struct *vma;
24718+ unsigned long pax_task_size = TASK_SIZE;
24719+ struct vm_unmapped_area_info info;
24720+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
24721+
24722+#ifdef CONFIG_PAX_SEGMEXEC
24723+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24724+ pax_task_size = SEGMEXEC_TASK_SIZE;
24725+#endif
24726+
24727+ pax_task_size -= PAGE_SIZE;
24728+
24729+ if (len > pax_task_size)
24730+ return -ENOMEM;
24731+
24732+ if (flags & MAP_FIXED)
24733+ return addr;
24734+
24735+#ifdef CONFIG_PAX_RANDMMAP
24736+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24737+#endif
24738+
24739+ if (addr) {
24740+ addr = PAGE_ALIGN(addr);
24741+ if (pax_task_size - len >= addr) {
24742+ vma = find_vma(mm, addr);
24743+ if (check_heap_stack_gap(vma, addr, len, offset))
24744+ return addr;
24745+ }
24746+ }
24747+
24748+ info.flags = 0;
24749+ info.length = len;
24750+ info.align_mask = filp ? get_align_mask() : 0;
24751+ info.align_offset = pgoff << PAGE_SHIFT;
24752+ info.threadstack_offset = offset;
24753+
24754+#ifdef CONFIG_PAX_PAGEEXEC
24755+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
24756+ info.low_limit = 0x00110000UL;
24757+ info.high_limit = mm->start_code;
24758+
24759+#ifdef CONFIG_PAX_RANDMMAP
24760+ if (mm->pax_flags & MF_PAX_RANDMMAP)
24761+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
24762+#endif
24763+
24764+ if (info.low_limit < info.high_limit) {
24765+ addr = vm_unmapped_area(&info);
24766+ if (!IS_ERR_VALUE(addr))
24767+ return addr;
24768+ }
24769+ } else
24770+#endif
24771+
24772+ info.low_limit = mm->mmap_base;
24773+ info.high_limit = pax_task_size;
24774+
24775+ return vm_unmapped_area(&info);
24776+}
24777+
24778+unsigned long
24779+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
24780+ const unsigned long len, const unsigned long pgoff,
24781+ const unsigned long flags)
24782+{
24783+ struct vm_area_struct *vma;
24784+ struct mm_struct *mm = current->mm;
24785+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
24786+ struct vm_unmapped_area_info info;
24787+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
24788+
24789+#ifdef CONFIG_PAX_SEGMEXEC
24790+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24791+ pax_task_size = SEGMEXEC_TASK_SIZE;
24792+#endif
24793+
24794+ pax_task_size -= PAGE_SIZE;
24795+
24796+ /* requested length too big for entire address space */
24797+ if (len > pax_task_size)
24798+ return -ENOMEM;
24799+
24800+ if (flags & MAP_FIXED)
24801+ return addr;
24802+
24803+#ifdef CONFIG_PAX_PAGEEXEC
24804+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
24805+ goto bottomup;
24806+#endif
24807+
24808+#ifdef CONFIG_PAX_RANDMMAP
24809+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24810+#endif
24811+
24812+ /* requesting a specific address */
24813+ if (addr) {
24814+ addr = PAGE_ALIGN(addr);
24815+ if (pax_task_size - len >= addr) {
24816+ vma = find_vma(mm, addr);
24817+ if (check_heap_stack_gap(vma, addr, len, offset))
24818+ return addr;
24819+ }
24820+ }
24821+
24822+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
24823+ info.length = len;
24824+ info.low_limit = PAGE_SIZE;
24825+ info.high_limit = mm->mmap_base;
24826+ info.align_mask = filp ? get_align_mask() : 0;
24827+ info.align_offset = pgoff << PAGE_SHIFT;
24828+ info.threadstack_offset = offset;
24829+
24830+ addr = vm_unmapped_area(&info);
24831+ if (!(addr & ~PAGE_MASK))
24832+ return addr;
24833+ VM_BUG_ON(addr != -ENOMEM);
24834+
24835+bottomup:
24836+ /*
24837+ * A failed mmap() very likely causes application failure,
24838+ * so fall back to the bottom-up function here. This scenario
24839+ * can happen with large stack limits and large mmap()
24840+ * allocations.
24841+ */
24842+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
24843+}
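The new i386 arch_get_unmapped_area*() implementations mirror the sys_x86_64.c hunks below: each candidate address is vetted with check_heap_stack_gap() (a grsecurity helper, not in mainline) rather than the bare "!vma || addr + len <= vma->vm_start" test, so a per-thread randomized gap (gr_rand_threadstack_offset()) is kept between new mappings and the VMA above them, typically the stack. A userspace sketch of the idea; names, semantics, and sizes here are inferred and illustrative:

#include <stdio.h>
#include <stdbool.h>

struct vma { unsigned long start, end; };

/* Accept [addr, addr+len) only if it clears the next VMA by gap bytes. */
static bool gap_ok(const struct vma *next, unsigned long addr,
		   unsigned long len, unsigned long gap)
{
	if (!next)		/* nothing mapped above the candidate */
		return true;
	return addr + len + gap <= next->start;
}

int main(void)
{
	struct vma stack = { 0x7f0000000000UL, 0x7f0000100000UL };

	printf("%d\n", gap_ok(&stack, 0x7effffe00000UL, 0x100000UL, 0x10000UL));
	printf("%d\n", gap_ok(&stack, 0x7effffff0000UL, 0x100000UL, 0x10000UL));
	return 0;
}

The first request leaves the gap intact (prints 1); the second would run into the stack VMA and is refused (prints 0).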
24844diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
24845index dbded5a..ace2781 100644
24846--- a/arch/x86/kernel/sys_x86_64.c
24847+++ b/arch/x86/kernel/sys_x86_64.c
24848@@ -81,8 +81,8 @@ out:
24849 return error;
24850 }
24851
24852-static void find_start_end(unsigned long flags, unsigned long *begin,
24853- unsigned long *end)
24854+static void find_start_end(struct mm_struct *mm, unsigned long flags,
24855+ unsigned long *begin, unsigned long *end)
24856 {
24857 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
24858 unsigned long new_begin;
24859@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
24860 *begin = new_begin;
24861 }
24862 } else {
24863- *begin = TASK_UNMAPPED_BASE;
24864+ *begin = mm->mmap_base;
24865 *end = TASK_SIZE;
24866 }
24867 }
24868@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
24869 struct vm_area_struct *vma;
24870 struct vm_unmapped_area_info info;
24871 unsigned long begin, end;
24872+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
24873
24874 if (flags & MAP_FIXED)
24875 return addr;
24876
24877- find_start_end(flags, &begin, &end);
24878+ find_start_end(mm, flags, &begin, &end);
24879
24880 if (len > end)
24881 return -ENOMEM;
24882
24883+#ifdef CONFIG_PAX_RANDMMAP
24884+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24885+#endif
24886+
24887 if (addr) {
24888 addr = PAGE_ALIGN(addr);
24889 vma = find_vma(mm, addr);
24890- if (end - len >= addr &&
24891- (!vma || addr + len <= vma->vm_start))
24892+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
24893 return addr;
24894 }
24895
24896@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
24897 info.high_limit = end;
24898 info.align_mask = filp ? get_align_mask() : 0;
24899 info.align_offset = pgoff << PAGE_SHIFT;
24900+ info.threadstack_offset = offset;
24901 return vm_unmapped_area(&info);
24902 }
24903
24904@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
24905 struct mm_struct *mm = current->mm;
24906 unsigned long addr = addr0;
24907 struct vm_unmapped_area_info info;
24908+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
24909
24910 /* requested length too big for entire address space */
24911 if (len > TASK_SIZE)
24912@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
24913 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
24914 goto bottomup;
24915
24916+#ifdef CONFIG_PAX_RANDMMAP
24917+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24918+#endif
24919+
24920 /* requesting a specific address */
24921 if (addr) {
24922 addr = PAGE_ALIGN(addr);
24923 vma = find_vma(mm, addr);
24924- if (TASK_SIZE - len >= addr &&
24925- (!vma || addr + len <= vma->vm_start))
24926+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
24927 return addr;
24928 }
24929
24930@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
24931 info.high_limit = mm->mmap_base;
24932 info.align_mask = filp ? get_align_mask() : 0;
24933 info.align_offset = pgoff << PAGE_SHIFT;
24934+ info.threadstack_offset = offset;
24935 addr = vm_unmapped_area(&info);
24936 if (!(addr & ~PAGE_MASK))
24937 return addr;
24938diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
24939index f84fe00..f41d9f1 100644
24940--- a/arch/x86/kernel/tboot.c
24941+++ b/arch/x86/kernel/tboot.c
24942@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
24943
24944 void tboot_shutdown(u32 shutdown_type)
24945 {
24946- void (*shutdown)(void);
24947+ void (* __noreturn shutdown)(void);
24948
24949 if (!tboot_enabled())
24950 return;
24951@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
24952
24953 switch_to_tboot_pt();
24954
24955- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
24956+ shutdown = (void *)tboot->shutdown_entry;
24957 shutdown();
24958
24959 /* should not reach here */
24960@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
24961 return 0;
24962 }
24963
24964-static atomic_t ap_wfs_count;
24965+static atomic_unchecked_t ap_wfs_count;
24966
24967 static int tboot_wait_for_aps(int num_aps)
24968 {
24969@@ -324,16 +324,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
24970 {
24971 switch (action) {
24972 case CPU_DYING:
24973- atomic_inc(&ap_wfs_count);
24974+ atomic_inc_unchecked(&ap_wfs_count);
24975 if (num_online_cpus() == 1)
24976- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
24977+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
24978 return NOTIFY_BAD;
24979 break;
24980 }
24981 return NOTIFY_OK;
24982 }
24983
24984-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
24985+static struct notifier_block tboot_cpu_notifier =
24986 {
24987 .notifier_call = tboot_cpu_callback,
24988 };
24989@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
24990
24991 tboot_create_trampoline();
24992
24993- atomic_set(&ap_wfs_count, 0);
24994+ atomic_set_unchecked(&ap_wfs_count, 0);
24995 register_hotcpu_notifier(&tboot_cpu_notifier);
24996
24997 acpi_os_set_prepare_sleep(&tboot_sleep);
24998diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
24999index 24d3c91..d06b473 100644
25000--- a/arch/x86/kernel/time.c
25001+++ b/arch/x86/kernel/time.c
25002@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
25003 {
25004 unsigned long pc = instruction_pointer(regs);
25005
25006- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
25007+ if (!user_mode(regs) && in_lock_functions(pc)) {
25008 #ifdef CONFIG_FRAME_POINTER
25009- return *(unsigned long *)(regs->bp + sizeof(long));
25010+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
25011 #else
25012 unsigned long *sp =
25013 (unsigned long *)kernel_stack_pointer(regs);
25014@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
25015 * or above a saved flags. Eflags has bits 22-31 zero,
25016 * kernel addresses don't.
25017 */
25018+
25019+#ifdef CONFIG_PAX_KERNEXEC
25020+ return ktla_ktva(sp[0]);
25021+#else
25022 if (sp[0] >> 22)
25023 return sp[0];
25024 if (sp[1] >> 22)
25025 return sp[1];
25026 #endif
25027+
25028+#endif
25029 }
25030 return pc;
25031 }
25032diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
25033index f7fec09..9991981 100644
25034--- a/arch/x86/kernel/tls.c
25035+++ b/arch/x86/kernel/tls.c
25036@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
25037 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
25038 return -EINVAL;
25039
25040+#ifdef CONFIG_PAX_SEGMEXEC
25041+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
25042+ return -EINVAL;
25043+#endif
25044+
25045 set_tls_desc(p, idx, &info, 1);
25046
25047 return 0;
25048@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
25049
25050 if (kbuf)
25051 info = kbuf;
25052- else if (__copy_from_user(infobuf, ubuf, count))
25053+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
25054 return -EFAULT;
25055 else
25056 info = infobuf;
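regset_tls_set() copies count user bytes into a fixed on-stack infobuf; the hunk rejects oversized requests before the copy (callers are expected to bound count already, so this is defense in depth), turning a potential kernel stack overwrite into -EFAULT. The shape of the fix as a userspace analogue, names illustrative:

#include <stdio.h>
#include <string.h>

static int set_tls_from_user(const void *ubuf, size_t count)
{
	char infobuf[64];

	if (count > sizeof(infobuf))	/* the added bound check */
		return -1;		/* -EFAULT in the kernel hunk */
	memcpy(infobuf, ubuf, count);	/* stands in for __copy_from_user() */
	return 0;
}

int main(void)
{
	char big[128] = { 0 };

	printf("fits:      %d\n", set_tls_from_user(big, 48));
	printf("oversized: %d\n", set_tls_from_user(big, 100));
	return 0;
}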
25057diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
25058index 772e2a8..bad5bf6 100644
25059--- a/arch/x86/kernel/traps.c
25060+++ b/arch/x86/kernel/traps.c
25061@@ -68,12 +68,6 @@
25062 #include <asm/setup.h>
25063
25064 asmlinkage int system_call(void);
25065-
25066-/*
25067- * The IDT has to be page-aligned to simplify the Pentium
25068- * F0 0F bug workaround.
25069- */
25070-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
25071 #endif
25072
25073 DECLARE_BITMAP(used_vectors, NR_VECTORS);
25074@@ -106,11 +100,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
25075 }
25076
25077 static int __kprobes
25078-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
25079+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
25080 struct pt_regs *regs, long error_code)
25081 {
25082 #ifdef CONFIG_X86_32
25083- if (regs->flags & X86_VM_MASK) {
25084+ if (v8086_mode(regs)) {
25085 /*
25086 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
25087 * On nmi (interrupt 2), do_trap should not be called.
25088@@ -123,12 +117,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
25089 return -1;
25090 }
25091 #endif
25092- if (!user_mode(regs)) {
25093+ if (!user_mode_novm(regs)) {
25094 if (!fixup_exception(regs)) {
25095 tsk->thread.error_code = error_code;
25096 tsk->thread.trap_nr = trapnr;
25097+
25098+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25099+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
25100+ str = "PAX: suspicious stack segment fault";
25101+#endif
25102+
25103 die(str, regs, error_code);
25104 }
25105+
25106+#ifdef CONFIG_PAX_REFCOUNT
25107+ if (trapnr == 4)
25108+ pax_report_refcount_overflow(regs);
25109+#endif
25110+
25111 return 0;
25112 }
25113
25114@@ -136,7 +142,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
25115 }
25116
25117 static void __kprobes
25118-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
25119+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
25120 long error_code, siginfo_t *info)
25121 {
25122 struct task_struct *tsk = current;
25123@@ -160,7 +166,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
25124 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
25125 printk_ratelimit()) {
25126 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
25127- tsk->comm, tsk->pid, str,
25128+ tsk->comm, task_pid_nr(tsk), str,
25129 regs->ip, regs->sp, error_code);
25130 print_vma_addr(" in ", regs->ip);
25131 pr_cont("\n");
25132@@ -273,7 +279,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
25133 conditional_sti(regs);
25134
25135 #ifdef CONFIG_X86_32
25136- if (regs->flags & X86_VM_MASK) {
25137+ if (v8086_mode(regs)) {
25138 local_irq_enable();
25139 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
25140 goto exit;
25141@@ -281,18 +287,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
25142 #endif
25143
25144 tsk = current;
25145- if (!user_mode(regs)) {
25146+ if (!user_mode_novm(regs)) {
25147 if (fixup_exception(regs))
25148 goto exit;
25149
25150 tsk->thread.error_code = error_code;
25151 tsk->thread.trap_nr = X86_TRAP_GP;
25152 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
25153- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
25154+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
25155+
25156+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25157+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
25158+ die("PAX: suspicious general protection fault", regs, error_code);
25159+ else
25160+#endif
25161+
25162 die("general protection fault", regs, error_code);
25163+ }
25164 goto exit;
25165 }
25166
25167+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25168+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
25169+ struct mm_struct *mm = tsk->mm;
25170+ unsigned long limit;
25171+
25172+ down_write(&mm->mmap_sem);
25173+ limit = mm->context.user_cs_limit;
25174+ if (limit < TASK_SIZE) {
25175+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
25176+ up_write(&mm->mmap_sem);
25177+ return;
25178+ }
25179+ up_write(&mm->mmap_sem);
25180+ }
25181+#endif
25182+
25183 tsk->thread.error_code = error_code;
25184 tsk->thread.trap_nr = X86_TRAP_GP;
25185
25186@@ -450,7 +480,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
25187 /* It's safe to allow irq's after DR6 has been saved */
25188 preempt_conditional_sti(regs);
25189
25190- if (regs->flags & X86_VM_MASK) {
25191+ if (v8086_mode(regs)) {
25192 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
25193 X86_TRAP_DB);
25194 preempt_conditional_cli(regs);
25195@@ -465,7 +495,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
25196 * We already checked v86 mode above, so we can check for kernel mode
25197 * by just checking the CPL of CS.
25198 */
25199- if ((dr6 & DR_STEP) && !user_mode(regs)) {
25200+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
25201 tsk->thread.debugreg6 &= ~DR_STEP;
25202 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
25203 regs->flags &= ~X86_EFLAGS_TF;
25204@@ -497,7 +527,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
25205 return;
25206 conditional_sti(regs);
25207
25208- if (!user_mode_vm(regs))
25209+ if (!user_mode(regs))
25210 {
25211 if (!fixup_exception(regs)) {
25212 task->thread.error_code = error_code;
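A reference point for the magic trap numbers in the traps.c hunks above: vector 4 is the x86 overflow exception (#OF), which is exactly what the into/int $4 instrumentation added by this patch raises (see the atomic64 hunks further down), and vector 12 is the stack-segment fault (#SS) that the KERNEXEC check pairs with __KERNEL_CS/__KERNEXEC_KERNEL_CS. These are the standard x86 vector numbers, not something the patch defines:

    /* standard x86 exception vectors (context only, not part of the patch) */
    #define X86_TRAP_OF  4    /* overflow: raised by "into" or "int $4" */
    #define X86_TRAP_SS 12    /* stack-segment fault */
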
25213diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
25214index 2ed8459..7cf329f 100644
25215--- a/arch/x86/kernel/uprobes.c
25216+++ b/arch/x86/kernel/uprobes.c
25217@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
25218 int ret = NOTIFY_DONE;
25219
25220 /* We are only interested in userspace traps */
25221- if (regs && !user_mode_vm(regs))
25222+ if (regs && !user_mode(regs))
25223 return NOTIFY_DONE;
25224
25225 switch (val) {
25226@@ -719,7 +719,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
25227
25228 if (ncopied != rasize) {
25229 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
25230- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
25231+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
25232
25233 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
25234 }
25235diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
25236index b9242ba..50c5edd 100644
25237--- a/arch/x86/kernel/verify_cpu.S
25238+++ b/arch/x86/kernel/verify_cpu.S
25239@@ -20,6 +20,7 @@
25240 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
25241 * arch/x86/kernel/trampoline_64.S: secondary processor verification
25242 * arch/x86/kernel/head_32.S: processor startup
25243+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
25244 *
25245 * verify_cpu, returns the status of longmode and SSE in register %eax.
25246 * 0: Success 1: Failure
25247diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
25248index e8edcf5..27f9344 100644
25249--- a/arch/x86/kernel/vm86_32.c
25250+++ b/arch/x86/kernel/vm86_32.c
25251@@ -44,6 +44,7 @@
25252 #include <linux/ptrace.h>
25253 #include <linux/audit.h>
25254 #include <linux/stddef.h>
25255+#include <linux/grsecurity.h>
25256
25257 #include <asm/uaccess.h>
25258 #include <asm/io.h>
25259@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
25260 do_exit(SIGSEGV);
25261 }
25262
25263- tss = &per_cpu(init_tss, get_cpu());
25264+ tss = init_tss + get_cpu();
25265 current->thread.sp0 = current->thread.saved_sp0;
25266 current->thread.sysenter_cs = __KERNEL_CS;
25267 load_sp0(tss, &current->thread);
25268@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
25269
25270 if (tsk->thread.saved_sp0)
25271 return -EPERM;
25272+
25273+#ifdef CONFIG_GRKERNSEC_VM86
25274+ if (!capable(CAP_SYS_RAWIO)) {
25275+ gr_handle_vm86();
25276+ return -EPERM;
25277+ }
25278+#endif
25279+
25280 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
25281 offsetof(struct kernel_vm86_struct, vm86plus) -
25282 sizeof(info.regs));
25283@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
25284 int tmp;
25285 struct vm86plus_struct __user *v86;
25286
25287+#ifdef CONFIG_GRKERNSEC_VM86
25288+ if (!capable(CAP_SYS_RAWIO)) {
25289+ gr_handle_vm86();
25290+ return -EPERM;
25291+ }
25292+#endif
25293+
25294 tsk = current;
25295 switch (cmd) {
25296 case VM86_REQUEST_IRQ:
25297@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
25298 tsk->thread.saved_fs = info->regs32->fs;
25299 tsk->thread.saved_gs = get_user_gs(info->regs32);
25300
25301- tss = &per_cpu(init_tss, get_cpu());
25302+ tss = init_tss + get_cpu();
25303 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
25304 if (cpu_has_sep)
25305 tsk->thread.sysenter_cs = 0;
25306@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
25307 goto cannot_handle;
25308 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
25309 goto cannot_handle;
25310- intr_ptr = (unsigned long __user *) (i << 2);
25311+ intr_ptr = (__force unsigned long __user *) (i << 2);
25312 if (get_user(segoffs, intr_ptr))
25313 goto cannot_handle;
25314 if ((segoffs >> 16) == BIOSSEG)
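Both vm86 entry points (sys_vm86old and sys_vm86) get the identical CONFIG_GRKERNSEC_VM86 gate above: without CAP_SYS_RAWIO the syscall fails with -EPERM, and gr_handle_vm86(), assumed here to be grsecurity's audit-logging hook (it is not defined in this section), records the attempt. From userland the hardening is observable as:

    /* illustrative fragment (i386 only; vm86old has no glibc wrapper) */
    #include <sys/syscall.h>
    #include <unistd.h>

    long ret = syscall(SYS_vm86old, &v86);
    /* with GRKERNSEC_VM86 set and no CAP_SYS_RAWIO: ret == -1, errno == EPERM */
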
25315diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
25316index 10c4f30..57377c2 100644
25317--- a/arch/x86/kernel/vmlinux.lds.S
25318+++ b/arch/x86/kernel/vmlinux.lds.S
25319@@ -26,6 +26,13 @@
25320 #include <asm/page_types.h>
25321 #include <asm/cache.h>
25322 #include <asm/boot.h>
25323+#include <asm/segment.h>
25324+
25325+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25326+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
25327+#else
25328+#define __KERNEL_TEXT_OFFSET 0
25329+#endif
25330
25331 #undef i386 /* in case the preprocessor is a 32bit one */
25332
25333@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
25334
25335 PHDRS {
25336 text PT_LOAD FLAGS(5); /* R_E */
25337+#ifdef CONFIG_X86_32
25338+ module PT_LOAD FLAGS(5); /* R_E */
25339+#endif
25340+#ifdef CONFIG_XEN
25341+ rodata PT_LOAD FLAGS(5); /* R_E */
25342+#else
25343+ rodata PT_LOAD FLAGS(4); /* R__ */
25344+#endif
25345 data PT_LOAD FLAGS(6); /* RW_ */
25346-#ifdef CONFIG_X86_64
25347+ init.begin PT_LOAD FLAGS(6); /* RW_ */
25348 #ifdef CONFIG_SMP
25349 percpu PT_LOAD FLAGS(6); /* RW_ */
25350 #endif
25351+ text.init PT_LOAD FLAGS(5); /* R_E */
25352+ text.exit PT_LOAD FLAGS(5); /* R_E */
25353 init PT_LOAD FLAGS(7); /* RWE */
25354-#endif
25355 note PT_NOTE FLAGS(0); /* ___ */
25356 }
25357
25358 SECTIONS
25359 {
25360 #ifdef CONFIG_X86_32
25361- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
25362- phys_startup_32 = startup_32 - LOAD_OFFSET;
25363+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
25364 #else
25365- . = __START_KERNEL;
25366- phys_startup_64 = startup_64 - LOAD_OFFSET;
25367+ . = __START_KERNEL;
25368 #endif
25369
25370 /* Text and read-only data */
25371- .text : AT(ADDR(.text) - LOAD_OFFSET) {
25372- _text = .;
25373+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
25374 /* bootstrapping code */
25375+#ifdef CONFIG_X86_32
25376+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
25377+#else
25378+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
25379+#endif
25380+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
25381+ _text = .;
25382 HEAD_TEXT
25383 . = ALIGN(8);
25384 _stext = .;
25385@@ -104,13 +124,48 @@ SECTIONS
25386 IRQENTRY_TEXT
25387 *(.fixup)
25388 *(.gnu.warning)
25389- /* End of text section */
25390- _etext = .;
25391 } :text = 0x9090
25392
25393- NOTES :text :note
25394+ . += __KERNEL_TEXT_OFFSET;
25395
25396- EXCEPTION_TABLE(16) :text = 0x9090
25397+#ifdef CONFIG_X86_32
25398+ . = ALIGN(PAGE_SIZE);
25399+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
25400+
25401+#ifdef CONFIG_PAX_KERNEXEC
25402+ MODULES_EXEC_VADDR = .;
25403+ BYTE(0)
25404+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
25405+ . = ALIGN(HPAGE_SIZE) - 1;
25406+ MODULES_EXEC_END = .;
25407+#endif
25408+
25409+ } :module
25410+#endif
25411+
25412+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
25413+ /* End of text section */
25414+ BYTE(0)
25415+ _etext = . - __KERNEL_TEXT_OFFSET;
25416+ }
25417+
25418+#ifdef CONFIG_X86_32
25419+ . = ALIGN(PAGE_SIZE);
25420+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
25421+ *(.idt)
25422+ . = ALIGN(PAGE_SIZE);
25423+ *(.empty_zero_page)
25424+ *(.initial_pg_fixmap)
25425+ *(.initial_pg_pmd)
25426+ *(.initial_page_table)
25427+ *(.swapper_pg_dir)
25428+ } :rodata
25429+#endif
25430+
25431+ . = ALIGN(PAGE_SIZE);
25432+ NOTES :rodata :note
25433+
25434+ EXCEPTION_TABLE(16) :rodata
25435
25436 #if defined(CONFIG_DEBUG_RODATA)
25437 /* .text should occupy whole number of pages */
25438@@ -122,16 +177,20 @@ SECTIONS
25439
25440 /* Data */
25441 .data : AT(ADDR(.data) - LOAD_OFFSET) {
25442+
25443+#ifdef CONFIG_PAX_KERNEXEC
25444+ . = ALIGN(HPAGE_SIZE);
25445+#else
25446+ . = ALIGN(PAGE_SIZE);
25447+#endif
25448+
25449 /* Start of data section */
25450 _sdata = .;
25451
25452 /* init_task */
25453 INIT_TASK_DATA(THREAD_SIZE)
25454
25455-#ifdef CONFIG_X86_32
25456- /* 32 bit has nosave before _edata */
25457 NOSAVE_DATA
25458-#endif
25459
25460 PAGE_ALIGNED_DATA(PAGE_SIZE)
25461
25462@@ -172,12 +231,19 @@ SECTIONS
25463 #endif /* CONFIG_X86_64 */
25464
25465 /* Init code and data - will be freed after init */
25466- . = ALIGN(PAGE_SIZE);
25467 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
25468+ BYTE(0)
25469+
25470+#ifdef CONFIG_PAX_KERNEXEC
25471+ . = ALIGN(HPAGE_SIZE);
25472+#else
25473+ . = ALIGN(PAGE_SIZE);
25474+#endif
25475+
25476 __init_begin = .; /* paired with __init_end */
25477- }
25478+ } :init.begin
25479
25480-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
25481+#ifdef CONFIG_SMP
25482 /*
25483 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
25484 * output PHDR, so the next output section - .init.text - should
25485@@ -186,12 +252,27 @@ SECTIONS
25486 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
25487 #endif
25488
25489- INIT_TEXT_SECTION(PAGE_SIZE)
25490-#ifdef CONFIG_X86_64
25491- :init
25492-#endif
25493+ . = ALIGN(PAGE_SIZE);
25494+ init_begin = .;
25495+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
25496+ VMLINUX_SYMBOL(_sinittext) = .;
25497+ INIT_TEXT
25498+ VMLINUX_SYMBOL(_einittext) = .;
25499+ . = ALIGN(PAGE_SIZE);
25500+ } :text.init
25501
25502- INIT_DATA_SECTION(16)
25503+ /*
25504+ * .exit.text is discarded at runtime, not link time, to deal with
25505+ * references from .altinstructions and .eh_frame
25506+ */
25507+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
25508+ EXIT_TEXT
25509+ . = ALIGN(16);
25510+ } :text.exit
25511+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
25512+
25513+ . = ALIGN(PAGE_SIZE);
25514+ INIT_DATA_SECTION(16) :init
25515
25516 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
25517 __x86_cpu_dev_start = .;
25518@@ -253,19 +334,12 @@ SECTIONS
25519 }
25520
25521 . = ALIGN(8);
25522- /*
25523- * .exit.text is discard at runtime, not link time, to deal with
25524- * references from .altinstructions and .eh_frame
25525- */
25526- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
25527- EXIT_TEXT
25528- }
25529
25530 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
25531 EXIT_DATA
25532 }
25533
25534-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
25535+#ifndef CONFIG_SMP
25536 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
25537 #endif
25538
25539@@ -284,16 +358,10 @@ SECTIONS
25540 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
25541 __smp_locks = .;
25542 *(.smp_locks)
25543- . = ALIGN(PAGE_SIZE);
25544 __smp_locks_end = .;
25545+ . = ALIGN(PAGE_SIZE);
25546 }
25547
25548-#ifdef CONFIG_X86_64
25549- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
25550- NOSAVE_DATA
25551- }
25552-#endif
25553-
25554 /* BSS */
25555 . = ALIGN(PAGE_SIZE);
25556 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
25557@@ -309,6 +377,7 @@ SECTIONS
25558 __brk_base = .;
25559 . += 64 * 1024; /* 64k alignment slop space */
25560 *(.brk_reservation) /* areas brk users have reserved */
25561+ . = ALIGN(HPAGE_SIZE);
25562 __brk_limit = .;
25563 }
25564
25565@@ -335,13 +404,12 @@ SECTIONS
25566 * for the boot processor.
25567 */
25568 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
25569-INIT_PER_CPU(gdt_page);
25570 INIT_PER_CPU(irq_stack_union);
25571
25572 /*
25573 * Build-time check on the image size:
25574 */
25575-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
25576+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
25577 "kernel image bigger than KERNEL_IMAGE_SIZE");
25578
25579 #ifdef CONFIG_SMP
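The linker-script rework above is the heart of i386 KERNEXEC: text is emitted at ". - __KERNEL_TEXT_OFFSET" while still loaded at the usual physical address, module space gets its own R_E segment, rodata moves into its own non-writable segment (kept executable only under Xen), and under KERNEXEC the data and init boundaries are aligned to HPAGE_SIZE so the text/data split can use large pages. The offset implies a translation between a text symbol's link-time address and its run-time virtual address; the helpers live outside this section, but a sketch consistent with this layout, and with the ktla_ktva() call in the vmx.c hunk below, would be:

    /* hedged sketch, not the verbatim PaX definition */
    #if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
    #define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET)  /* as linked -> as executed */
    #define ktva_ktla(addr) ((addr) - __KERNEL_TEXT_OFFSET)  /* as executed -> as linked */
    #else
    #define ktla_ktva(addr) (addr)
    #define ktva_ktla(addr) (addr)
    #endif
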
25580diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
25581index 9a907a6..f83f921 100644
25582--- a/arch/x86/kernel/vsyscall_64.c
25583+++ b/arch/x86/kernel/vsyscall_64.c
25584@@ -56,15 +56,13 @@
25585 DEFINE_VVAR(int, vgetcpu_mode);
25586 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
25587
25588-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
25589+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
25590
25591 static int __init vsyscall_setup(char *str)
25592 {
25593 if (str) {
25594 if (!strcmp("emulate", str))
25595 vsyscall_mode = EMULATE;
25596- else if (!strcmp("native", str))
25597- vsyscall_mode = NATIVE;
25598 else if (!strcmp("none", str))
25599 vsyscall_mode = NONE;
25600 else
25601@@ -323,8 +321,7 @@ do_ret:
25602 return true;
25603
25604 sigsegv:
25605- force_sig(SIGSEGV, current);
25606- return true;
25607+ do_group_exit(SIGKILL);
25608 }
25609
25610 /*
25611@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
25612 extern char __vvar_page;
25613 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
25614
25615- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
25616- vsyscall_mode == NATIVE
25617- ? PAGE_KERNEL_VSYSCALL
25618- : PAGE_KERNEL_VVAR);
25619+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
25620 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
25621 (unsigned long)VSYSCALL_START);
25622
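The vsyscall_64.c hunk removes the "native" mode entirely, so the legacy vsyscall page is now always a non-executable trap-and-emulate page (or absent with vsyscall=none), and it turns a fault in that page from a catchable SIGSEGV into do_group_exit(SIGKILL): probing the page kills the process outright. For context, the page sits at a fixed, ABI-mandated address, which is what makes it attractive to return-to-vsyscall attacks in the first place:

    /* context only (x86-64 ABI), not defined in this hunk */
    #define VSYSCALL_ADDR 0xffffffffff600000UL
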
25623diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
25624index b014d94..6d6ca7b 100644
25625--- a/arch/x86/kernel/x8664_ksyms_64.c
25626+++ b/arch/x86/kernel/x8664_ksyms_64.c
25627@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
25628 EXPORT_SYMBOL(copy_user_generic_unrolled);
25629 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
25630 EXPORT_SYMBOL(__copy_user_nocache);
25631-EXPORT_SYMBOL(_copy_from_user);
25632-EXPORT_SYMBOL(_copy_to_user);
25633
25634 EXPORT_SYMBOL(copy_page);
25635 EXPORT_SYMBOL(clear_page);
25636diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
25637index 45a14db..075bb9b 100644
25638--- a/arch/x86/kernel/x86_init.c
25639+++ b/arch/x86/kernel/x86_init.c
25640@@ -85,7 +85,7 @@ struct x86_init_ops x86_init __initdata = {
25641 },
25642 };
25643
25644-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
25645+struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
25646 .early_percpu_clock_init = x86_init_noop,
25647 .setup_percpu_clockev = setup_secondary_APIC_clock,
25648 };
25649@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
25650 static void default_nmi_init(void) { };
25651 static int default_i8042_detect(void) { return 1; };
25652
25653-struct x86_platform_ops x86_platform = {
25654+struct x86_platform_ops x86_platform __read_only = {
25655 .calibrate_tsc = native_calibrate_tsc,
25656 .get_wallclock = mach_get_cmos_time,
25657 .set_wallclock = mach_set_rtc_mmss,
25658@@ -107,7 +107,7 @@ struct x86_platform_ops x86_platform = {
25659 };
25660
25661 EXPORT_SYMBOL_GPL(x86_platform);
25662-struct x86_msi_ops x86_msi = {
25663+struct x86_msi_ops x86_msi __read_only = {
25664 .setup_msi_irqs = native_setup_msi_irqs,
25665 .compose_msi_msg = native_compose_msi_msg,
25666 .teardown_msi_irq = native_teardown_msi_irq,
25667@@ -116,7 +116,7 @@ struct x86_msi_ops x86_msi = {
25668 .setup_hpet_msi = default_setup_hpet_msi,
25669 };
25670
25671-struct x86_io_apic_ops x86_io_apic_ops = {
25672+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
25673 .init = native_io_apic_init_mappings,
25674 .read = native_io_apic_read,
25675 .write = native_io_apic_write,
25676diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
25677index ada87a3..afea76d 100644
25678--- a/arch/x86/kernel/xsave.c
25679+++ b/arch/x86/kernel/xsave.c
25680@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
25681 {
25682 int err;
25683
25684+ buf = (struct xsave_struct __user *)____m(buf);
25685 if (use_xsave())
25686 err = xsave_user(buf);
25687 else if (use_fxsr())
25688@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
25689 */
25690 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
25691 {
25692+ buf = (void __user *)____m(buf);
25693 if (use_xsave()) {
25694 if ((unsigned long)buf % 64 || fx_only) {
25695 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
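____m(), applied above to the user-supplied buffer before xsave/fxsave and their restore counterparts touch it, is the UDEREF pointer-remapping helper. Its definition is outside this section, but the open-coded assembly version appears later in the copy_user_nocache_64.S and getuser.S hunks (load pax_user_shadow_base, compare, add). A C sketch consistent with that assembly:

    /* hedged sketch, not the verbatim PaX definition */
    #ifdef CONFIG_PAX_MEMORY_UDEREF
    extern unsigned long pax_user_shadow_base;
    #define ____m(x) ({                                     \
            unsigned long ____x = (unsigned long)(x);       \
            if (____x < pax_user_shadow_base)               \
                    ____x += pax_user_shadow_base;          \
            (typeof(x))____x; /* now inside the shadow user mapping */ \
    })
    #else
    #define ____m(x) (x)
    #endif
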
25696diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
25697index a20ecb5..d0e2194 100644
25698--- a/arch/x86/kvm/cpuid.c
25699+++ b/arch/x86/kvm/cpuid.c
25700@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
25701 struct kvm_cpuid2 *cpuid,
25702 struct kvm_cpuid_entry2 __user *entries)
25703 {
25704- int r;
25705+ int r, i;
25706
25707 r = -E2BIG;
25708 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
25709 goto out;
25710 r = -EFAULT;
25711- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
25712- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
25713+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
25714 goto out;
25715+ for (i = 0; i < cpuid->nent; ++i) {
25716+ struct kvm_cpuid_entry2 cpuid_entry;
25717+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
25718+ goto out;
25719+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
25720+ }
25721 vcpu->arch.cpuid_nent = cpuid->nent;
25722 kvm_apic_set_version(vcpu);
25723 kvm_x86_ops->cpuid_update(vcpu);
25724@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
25725 struct kvm_cpuid2 *cpuid,
25726 struct kvm_cpuid_entry2 __user *entries)
25727 {
25728- int r;
25729+ int r, i;
25730
25731 r = -E2BIG;
25732 if (cpuid->nent < vcpu->arch.cpuid_nent)
25733 goto out;
25734 r = -EFAULT;
25735- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
25736- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
25737+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
25738 goto out;
25739+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
25740+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
25741+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
25742+ goto out;
25743+ }
25744 return 0;
25745
25746 out:
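The cpuid.c rewrite trades one large copy_{from,to}_user() whose length is a user-influenced multiplication (nent * sizeof(struct kvm_cpuid_entry2)) for a single access_ok() check plus fixed-size per-entry __copy_{from,to}_user() calls. The patch does not state its motive; a plausible editorial reading is that each copy now has a compile-time-constant size, which sidesteps multiplication-overflow concerns and keeps every transfer within one struct-sized object for usercopy checking.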
25747diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
25748index 5953dce..f11a7d2 100644
25749--- a/arch/x86/kvm/emulate.c
25750+++ b/arch/x86/kvm/emulate.c
25751@@ -329,6 +329,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
25752
25753 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
25754 do { \
25755+ unsigned long _tmp; \
25756 __asm__ __volatile__ ( \
25757 _PRE_EFLAGS("0", "4", "2") \
25758 _op _suffix " %"_x"3,%1; " \
25759@@ -343,8 +344,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
25760 /* Raw emulation: instruction has two explicit operands. */
25761 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
25762 do { \
25763- unsigned long _tmp; \
25764- \
25765 switch ((ctxt)->dst.bytes) { \
25766 case 2: \
25767 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
25768@@ -360,7 +359,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
25769
25770 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
25771 do { \
25772- unsigned long _tmp; \
25773 switch ((ctxt)->dst.bytes) { \
25774 case 1: \
25775 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
25776diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
25777index 0eee2c8..94a32c3 100644
25778--- a/arch/x86/kvm/lapic.c
25779+++ b/arch/x86/kvm/lapic.c
25780@@ -55,7 +55,7 @@
25781 #define APIC_BUS_CYCLE_NS 1
25782
25783 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
25784-#define apic_debug(fmt, arg...)
25785+#define apic_debug(fmt, arg...) do {} while (0)
25786
25787 #define APIC_LVT_NUM 6
25788 /* 14 is the version for Xeon and Pentium 8.4.8*/
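Redefining the disabled apic_debug() as "do {} while (0)" rather than as nothing is the standard idiom for stubbing out a statement-like macro: the expansion remains one real statement, so it keeps if/else bodies well-formed and avoids -Wempty-body warnings. A minimal illustration with hypothetical macros (not from the patch):

    #define dbg_empty(fmt, ...)
    #define dbg_stmt(fmt, ...) do {} while (0)

    void f(int cond)
    {
            if (cond)
                    dbg_empty("x");  /* leaves a bare ";" body: -Wempty-body warns */
            if (cond)
                    dbg_stmt("x");   /* expands to a real (empty) statement */
    }
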
25789diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
25790index da20860..d19fdf5 100644
25791--- a/arch/x86/kvm/paging_tmpl.h
25792+++ b/arch/x86/kvm/paging_tmpl.h
25793@@ -208,7 +208,7 @@ retry_walk:
25794 if (unlikely(kvm_is_error_hva(host_addr)))
25795 goto error;
25796
25797- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
25798+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
25799 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
25800 goto error;
25801 walker->ptep_user[walker->level - 1] = ptep_user;
25802diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
25803index a14a6ea..dc86cf0 100644
25804--- a/arch/x86/kvm/svm.c
25805+++ b/arch/x86/kvm/svm.c
25806@@ -3493,7 +3493,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
25807 int cpu = raw_smp_processor_id();
25808
25809 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
25810+
25811+ pax_open_kernel();
25812 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
25813+ pax_close_kernel();
25814+
25815 load_TR_desc();
25816 }
25817
25818@@ -3894,6 +3898,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
25819 #endif
25820 #endif
25821
25822+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
25823+ __set_fs(current_thread_info()->addr_limit);
25824+#endif
25825+
25826 reload_tss(vcpu);
25827
25828 local_irq_disable();
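pax_open_kernel()/pax_close_kernel(), bracketing the TSS-descriptor write above (and the kvm_x86_ops writes in the vmx.c hunks below), temporarily lift the write protection KERNEXEC places on such data. The helpers are defined outside this section; a minimal sketch of the commonly used CR0.WP-toggling mechanism, with the caveat that the real PaX versions are more careful about paravirt and reentrancy:

    static inline unsigned long pax_open_kernel(void)
    {
            unsigned long cr0;

            preempt_disable();
            cr0 = read_cr0() & ~X86_CR0_WP; /* let ring 0 write read-only pages */
            write_cr0(cr0);
            return cr0;
    }

    static inline unsigned long pax_close_kernel(void)
    {
            unsigned long cr0 = read_cr0() | X86_CR0_WP; /* re-arm write protect */

            write_cr0(cr0);
            preempt_enable();
            return cr0;
    }
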
25829diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
25830index 5402c94..c3bdeee 100644
25831--- a/arch/x86/kvm/vmx.c
25832+++ b/arch/x86/kvm/vmx.c
25833@@ -1311,12 +1311,12 @@ static void vmcs_write64(unsigned long field, u64 value)
25834 #endif
25835 }
25836
25837-static void vmcs_clear_bits(unsigned long field, u32 mask)
25838+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
25839 {
25840 vmcs_writel(field, vmcs_readl(field) & ~mask);
25841 }
25842
25843-static void vmcs_set_bits(unsigned long field, u32 mask)
25844+static void vmcs_set_bits(unsigned long field, unsigned long mask)
25845 {
25846 vmcs_writel(field, vmcs_readl(field) | mask);
25847 }
25848@@ -1517,7 +1517,11 @@ static void reload_tss(void)
25849 struct desc_struct *descs;
25850
25851 descs = (void *)gdt->address;
25852+
25853+ pax_open_kernel();
25854 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
25855+ pax_close_kernel();
25856+
25857 load_TR_desc();
25858 }
25859
25860@@ -1741,6 +1745,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
25861 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
25862 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
25863
25864+#ifdef CONFIG_PAX_PER_CPU_PGD
25865+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
25866+#endif
25867+
25868 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
25869 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
25870 vmx->loaded_vmcs->cpu = cpu;
25871@@ -2935,8 +2943,11 @@ static __init int hardware_setup(void)
25872 if (!cpu_has_vmx_flexpriority())
25873 flexpriority_enabled = 0;
25874
25875- if (!cpu_has_vmx_tpr_shadow())
25876- kvm_x86_ops->update_cr8_intercept = NULL;
25877+ if (!cpu_has_vmx_tpr_shadow()) {
25878+ pax_open_kernel();
25879+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
25880+ pax_close_kernel();
25881+ }
25882
25883 if (enable_ept && !cpu_has_vmx_ept_2m_page())
25884 kvm_disable_largepages();
25885@@ -2947,13 +2958,15 @@ static __init int hardware_setup(void)
25886 if (!cpu_has_vmx_apicv())
25887 enable_apicv = 0;
25888
25889+ pax_open_kernel();
25890 if (enable_apicv)
25891- kvm_x86_ops->update_cr8_intercept = NULL;
25892+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
25893 else {
25894- kvm_x86_ops->hwapic_irr_update = NULL;
25895- kvm_x86_ops->deliver_posted_interrupt = NULL;
25896- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
25897+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
25898+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
25899+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
25900 }
25901+ pax_close_kernel();
25902
25903 if (nested)
25904 nested_vmx_setup_ctls_msrs();
25905@@ -4076,7 +4089,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
25906
25907 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
25908 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
25909+
25910+#ifndef CONFIG_PAX_PER_CPU_PGD
25911 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
25912+#endif
25913
25914 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
25915 #ifdef CONFIG_X86_64
25916@@ -4098,7 +4114,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
25917 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
25918 vmx->host_idt_base = dt.address;
25919
25920- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
25921+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
25922
25923 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
25924 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
25925@@ -7030,6 +7046,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
25926 "jmp 2f \n\t"
25927 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
25928 "2: "
25929+
25930+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25931+ "ljmp %[cs],$3f\n\t"
25932+ "3: "
25933+#endif
25934+
25935 /* Save guest registers, load host registers, keep flags */
25936 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
25937 "pop %0 \n\t"
25938@@ -7082,6 +7104,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
25939 #endif
25940 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
25941 [wordsize]"i"(sizeof(ulong))
25942+
25943+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25944+ ,[cs]"i"(__KERNEL_CS)
25945+#endif
25946+
25947 : "cc", "memory"
25948 #ifdef CONFIG_X86_64
25949 , "rax", "rbx", "rdi", "rsi"
25950@@ -7095,7 +7122,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
25951 if (debugctlmsr)
25952 update_debugctlmsr(debugctlmsr);
25953
25954-#ifndef CONFIG_X86_64
25955+#ifdef CONFIG_X86_32
25956 /*
25957 * The sysexit path does not restore ds/es, so we must set them to
25958 * a reasonable value ourselves.
25959@@ -7104,8 +7131,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
25960 * may be executed in interrupt context, which saves and restore segments
25961 * around it, nullifying its effect.
25962 */
25963- loadsegment(ds, __USER_DS);
25964- loadsegment(es, __USER_DS);
25965+ loadsegment(ds, __KERNEL_DS);
25966+ loadsegment(es, __KERNEL_DS);
25967+ loadsegment(ss, __KERNEL_DS);
25968+
25969+#ifdef CONFIG_PAX_KERNEXEC
25970+ loadsegment(fs, __KERNEL_PERCPU);
25971+#endif
25972+
25973+#ifdef CONFIG_PAX_MEMORY_UDEREF
25974+ __set_fs(current_thread_info()->addr_limit);
25975+#endif
25976+
25977 #endif
25978
25979 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
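Two idioms in the vmx.c hunks are worth spelling out. Writes into kvm_x86_ops now go through "*(void **)&ops->member = ..." between pax_open_kernel()/pax_close_kernel(): the cast defeats the const-ness the ops table acquires at the C level once it is made read-only, while the open/close pair lifts the page-level protection for the store. Separately, on 32-bit KERNEXEC the exit path gains "ljmp %[cs],$3f", a far jump that reloads %cs with __KERNEL_CS after VM exit, followed by segment reloads that use __KERNEL_DS (plus __KERNEL_PERCPU and the saved addr_limit under KERNEXEC/UDEREF) instead of __USER_DS. Reduced to essentials, the ops-table pattern is:

    pax_open_kernel();
    *(void **)&kvm_x86_ops->update_cr8_intercept = NULL; /* store through const */
    pax_close_kernel();
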
25980diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
25981index e8ba99c..ee9d7d9 100644
25982--- a/arch/x86/kvm/x86.c
25983+++ b/arch/x86/kvm/x86.c
25984@@ -1725,8 +1725,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
25985 {
25986 struct kvm *kvm = vcpu->kvm;
25987 int lm = is_long_mode(vcpu);
25988- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
25989- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
25990+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
25991+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
25992 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
25993 : kvm->arch.xen_hvm_config.blob_size_32;
25994 u32 page_num = data & ~PAGE_MASK;
25995@@ -2609,6 +2609,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
25996 if (n < msr_list.nmsrs)
25997 goto out;
25998 r = -EFAULT;
25999+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
26000+ goto out;
26001 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
26002 num_msrs_to_save * sizeof(u32)))
26003 goto out;
26004@@ -5297,7 +5299,7 @@ static struct notifier_block pvclock_gtod_notifier = {
26005 };
26006 #endif
26007
26008-int kvm_arch_init(void *opaque)
26009+int kvm_arch_init(const void *opaque)
26010 {
26011 int r;
26012 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
26013diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
26014index 7114c63..a1018fc 100644
26015--- a/arch/x86/lguest/boot.c
26016+++ b/arch/x86/lguest/boot.c
26017@@ -1201,9 +1201,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
26018 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
26019 * Launcher to reboot us.
26020 */
26021-static void lguest_restart(char *reason)
26022+static __noreturn void lguest_restart(char *reason)
26023 {
26024 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
26025+ BUG();
26026 }
26027
26028 /*G:050
26029diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
26030index 00933d5..3a64af9 100644
26031--- a/arch/x86/lib/atomic64_386_32.S
26032+++ b/arch/x86/lib/atomic64_386_32.S
26033@@ -48,6 +48,10 @@ BEGIN(read)
26034 movl (v), %eax
26035 movl 4(v), %edx
26036 RET_ENDP
26037+BEGIN(read_unchecked)
26038+ movl (v), %eax
26039+ movl 4(v), %edx
26040+RET_ENDP
26041 #undef v
26042
26043 #define v %esi
26044@@ -55,6 +59,10 @@ BEGIN(set)
26045 movl %ebx, (v)
26046 movl %ecx, 4(v)
26047 RET_ENDP
26048+BEGIN(set_unchecked)
26049+ movl %ebx, (v)
26050+ movl %ecx, 4(v)
26051+RET_ENDP
26052 #undef v
26053
26054 #define v %esi
26055@@ -70,6 +78,20 @@ RET_ENDP
26056 BEGIN(add)
26057 addl %eax, (v)
26058 adcl %edx, 4(v)
26059+
26060+#ifdef CONFIG_PAX_REFCOUNT
26061+ jno 0f
26062+ subl %eax, (v)
26063+ sbbl %edx, 4(v)
26064+ int $4
26065+0:
26066+ _ASM_EXTABLE(0b, 0b)
26067+#endif
26068+
26069+RET_ENDP
26070+BEGIN(add_unchecked)
26071+ addl %eax, (v)
26072+ adcl %edx, 4(v)
26073 RET_ENDP
26074 #undef v
26075
26076@@ -77,6 +99,24 @@ RET_ENDP
26077 BEGIN(add_return)
26078 addl (v), %eax
26079 adcl 4(v), %edx
26080+
26081+#ifdef CONFIG_PAX_REFCOUNT
26082+ into
26083+1234:
26084+ _ASM_EXTABLE(1234b, 2f)
26085+#endif
26086+
26087+ movl %eax, (v)
26088+ movl %edx, 4(v)
26089+
26090+#ifdef CONFIG_PAX_REFCOUNT
26091+2:
26092+#endif
26093+
26094+RET_ENDP
26095+BEGIN(add_return_unchecked)
26096+ addl (v), %eax
26097+ adcl 4(v), %edx
26098 movl %eax, (v)
26099 movl %edx, 4(v)
26100 RET_ENDP
26101@@ -86,6 +126,20 @@ RET_ENDP
26102 BEGIN(sub)
26103 subl %eax, (v)
26104 sbbl %edx, 4(v)
26105+
26106+#ifdef CONFIG_PAX_REFCOUNT
26107+ jno 0f
26108+ addl %eax, (v)
26109+ adcl %edx, 4(v)
26110+ int $4
26111+0:
26112+ _ASM_EXTABLE(0b, 0b)
26113+#endif
26114+
26115+RET_ENDP
26116+BEGIN(sub_unchecked)
26117+ subl %eax, (v)
26118+ sbbl %edx, 4(v)
26119 RET_ENDP
26120 #undef v
26121
26122@@ -96,6 +150,27 @@ BEGIN(sub_return)
26123 sbbl $0, %edx
26124 addl (v), %eax
26125 adcl 4(v), %edx
26126+
26127+#ifdef CONFIG_PAX_REFCOUNT
26128+ into
26129+1234:
26130+ _ASM_EXTABLE(1234b, 2f)
26131+#endif
26132+
26133+ movl %eax, (v)
26134+ movl %edx, 4(v)
26135+
26136+#ifdef CONFIG_PAX_REFCOUNT
26137+2:
26138+#endif
26139+
26140+RET_ENDP
26141+BEGIN(sub_return_unchecked)
26142+ negl %edx
26143+ negl %eax
26144+ sbbl $0, %edx
26145+ addl (v), %eax
26146+ adcl 4(v), %edx
26147 movl %eax, (v)
26148 movl %edx, 4(v)
26149 RET_ENDP
26150@@ -105,6 +180,20 @@ RET_ENDP
26151 BEGIN(inc)
26152 addl $1, (v)
26153 adcl $0, 4(v)
26154+
26155+#ifdef CONFIG_PAX_REFCOUNT
26156+ jno 0f
26157+ subl $1, (v)
26158+ sbbl $0, 4(v)
26159+ int $4
26160+0:
26161+ _ASM_EXTABLE(0b, 0b)
26162+#endif
26163+
26164+RET_ENDP
26165+BEGIN(inc_unchecked)
26166+ addl $1, (v)
26167+ adcl $0, 4(v)
26168 RET_ENDP
26169 #undef v
26170
26171@@ -114,6 +203,26 @@ BEGIN(inc_return)
26172 movl 4(v), %edx
26173 addl $1, %eax
26174 adcl $0, %edx
26175+
26176+#ifdef CONFIG_PAX_REFCOUNT
26177+ into
26178+1234:
26179+ _ASM_EXTABLE(1234b, 2f)
26180+#endif
26181+
26182+ movl %eax, (v)
26183+ movl %edx, 4(v)
26184+
26185+#ifdef CONFIG_PAX_REFCOUNT
26186+2:
26187+#endif
26188+
26189+RET_ENDP
26190+BEGIN(inc_return_unchecked)
26191+ movl (v), %eax
26192+ movl 4(v), %edx
26193+ addl $1, %eax
26194+ adcl $0, %edx
26195 movl %eax, (v)
26196 movl %edx, 4(v)
26197 RET_ENDP
26198@@ -123,6 +232,20 @@ RET_ENDP
26199 BEGIN(dec)
26200 subl $1, (v)
26201 sbbl $0, 4(v)
26202+
26203+#ifdef CONFIG_PAX_REFCOUNT
26204+ jno 0f
26205+ addl $1, (v)
26206+ adcl $0, 4(v)
26207+ int $4
26208+0:
26209+ _ASM_EXTABLE(0b, 0b)
26210+#endif
26211+
26212+RET_ENDP
26213+BEGIN(dec_unchecked)
26214+ subl $1, (v)
26215+ sbbl $0, 4(v)
26216 RET_ENDP
26217 #undef v
26218
26219@@ -132,6 +255,26 @@ BEGIN(dec_return)
26220 movl 4(v), %edx
26221 subl $1, %eax
26222 sbbl $0, %edx
26223+
26224+#ifdef CONFIG_PAX_REFCOUNT
26225+ into
26226+1234:
26227+ _ASM_EXTABLE(1234b, 2f)
26228+#endif
26229+
26230+ movl %eax, (v)
26231+ movl %edx, 4(v)
26232+
26233+#ifdef CONFIG_PAX_REFCOUNT
26234+2:
26235+#endif
26236+
26237+RET_ENDP
26238+BEGIN(dec_return_unchecked)
26239+ movl (v), %eax
26240+ movl 4(v), %edx
26241+ subl $1, %eax
26242+ sbbl $0, %edx
26243 movl %eax, (v)
26244 movl %edx, 4(v)
26245 RET_ENDP
26246@@ -143,6 +286,13 @@ BEGIN(add_unless)
26247 adcl %edx, %edi
26248 addl (v), %eax
26249 adcl 4(v), %edx
26250+
26251+#ifdef CONFIG_PAX_REFCOUNT
26252+ into
26253+1234:
26254+ _ASM_EXTABLE(1234b, 2f)
26255+#endif
26256+
26257 cmpl %eax, %ecx
26258 je 3f
26259 1:
26260@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
26261 1:
26262 addl $1, %eax
26263 adcl $0, %edx
26264+
26265+#ifdef CONFIG_PAX_REFCOUNT
26266+ into
26267+1234:
26268+ _ASM_EXTABLE(1234b, 2f)
26269+#endif
26270+
26271 movl %eax, (v)
26272 movl %edx, 4(v)
26273 movl $1, %eax
26274@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
26275 movl 4(v), %edx
26276 subl $1, %eax
26277 sbbl $0, %edx
26278+
26279+#ifdef CONFIG_PAX_REFCOUNT
26280+ into
26281+1234:
26282+ _ASM_EXTABLE(1234b, 1f)
26283+#endif
26284+
26285 js 1f
26286 movl %eax, (v)
26287 movl %edx, 4(v)
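The jno/int $4 and into/_ASM_EXTABLE sequences added throughout atomic64_386_32.S are the PAX_REFCOUNT pattern: each checked operation tests the signed-overflow flag, undoes (or skips committing) the arithmetic, and raises the overflow exception that the traps.c hunk earlier routes to pax_report_refcount_overflow(); the *_unchecked variants keep raw semantics for counters that may legitimately wrap. The instrumentation is easier to read in its single-word form; a sketch, assuming the 32-bit atomic_add used elsewhere in PaX:

    /* hedged sketch of the one-word analogue; not taken verbatim from this patch */
    static inline void atomic_add(int i, atomic_t *v)
    {
            asm volatile(LOCK_PREFIX "addl %1,%0\n"

    #ifdef CONFIG_PAX_REFCOUNT
                         "jno 0f\n"                 /* no signed overflow: done  */
                         LOCK_PREFIX "subl %1,%0\n" /* overflow: undo the add    */
                         "int $4\n0:\n"             /* raise #OF for the handler */
                         _ASM_EXTABLE(0b, 0b)
    #endif

                         : "+m" (v->counter)
                         : "ir" (i));
    }
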
26288diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
26289index f5cc9eb..51fa319 100644
26290--- a/arch/x86/lib/atomic64_cx8_32.S
26291+++ b/arch/x86/lib/atomic64_cx8_32.S
26292@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
26293 CFI_STARTPROC
26294
26295 read64 %ecx
26296+ pax_force_retaddr
26297 ret
26298 CFI_ENDPROC
26299 ENDPROC(atomic64_read_cx8)
26300
26301+ENTRY(atomic64_read_unchecked_cx8)
26302+ CFI_STARTPROC
26303+
26304+ read64 %ecx
26305+ pax_force_retaddr
26306+ ret
26307+ CFI_ENDPROC
26308+ENDPROC(atomic64_read_unchecked_cx8)
26309+
26310 ENTRY(atomic64_set_cx8)
26311 CFI_STARTPROC
26312
26313@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
26314 cmpxchg8b (%esi)
26315 jne 1b
26316
26317+ pax_force_retaddr
26318 ret
26319 CFI_ENDPROC
26320 ENDPROC(atomic64_set_cx8)
26321
26322+ENTRY(atomic64_set_unchecked_cx8)
26323+ CFI_STARTPROC
26324+
26325+1:
26326+/* we don't need LOCK_PREFIX since aligned 64-bit writes
26327+ * are atomic on 586 and newer */
26328+ cmpxchg8b (%esi)
26329+ jne 1b
26330+
26331+ pax_force_retaddr
26332+ ret
26333+ CFI_ENDPROC
26334+ENDPROC(atomic64_set_unchecked_cx8)
26335+
26336 ENTRY(atomic64_xchg_cx8)
26337 CFI_STARTPROC
26338
26339@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
26340 cmpxchg8b (%esi)
26341 jne 1b
26342
26343+ pax_force_retaddr
26344 ret
26345 CFI_ENDPROC
26346 ENDPROC(atomic64_xchg_cx8)
26347
26348-.macro addsub_return func ins insc
26349-ENTRY(atomic64_\func\()_return_cx8)
26350+.macro addsub_return func ins insc unchecked=""
26351+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
26352 CFI_STARTPROC
26353 SAVE ebp
26354 SAVE ebx
26355@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
26356 movl %edx, %ecx
26357 \ins\()l %esi, %ebx
26358 \insc\()l %edi, %ecx
26359+
26360+.ifb \unchecked
26361+#ifdef CONFIG_PAX_REFCOUNT
26362+ into
26363+2:
26364+ _ASM_EXTABLE(2b, 3f)
26365+#endif
26366+.endif
26367+
26368 LOCK_PREFIX
26369 cmpxchg8b (%ebp)
26370 jne 1b
26371-
26372-10:
26373 movl %ebx, %eax
26374 movl %ecx, %edx
26375+
26376+.ifb \unchecked
26377+#ifdef CONFIG_PAX_REFCOUNT
26378+3:
26379+#endif
26380+.endif
26381+
26382 RESTORE edi
26383 RESTORE esi
26384 RESTORE ebx
26385 RESTORE ebp
26386+ pax_force_retaddr
26387 ret
26388 CFI_ENDPROC
26389-ENDPROC(atomic64_\func\()_return_cx8)
26390+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
26391 .endm
26392
26393 addsub_return add add adc
26394 addsub_return sub sub sbb
26395+addsub_return add add adc _unchecked
26396+addsub_return sub sub sbb _unchecked
26397
26398-.macro incdec_return func ins insc
26399-ENTRY(atomic64_\func\()_return_cx8)
26400+.macro incdec_return func ins insc unchecked=""
26401+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
26402 CFI_STARTPROC
26403 SAVE ebx
26404
26405@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
26406 movl %edx, %ecx
26407 \ins\()l $1, %ebx
26408 \insc\()l $0, %ecx
26409+
26410+.ifb \unchecked
26411+#ifdef CONFIG_PAX_REFCOUNT
26412+ into
26413+2:
26414+ _ASM_EXTABLE(2b, 3f)
26415+#endif
26416+.endif
26417+
26418 LOCK_PREFIX
26419 cmpxchg8b (%esi)
26420 jne 1b
26421
26422-10:
26423 movl %ebx, %eax
26424 movl %ecx, %edx
26425+
26426+.ifb \unchecked
26427+#ifdef CONFIG_PAX_REFCOUNT
26428+3:
26429+#endif
26430+.endif
26431+
26432 RESTORE ebx
26433+ pax_force_retaddr
26434 ret
26435 CFI_ENDPROC
26436-ENDPROC(atomic64_\func\()_return_cx8)
26437+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
26438 .endm
26439
26440 incdec_return inc add adc
26441 incdec_return dec sub sbb
26442+incdec_return inc add adc _unchecked
26443+incdec_return dec sub sbb _unchecked
26444
26445 ENTRY(atomic64_dec_if_positive_cx8)
26446 CFI_STARTPROC
26447@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
26448 movl %edx, %ecx
26449 subl $1, %ebx
26450 sbb $0, %ecx
26451+
26452+#ifdef CONFIG_PAX_REFCOUNT
26453+ into
26454+1234:
26455+ _ASM_EXTABLE(1234b, 2f)
26456+#endif
26457+
26458 js 2f
26459 LOCK_PREFIX
26460 cmpxchg8b (%esi)
26461@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
26462 movl %ebx, %eax
26463 movl %ecx, %edx
26464 RESTORE ebx
26465+ pax_force_retaddr
26466 ret
26467 CFI_ENDPROC
26468 ENDPROC(atomic64_dec_if_positive_cx8)
26469@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
26470 movl %edx, %ecx
26471 addl %ebp, %ebx
26472 adcl %edi, %ecx
26473+
26474+#ifdef CONFIG_PAX_REFCOUNT
26475+ into
26476+1234:
26477+ _ASM_EXTABLE(1234b, 3f)
26478+#endif
26479+
26480 LOCK_PREFIX
26481 cmpxchg8b (%esi)
26482 jne 1b
26483@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
26484 CFI_ADJUST_CFA_OFFSET -8
26485 RESTORE ebx
26486 RESTORE ebp
26487+ pax_force_retaddr
26488 ret
26489 4:
26490 cmpl %edx, 4(%esp)
26491@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
26492 xorl %ecx, %ecx
26493 addl $1, %ebx
26494 adcl %edx, %ecx
26495+
26496+#ifdef CONFIG_PAX_REFCOUNT
26497+ into
26498+1234:
26499+ _ASM_EXTABLE(1234b, 3f)
26500+#endif
26501+
26502 LOCK_PREFIX
26503 cmpxchg8b (%esi)
26504 jne 1b
26505@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
26506 movl $1, %eax
26507 3:
26508 RESTORE ebx
26509+ pax_force_retaddr
26510 ret
26511 CFI_ENDPROC
26512 ENDPROC(atomic64_inc_not_zero_cx8)
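pax_force_retaddr, inserted before nearly every ret in these library routines, is KERNEXEC's return-address hook. Its expansion is not in this section; judging from its use (including the "pax_force_retaddr 0, 1" reload form in the csum-copy_64.S hunk below) it rewrites the saved return address on the stack, either by setting the top address bit or by OR-ing in a mask kept in a reserved register, so that a corrupted return address cannot land in userland. An editorial rendering only:

    /* conceptual C-level rendering (assumption); the real macro is assembly */
    *saved_ret_slot |= 1UL << 63; /* force the return target into the kernel half */
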
26513diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
26514index e78b8ee..7e173a8 100644
26515--- a/arch/x86/lib/checksum_32.S
26516+++ b/arch/x86/lib/checksum_32.S
26517@@ -29,7 +29,8 @@
26518 #include <asm/dwarf2.h>
26519 #include <asm/errno.h>
26520 #include <asm/asm.h>
26521-
26522+#include <asm/segment.h>
26523+
26524 /*
26525 * computes a partial checksum, e.g. for TCP/UDP fragments
26526 */
26527@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
26528
26529 #define ARGBASE 16
26530 #define FP 12
26531-
26532-ENTRY(csum_partial_copy_generic)
26533+
26534+ENTRY(csum_partial_copy_generic_to_user)
26535 CFI_STARTPROC
26536+
26537+#ifdef CONFIG_PAX_MEMORY_UDEREF
26538+ pushl_cfi %gs
26539+ popl_cfi %es
26540+ jmp csum_partial_copy_generic
26541+#endif
26542+
26543+ENTRY(csum_partial_copy_generic_from_user)
26544+
26545+#ifdef CONFIG_PAX_MEMORY_UDEREF
26546+ pushl_cfi %gs
26547+ popl_cfi %ds
26548+#endif
26549+
26550+ENTRY(csum_partial_copy_generic)
26551 subl $4,%esp
26552 CFI_ADJUST_CFA_OFFSET 4
26553 pushl_cfi %edi
26554@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
26555 jmp 4f
26556 SRC(1: movw (%esi), %bx )
26557 addl $2, %esi
26558-DST( movw %bx, (%edi) )
26559+DST( movw %bx, %es:(%edi) )
26560 addl $2, %edi
26561 addw %bx, %ax
26562 adcl $0, %eax
26563@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
26564 SRC(1: movl (%esi), %ebx )
26565 SRC( movl 4(%esi), %edx )
26566 adcl %ebx, %eax
26567-DST( movl %ebx, (%edi) )
26568+DST( movl %ebx, %es:(%edi) )
26569 adcl %edx, %eax
26570-DST( movl %edx, 4(%edi) )
26571+DST( movl %edx, %es:4(%edi) )
26572
26573 SRC( movl 8(%esi), %ebx )
26574 SRC( movl 12(%esi), %edx )
26575 adcl %ebx, %eax
26576-DST( movl %ebx, 8(%edi) )
26577+DST( movl %ebx, %es:8(%edi) )
26578 adcl %edx, %eax
26579-DST( movl %edx, 12(%edi) )
26580+DST( movl %edx, %es:12(%edi) )
26581
26582 SRC( movl 16(%esi), %ebx )
26583 SRC( movl 20(%esi), %edx )
26584 adcl %ebx, %eax
26585-DST( movl %ebx, 16(%edi) )
26586+DST( movl %ebx, %es:16(%edi) )
26587 adcl %edx, %eax
26588-DST( movl %edx, 20(%edi) )
26589+DST( movl %edx, %es:20(%edi) )
26590
26591 SRC( movl 24(%esi), %ebx )
26592 SRC( movl 28(%esi), %edx )
26593 adcl %ebx, %eax
26594-DST( movl %ebx, 24(%edi) )
26595+DST( movl %ebx, %es:24(%edi) )
26596 adcl %edx, %eax
26597-DST( movl %edx, 28(%edi) )
26598+DST( movl %edx, %es:28(%edi) )
26599
26600 lea 32(%esi), %esi
26601 lea 32(%edi), %edi
26602@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
26603 shrl $2, %edx # This clears CF
26604 SRC(3: movl (%esi), %ebx )
26605 adcl %ebx, %eax
26606-DST( movl %ebx, (%edi) )
26607+DST( movl %ebx, %es:(%edi) )
26608 lea 4(%esi), %esi
26609 lea 4(%edi), %edi
26610 dec %edx
26611@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
26612 jb 5f
26613 SRC( movw (%esi), %cx )
26614 leal 2(%esi), %esi
26615-DST( movw %cx, (%edi) )
26616+DST( movw %cx, %es:(%edi) )
26617 leal 2(%edi), %edi
26618 je 6f
26619 shll $16,%ecx
26620 SRC(5: movb (%esi), %cl )
26621-DST( movb %cl, (%edi) )
26622+DST( movb %cl, %es:(%edi) )
26623 6: addl %ecx, %eax
26624 adcl $0, %eax
26625 7:
26626@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
26627
26628 6001:
26629 movl ARGBASE+20(%esp), %ebx # src_err_ptr
26630- movl $-EFAULT, (%ebx)
26631+ movl $-EFAULT, %ss:(%ebx)
26632
26633 # zero the complete destination - computing the rest
26634 # is too much work
26635@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
26636
26637 6002:
26638 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
26639- movl $-EFAULT,(%ebx)
26640+ movl $-EFAULT,%ss:(%ebx)
26641 jmp 5000b
26642
26643 .previous
26644
26645+ pushl_cfi %ss
26646+ popl_cfi %ds
26647+ pushl_cfi %ss
26648+ popl_cfi %es
26649 popl_cfi %ebx
26650 CFI_RESTORE ebx
26651 popl_cfi %esi
26652@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
26653 popl_cfi %ecx # equivalent to addl $4,%esp
26654 ret
26655 CFI_ENDPROC
26656-ENDPROC(csum_partial_copy_generic)
26657+ENDPROC(csum_partial_copy_generic_to_user)
26658
26659 #else
26660
26661 /* Version for PentiumII/PPro */
26662
26663 #define ROUND1(x) \
26664+ nop; nop; nop; \
26665 SRC(movl x(%esi), %ebx ) ; \
26666 addl %ebx, %eax ; \
26667- DST(movl %ebx, x(%edi) ) ;
26668+ DST(movl %ebx, %es:x(%edi)) ;
26669
26670 #define ROUND(x) \
26671+ nop; nop; nop; \
26672 SRC(movl x(%esi), %ebx ) ; \
26673 adcl %ebx, %eax ; \
26674- DST(movl %ebx, x(%edi) ) ;
26675+ DST(movl %ebx, %es:x(%edi)) ;
26676
26677 #define ARGBASE 12
26678-
26679-ENTRY(csum_partial_copy_generic)
26680+
26681+ENTRY(csum_partial_copy_generic_to_user)
26682 CFI_STARTPROC
26683+
26684+#ifdef CONFIG_PAX_MEMORY_UDEREF
26685+ pushl_cfi %gs
26686+ popl_cfi %es
26687+ jmp csum_partial_copy_generic
26688+#endif
26689+
26690+ENTRY(csum_partial_copy_generic_from_user)
26691+
26692+#ifdef CONFIG_PAX_MEMORY_UDEREF
26693+ pushl_cfi %gs
26694+ popl_cfi %ds
26695+#endif
26696+
26697+ENTRY(csum_partial_copy_generic)
26698 pushl_cfi %ebx
26699 CFI_REL_OFFSET ebx, 0
26700 pushl_cfi %edi
26701@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
26702 subl %ebx, %edi
26703 lea -1(%esi),%edx
26704 andl $-32,%edx
26705- lea 3f(%ebx,%ebx), %ebx
26706+ lea 3f(%ebx,%ebx,2), %ebx
26707 testl %esi, %esi
26708 jmp *%ebx
26709 1: addl $64,%esi
26710@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
26711 jb 5f
26712 SRC( movw (%esi), %dx )
26713 leal 2(%esi), %esi
26714-DST( movw %dx, (%edi) )
26715+DST( movw %dx, %es:(%edi) )
26716 leal 2(%edi), %edi
26717 je 6f
26718 shll $16,%edx
26719 5:
26720 SRC( movb (%esi), %dl )
26721-DST( movb %dl, (%edi) )
26722+DST( movb %dl, %es:(%edi) )
26723 6: addl %edx, %eax
26724 adcl $0, %eax
26725 7:
26726 .section .fixup, "ax"
26727 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
26728- movl $-EFAULT, (%ebx)
26729+ movl $-EFAULT, %ss:(%ebx)
26730 # zero the complete destination (computing the rest is too much work)
26731 movl ARGBASE+8(%esp),%edi # dst
26732 movl ARGBASE+12(%esp),%ecx # len
26733@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
26734 rep; stosb
26735 jmp 7b
26736 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
26737- movl $-EFAULT, (%ebx)
26738+ movl $-EFAULT, %ss:(%ebx)
26739 jmp 7b
26740 .previous
26741
26742+#ifdef CONFIG_PAX_MEMORY_UDEREF
26743+ pushl_cfi %ss
26744+ popl_cfi %ds
26745+ pushl_cfi %ss
26746+ popl_cfi %es
26747+#endif
26748+
26749 popl_cfi %esi
26750 CFI_RESTORE esi
26751 popl_cfi %edi
26752@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
26753 CFI_RESTORE ebx
26754 ret
26755 CFI_ENDPROC
26756-ENDPROC(csum_partial_copy_generic)
26757+ENDPROC(csum_partial_copy_generic_to_user)
26758
26759 #undef ROUND
26760 #undef ROUND1
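An editorial reading of the checksum_32.S changes above: under UDEREF the copy runs with %es (to-user) or %ds (from-user) loaded from %gs, so every user-side store carries an explicit %es: override, the fixup paths address kernel memory through %ss:, and the epilogue restores flat %ds/%es from %ss. The added "nop; nop; nop;" padding in the PentiumII/PPro variant is not decoration: that unrolled loop is entered through a computed jump ("jmp *%ebx"), so every ROUND must occupy a fixed byte count; the segment override grows each mov by one byte, hence the padding and the matching stride bump from "lea 3f(%ebx,%ebx)" to "lea 3f(%ebx,%ebx,2)".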
26761diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
26762index f2145cf..cea889d 100644
26763--- a/arch/x86/lib/clear_page_64.S
26764+++ b/arch/x86/lib/clear_page_64.S
26765@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
26766 movl $4096/8,%ecx
26767 xorl %eax,%eax
26768 rep stosq
26769+ pax_force_retaddr
26770 ret
26771 CFI_ENDPROC
26772 ENDPROC(clear_page_c)
26773@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
26774 movl $4096,%ecx
26775 xorl %eax,%eax
26776 rep stosb
26777+ pax_force_retaddr
26778 ret
26779 CFI_ENDPROC
26780 ENDPROC(clear_page_c_e)
26781@@ -43,6 +45,7 @@ ENTRY(clear_page)
26782 leaq 64(%rdi),%rdi
26783 jnz .Lloop
26784 nop
26785+ pax_force_retaddr
26786 ret
26787 CFI_ENDPROC
26788 .Lclear_page_end:
26789@@ -58,7 +61,7 @@ ENDPROC(clear_page)
26790
26791 #include <asm/cpufeature.h>
26792
26793- .section .altinstr_replacement,"ax"
26794+ .section .altinstr_replacement,"a"
26795 1: .byte 0xeb /* jmp <disp8> */
26796 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
26797 2: .byte 0xeb /* jmp <disp8> */
26798diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
26799index 1e572c5..2a162cd 100644
26800--- a/arch/x86/lib/cmpxchg16b_emu.S
26801+++ b/arch/x86/lib/cmpxchg16b_emu.S
26802@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
26803
26804 popf
26805 mov $1, %al
26806+ pax_force_retaddr
26807 ret
26808
26809 not_same:
26810 popf
26811 xor %al,%al
26812+ pax_force_retaddr
26813 ret
26814
26815 CFI_ENDPROC
26816diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
26817index 176cca6..1166c50 100644
26818--- a/arch/x86/lib/copy_page_64.S
26819+++ b/arch/x86/lib/copy_page_64.S
26820@@ -9,6 +9,7 @@ copy_page_rep:
26821 CFI_STARTPROC
26822 movl $4096/8, %ecx
26823 rep movsq
26824+ pax_force_retaddr
26825 ret
26826 CFI_ENDPROC
26827 ENDPROC(copy_page_rep)
26828@@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
26829
26830 ENTRY(copy_page)
26831 CFI_STARTPROC
26832- subq $2*8, %rsp
26833- CFI_ADJUST_CFA_OFFSET 2*8
26834+ subq $3*8, %rsp
26835+ CFI_ADJUST_CFA_OFFSET 3*8
26836 movq %rbx, (%rsp)
26837 CFI_REL_OFFSET rbx, 0
26838 movq %r12, 1*8(%rsp)
26839 CFI_REL_OFFSET r12, 1*8
26840+ movq %r13, 2*8(%rsp)
26841+ CFI_REL_OFFSET r13, 2*8
26842
26843 movl $(4096/64)-5, %ecx
26844 .p2align 4
26845@@ -36,7 +39,7 @@ ENTRY(copy_page)
26846 movq 0x8*2(%rsi), %rdx
26847 movq 0x8*3(%rsi), %r8
26848 movq 0x8*4(%rsi), %r9
26849- movq 0x8*5(%rsi), %r10
26850+ movq 0x8*5(%rsi), %r13
26851 movq 0x8*6(%rsi), %r11
26852 movq 0x8*7(%rsi), %r12
26853
26854@@ -47,7 +50,7 @@ ENTRY(copy_page)
26855 movq %rdx, 0x8*2(%rdi)
26856 movq %r8, 0x8*3(%rdi)
26857 movq %r9, 0x8*4(%rdi)
26858- movq %r10, 0x8*5(%rdi)
26859+ movq %r13, 0x8*5(%rdi)
26860 movq %r11, 0x8*6(%rdi)
26861 movq %r12, 0x8*7(%rdi)
26862
26863@@ -66,7 +69,7 @@ ENTRY(copy_page)
26864 movq 0x8*2(%rsi), %rdx
26865 movq 0x8*3(%rsi), %r8
26866 movq 0x8*4(%rsi), %r9
26867- movq 0x8*5(%rsi), %r10
26868+ movq 0x8*5(%rsi), %r13
26869 movq 0x8*6(%rsi), %r11
26870 movq 0x8*7(%rsi), %r12
26871
26872@@ -75,7 +78,7 @@ ENTRY(copy_page)
26873 movq %rdx, 0x8*2(%rdi)
26874 movq %r8, 0x8*3(%rdi)
26875 movq %r9, 0x8*4(%rdi)
26876- movq %r10, 0x8*5(%rdi)
26877+ movq %r13, 0x8*5(%rdi)
26878 movq %r11, 0x8*6(%rdi)
26879 movq %r12, 0x8*7(%rdi)
26880
26881@@ -87,8 +90,11 @@ ENTRY(copy_page)
26882 CFI_RESTORE rbx
26883 movq 1*8(%rsp), %r12
26884 CFI_RESTORE r12
26885- addq $2*8, %rsp
26886- CFI_ADJUST_CFA_OFFSET -2*8
26887+ movq 2*8(%rsp), %r13
26888+ CFI_RESTORE r13
26889+ addq $3*8, %rsp
26890+ CFI_ADJUST_CFA_OFFSET -3*8
26891+ pax_force_retaddr
26892 ret
26893 .Lcopy_page_end:
26894 CFI_ENDPROC
26895@@ -99,7 +105,7 @@ ENDPROC(copy_page)
26896
26897 #include <asm/cpufeature.h>
26898
26899- .section .altinstr_replacement,"ax"
26900+ .section .altinstr_replacement,"a"
26901 1: .byte 0xeb /* jmp <disp8> */
26902 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
26903 2:
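Two recurring changes in the clear_page/copy_page hunks: first, %r10 is systematically vacated (renamed to %r13 here, with a third save slot added to the prologue and epilogue), apparently because the KERNEXEC plugin's "or" method reserves %r10 for its function-pointer/return-address mask, as the "pax_force_retaddr 0, 1" reload form suggests, so hand-written assembly must stop clobbering it. Second, .altinstr_replacement drops its executable flag ("ax" becomes "a"): replacement instructions are only ever copied over the original sites by apply_alternatives(), never executed inside that section, so mapping it executable is unnecessary exposure.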
26904diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
26905index a30ca15..6b3f4e1 100644
26906--- a/arch/x86/lib/copy_user_64.S
26907+++ b/arch/x86/lib/copy_user_64.S
26908@@ -18,31 +18,7 @@
26909 #include <asm/alternative-asm.h>
26910 #include <asm/asm.h>
26911 #include <asm/smap.h>
26912-
26913-/*
26914- * By placing feature2 after feature1 in altinstructions section, we logically
26915- * implement:
26916- * If CPU has feature2, jmp to alt2 is used
26917- * else if CPU has feature1, jmp to alt1 is used
26918- * else jmp to orig is used.
26919- */
26920- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
26921-0:
26922- .byte 0xe9 /* 32bit jump */
26923- .long \orig-1f /* by default jump to orig */
26924-1:
26925- .section .altinstr_replacement,"ax"
26926-2: .byte 0xe9 /* near jump with 32bit immediate */
26927- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
26928-3: .byte 0xe9 /* near jump with 32bit immediate */
26929- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
26930- .previous
26931-
26932- .section .altinstructions,"a"
26933- altinstruction_entry 0b,2b,\feature1,5,5
26934- altinstruction_entry 0b,3b,\feature2,5,5
26935- .previous
26936- .endm
26937+#include <asm/pgtable.h>
26938
26939 .macro ALIGN_DESTINATION
26940 #ifdef FIX_ALIGNMENT
26941@@ -70,52 +46,6 @@
26942 #endif
26943 .endm
26944
26945-/* Standard copy_to_user with segment limit checking */
26946-ENTRY(_copy_to_user)
26947- CFI_STARTPROC
26948- GET_THREAD_INFO(%rax)
26949- movq %rdi,%rcx
26950- addq %rdx,%rcx
26951- jc bad_to_user
26952- cmpq TI_addr_limit(%rax),%rcx
26953- ja bad_to_user
26954- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
26955- copy_user_generic_unrolled,copy_user_generic_string, \
26956- copy_user_enhanced_fast_string
26957- CFI_ENDPROC
26958-ENDPROC(_copy_to_user)
26959-
26960-/* Standard copy_from_user with segment limit checking */
26961-ENTRY(_copy_from_user)
26962- CFI_STARTPROC
26963- GET_THREAD_INFO(%rax)
26964- movq %rsi,%rcx
26965- addq %rdx,%rcx
26966- jc bad_from_user
26967- cmpq TI_addr_limit(%rax),%rcx
26968- ja bad_from_user
26969- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
26970- copy_user_generic_unrolled,copy_user_generic_string, \
26971- copy_user_enhanced_fast_string
26972- CFI_ENDPROC
26973-ENDPROC(_copy_from_user)
26974-
26975- .section .fixup,"ax"
26976- /* must zero dest */
26977-ENTRY(bad_from_user)
26978-bad_from_user:
26979- CFI_STARTPROC
26980- movl %edx,%ecx
26981- xorl %eax,%eax
26982- rep
26983- stosb
26984-bad_to_user:
26985- movl %edx,%eax
26986- ret
26987- CFI_ENDPROC
26988-ENDPROC(bad_from_user)
26989- .previous
26990-
26991 /*
26992 * copy_user_generic_unrolled - memory copy with exception handling.
26993 * This version is for CPUs like P4 that don't have efficient micro
26994@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
26995 */
26996 ENTRY(copy_user_generic_unrolled)
26997 CFI_STARTPROC
26998+ ASM_PAX_OPEN_USERLAND
26999 ASM_STAC
27000 cmpl $8,%edx
27001 jb 20f /* less then 8 bytes, go to byte copy loop */
27002@@ -141,19 +72,19 @@ ENTRY(copy_user_generic_unrolled)
27003 jz 17f
27004 1: movq (%rsi),%r8
27005 2: movq 1*8(%rsi),%r9
27006-3: movq 2*8(%rsi),%r10
27007+3: movq 2*8(%rsi),%rax
27008 4: movq 3*8(%rsi),%r11
27009 5: movq %r8,(%rdi)
27010 6: movq %r9,1*8(%rdi)
27011-7: movq %r10,2*8(%rdi)
27012+7: movq %rax,2*8(%rdi)
27013 8: movq %r11,3*8(%rdi)
27014 9: movq 4*8(%rsi),%r8
27015 10: movq 5*8(%rsi),%r9
27016-11: movq 6*8(%rsi),%r10
27017+11: movq 6*8(%rsi),%rax
27018 12: movq 7*8(%rsi),%r11
27019 13: movq %r8,4*8(%rdi)
27020 14: movq %r9,5*8(%rdi)
27021-15: movq %r10,6*8(%rdi)
27022+15: movq %rax,6*8(%rdi)
27023 16: movq %r11,7*8(%rdi)
27024 leaq 64(%rsi),%rsi
27025 leaq 64(%rdi),%rdi
27026@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
27027 jnz 21b
27028 23: xor %eax,%eax
27029 ASM_CLAC
27030+ ASM_PAX_CLOSE_USERLAND
27031+ pax_force_retaddr
27032 ret
27033
27034 .section .fixup,"ax"
27035@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
27036 */
27037 ENTRY(copy_user_generic_string)
27038 CFI_STARTPROC
27039+ ASM_PAX_OPEN_USERLAND
27040 ASM_STAC
27041 andl %edx,%edx
27042 jz 4f
27043@@ -251,6 +185,8 @@ ENTRY(copy_user_generic_string)
27044 movsb
27045 4: xorl %eax,%eax
27046 ASM_CLAC
27047+ ASM_PAX_CLOSE_USERLAND
27048+ pax_force_retaddr
27049 ret
27050
27051 .section .fixup,"ax"
27052@@ -278,6 +214,7 @@ ENDPROC(copy_user_generic_string)
27053 */
27054 ENTRY(copy_user_enhanced_fast_string)
27055 CFI_STARTPROC
27056+ ASM_PAX_OPEN_USERLAND
27057 ASM_STAC
27058 andl %edx,%edx
27059 jz 2f
27060@@ -286,6 +223,8 @@ ENTRY(copy_user_enhanced_fast_string)
27061 movsb
27062 2: xorl %eax,%eax
27063 ASM_CLAC
27064+ ASM_PAX_CLOSE_USERLAND
27065+ pax_force_retaddr
27066 ret
27067
27068 .section .fixup,"ax"
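With the assembly _copy_to_user/_copy_from_user stubs, and the bad_from_user fixup that zeroed the destination, deleted above, the limit checking must move into C wrappers outside this section. Their observable contract should be unchanged; a minimal sketch assuming the conventional semantics (return value is the number of bytes NOT copied, destination zeroed on a failed read):

    /* hedged sketch of a C-side replacement; not the patch's verbatim wrapper */
    static inline unsigned long
    _copy_from_user(void *to, const void __user *from, unsigned long n)
    {
            if (access_ok(VERIFY_READ, from, n))
                    return copy_user_generic(to, (__force const void *)from, n);
            memset(to, 0, n); /* mirror the removed bad_from_user path */
            return n;
    }
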
27069diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
27070index 6a4f43c..55d26f2 100644
27071--- a/arch/x86/lib/copy_user_nocache_64.S
27072+++ b/arch/x86/lib/copy_user_nocache_64.S
27073@@ -8,6 +8,7 @@
27074
27075 #include <linux/linkage.h>
27076 #include <asm/dwarf2.h>
27077+#include <asm/alternative-asm.h>
27078
27079 #define FIX_ALIGNMENT 1
27080
27081@@ -16,6 +17,7 @@
27082 #include <asm/thread_info.h>
27083 #include <asm/asm.h>
27084 #include <asm/smap.h>
27085+#include <asm/pgtable.h>
27086
27087 .macro ALIGN_DESTINATION
27088 #ifdef FIX_ALIGNMENT
27089@@ -49,6 +51,16 @@
27090 */
27091 ENTRY(__copy_user_nocache)
27092 CFI_STARTPROC
27093+
27094+#ifdef CONFIG_PAX_MEMORY_UDEREF
27095+ mov pax_user_shadow_base,%rcx
27096+ cmp %rcx,%rsi
27097+ jae 1f
27098+ add %rcx,%rsi
27099+1:
27100+#endif
27101+
27102+ ASM_PAX_OPEN_USERLAND
27103 ASM_STAC
27104 cmpl $8,%edx
27105 jb 20f /* less then 8 bytes, go to byte copy loop */
27106@@ -59,19 +71,19 @@ ENTRY(__copy_user_nocache)
27107 jz 17f
27108 1: movq (%rsi),%r8
27109 2: movq 1*8(%rsi),%r9
27110-3: movq 2*8(%rsi),%r10
27111+3: movq 2*8(%rsi),%rax
27112 4: movq 3*8(%rsi),%r11
27113 5: movnti %r8,(%rdi)
27114 6: movnti %r9,1*8(%rdi)
27115-7: movnti %r10,2*8(%rdi)
27116+7: movnti %rax,2*8(%rdi)
27117 8: movnti %r11,3*8(%rdi)
27118 9: movq 4*8(%rsi),%r8
27119 10: movq 5*8(%rsi),%r9
27120-11: movq 6*8(%rsi),%r10
27121+11: movq 6*8(%rsi),%rax
27122 12: movq 7*8(%rsi),%r11
27123 13: movnti %r8,4*8(%rdi)
27124 14: movnti %r9,5*8(%rdi)
27125-15: movnti %r10,6*8(%rdi)
27126+15: movnti %rax,6*8(%rdi)
27127 16: movnti %r11,7*8(%rdi)
27128 leaq 64(%rsi),%rsi
27129 leaq 64(%rdi),%rdi
27130@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
27131 jnz 21b
27132 23: xorl %eax,%eax
27133 ASM_CLAC
27134+ ASM_PAX_CLOSE_USERLAND
27135 sfence
27136+ pax_force_retaddr
27137 ret
27138
27139 .section .fixup,"ax"
27140diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
27141index 2419d5f..953ee51 100644
27142--- a/arch/x86/lib/csum-copy_64.S
27143+++ b/arch/x86/lib/csum-copy_64.S
27144@@ -9,6 +9,7 @@
27145 #include <asm/dwarf2.h>
27146 #include <asm/errno.h>
27147 #include <asm/asm.h>
27148+#include <asm/alternative-asm.h>
27149
27150 /*
27151 * Checksum copy with exception handling.
27152@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
27153 CFI_RESTORE rbp
27154 addq $7*8, %rsp
27155 CFI_ADJUST_CFA_OFFSET -7*8
27156+ pax_force_retaddr 0, 1
27157 ret
27158 CFI_RESTORE_STATE
27159
27160diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
27161index 25b7ae8..c40113e 100644
27162--- a/arch/x86/lib/csum-wrappers_64.c
27163+++ b/arch/x86/lib/csum-wrappers_64.c
27164@@ -52,8 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
27165 len -= 2;
27166 }
27167 }
27168- isum = csum_partial_copy_generic((__force const void *)src,
27169+ pax_open_userland();
27170+ stac();
27171+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
27172 dst, len, isum, errp, NULL);
27173+ clac();
27174+ pax_close_userland();
27175 if (unlikely(*errp))
27176 goto out_err;
27177
27178@@ -105,8 +109,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
27179 }
27180
27181 *errp = 0;
27182- return csum_partial_copy_generic(src, (void __force *)dst,
27183+ pax_open_userland();
27184+ stac();
27185+ isum = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
27186 len, isum, NULL, errp);
27187+ clac();
27188+ pax_close_userland();
27189+ return isum;
27190 }
27191 EXPORT_SYMBOL(csum_partial_copy_to_user);
27192
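
In the C checksum wrappers the same bracketing appears at the source level: pax_open_userland()/stac() before csum_partial_copy_generic() and clac()/pax_close_userland() after. That is also why the copy_to_user variant now stores the result in isum instead of returning the call directly, so the unwind still runs. A sketch of that return-through-a-local shape, using a trivial stand-in instead of the real checksum routine:

/* Stand-in for csum_partial_copy_generic(), not the real algorithm. */
static unsigned int checksum_stub(const void *p, int len, unsigned int sum)
{
    const unsigned char *b = p;
    while (len--)
        sum += *b++;
    return sum;
}

static unsigned int copy_and_csum_model(const void *src, int len,
                                        unsigned int isum)
{
    /* pax_open_userland(); stac();   -- open the access window  */
    isum = checksum_stub(src, len, isum);
    /* clac(); pax_close_userland();  -- unwind before returning */
    return isum;  /* an early 'return checksum(...)' would skip the unwind */
}
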
27193diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
27194index a451235..1daa956 100644
27195--- a/arch/x86/lib/getuser.S
27196+++ b/arch/x86/lib/getuser.S
27197@@ -33,17 +33,40 @@
27198 #include <asm/thread_info.h>
27199 #include <asm/asm.h>
27200 #include <asm/smap.h>
27201+#include <asm/segment.h>
27202+#include <asm/pgtable.h>
27203+#include <asm/alternative-asm.h>
27204+
27205+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
27206+#define __copyuser_seg gs;
27207+#else
27208+#define __copyuser_seg
27209+#endif
27210
27211 .text
27212 ENTRY(__get_user_1)
27213 CFI_STARTPROC
27214+
27215+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
27216 GET_THREAD_INFO(%_ASM_DX)
27217 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
27218 jae bad_get_user
27219 ASM_STAC
27220-1: movzbl (%_ASM_AX),%edx
27221+
27222+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27223+ mov pax_user_shadow_base,%_ASM_DX
27224+ cmp %_ASM_DX,%_ASM_AX
27225+ jae 1234f
27226+ add %_ASM_DX,%_ASM_AX
27227+1234:
27228+#endif
27229+
27230+#endif
27231+
27232+1: __copyuser_seg movzbl (%_ASM_AX),%edx
27233 xor %eax,%eax
27234 ASM_CLAC
27235+ pax_force_retaddr
27236 ret
27237 CFI_ENDPROC
27238 ENDPROC(__get_user_1)
27239@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
27240 ENTRY(__get_user_2)
27241 CFI_STARTPROC
27242 add $1,%_ASM_AX
27243+
27244+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
27245 jc bad_get_user
27246 GET_THREAD_INFO(%_ASM_DX)
27247 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
27248 jae bad_get_user
27249 ASM_STAC
27250-2: movzwl -1(%_ASM_AX),%edx
27251+
27252+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27253+ mov pax_user_shadow_base,%_ASM_DX
27254+ cmp %_ASM_DX,%_ASM_AX
27255+ jae 1234f
27256+ add %_ASM_DX,%_ASM_AX
27257+1234:
27258+#endif
27259+
27260+#endif
27261+
27262+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
27263 xor %eax,%eax
27264 ASM_CLAC
27265+ pax_force_retaddr
27266 ret
27267 CFI_ENDPROC
27268 ENDPROC(__get_user_2)
27269@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
27270 ENTRY(__get_user_4)
27271 CFI_STARTPROC
27272 add $3,%_ASM_AX
27273+
27274+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
27275 jc bad_get_user
27276 GET_THREAD_INFO(%_ASM_DX)
27277 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
27278 jae bad_get_user
27279 ASM_STAC
27280-3: movl -3(%_ASM_AX),%edx
27281+
27282+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27283+ mov pax_user_shadow_base,%_ASM_DX
27284+ cmp %_ASM_DX,%_ASM_AX
27285+ jae 1234f
27286+ add %_ASM_DX,%_ASM_AX
27287+1234:
27288+#endif
27289+
27290+#endif
27291+
27292+3: __copyuser_seg movl -3(%_ASM_AX),%edx
27293 xor %eax,%eax
27294 ASM_CLAC
27295+ pax_force_retaddr
27296 ret
27297 CFI_ENDPROC
27298 ENDPROC(__get_user_4)
27299@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
27300 GET_THREAD_INFO(%_ASM_DX)
27301 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
27302 jae bad_get_user
27303+
27304+#ifdef CONFIG_PAX_MEMORY_UDEREF
27305+ mov pax_user_shadow_base,%_ASM_DX
27306+ cmp %_ASM_DX,%_ASM_AX
27307+ jae 1234f
27308+ add %_ASM_DX,%_ASM_AX
27309+1234:
27310+#endif
27311+
27312 ASM_STAC
27313 4: movq -7(%_ASM_AX),%rdx
27314 xor %eax,%eax
27315 ASM_CLAC
27316+ pax_force_retaddr
27317 ret
27318 #else
27319 add $7,%_ASM_AX
27320@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
27321 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
27322 jae bad_get_user_8
27323 ASM_STAC
27324-4: movl -7(%_ASM_AX),%edx
27325-5: movl -3(%_ASM_AX),%ecx
27326+4: __copyuser_seg movl -7(%_ASM_AX),%edx
27327+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
27328 xor %eax,%eax
27329 ASM_CLAC
27330+ pax_force_retaddr
27331 ret
27332 #endif
27333 CFI_ENDPROC
27334@@ -113,6 +175,7 @@ bad_get_user:
27335 xor %edx,%edx
27336 mov $(-EFAULT),%_ASM_AX
27337 ASM_CLAC
27338+ pax_force_retaddr
27339 ret
27340 CFI_ENDPROC
27341 END(bad_get_user)
27342@@ -124,6 +187,7 @@ bad_get_user_8:
27343 xor %ecx,%ecx
27344 mov $(-EFAULT),%_ASM_AX
27345 ASM_CLAC
27346+ pax_force_retaddr
27347 ret
27348 CFI_ENDPROC
27349 END(bad_get_user_8)
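
The __get_user_* changes amount to two access policies. On i386 with UDEREF the explicit addr_limit compare is compiled out, since the %gs-based __copyuser_seg override bounds the access through the segment limit instead; on amd64 with UDEREF the pointer is additionally rebased through pax_user_shadow_base. The 1234 local labels merely avoid colliding with the numbered exception labels. A combined sketch, with invented constants standing in for TI_addr_limit and the shadow base:

#include <stdint.h>

#define ADDR_LIMIT   0x00007ffffffff000ULL    /* TI_addr_limit stand-in */
#define SHADOW_BASE  0xffff880000000000ULL    /* stand-in value         */

/* Returns the address to dereference, or 0 for bad_get_user. */
static uint64_t get_user_addr_model(uint64_t addr)
{
#ifndef MODEL_I386_UDEREF           /* i386+UDEREF relies on the %gs limit */
    if (addr >= ADDR_LIMIT)         /* cmp TI_addr_limit; jae bad_get_user */
        return 0;
#endif
#ifdef MODEL_AMD64_UDEREF
    if (addr < SHADOW_BASE)         /* jae 1234f                           */
        addr += SHADOW_BASE;        /* add %_ASM_DX,%_ASM_AX               */
#endif
    return addr;
}
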
27350diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
27351index 54fcffe..7be149e 100644
27352--- a/arch/x86/lib/insn.c
27353+++ b/arch/x86/lib/insn.c
27354@@ -20,8 +20,10 @@
27355
27356 #ifdef __KERNEL__
27357 #include <linux/string.h>
27358+#include <asm/pgtable_types.h>
27359 #else
27360 #include <string.h>
27361+#define ktla_ktva(addr) addr
27362 #endif
27363 #include <asm/inat.h>
27364 #include <asm/insn.h>
27365@@ -53,8 +55,8 @@
27366 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
27367 {
27368 memset(insn, 0, sizeof(*insn));
27369- insn->kaddr = kaddr;
27370- insn->next_byte = kaddr;
27371+ insn->kaddr = ktla_ktva(kaddr);
27372+ insn->next_byte = ktla_ktva(kaddr);
27373 insn->x86_64 = x86_64 ? 1 : 0;
27374 insn->opnd_bytes = 4;
27375 if (x86_64)
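
insn_init() now routes the decode address through ktla_ktva(). The assumption behind the change: under i386 KERNEXEC the kernel text is reachable through an aliased mapping, so the decoder must read instruction bytes via that alias, while userspace builds of the decoder get the identity #define added above. A sketch under that assumption, with an invented alias offset:

/* Illustrative only: the real translation depends on the kernel layout. */
#define KTEXT_ALIAS_OFFSET 0x10000000UL   /* made-up delta for the sketch */

static const void *ktla_ktva_model(const void *kaddr)
{
    return (const char *)kaddr + KTEXT_ALIAS_OFFSET;
}
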
27376diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
27377index 05a95e7..326f2fa 100644
27378--- a/arch/x86/lib/iomap_copy_64.S
27379+++ b/arch/x86/lib/iomap_copy_64.S
27380@@ -17,6 +17,7 @@
27381
27382 #include <linux/linkage.h>
27383 #include <asm/dwarf2.h>
27384+#include <asm/alternative-asm.h>
27385
27386 /*
27387 * override generic version in lib/iomap_copy.c
27388@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
27389 CFI_STARTPROC
27390 movl %edx,%ecx
27391 rep movsd
27392+ pax_force_retaddr
27393 ret
27394 CFI_ENDPROC
27395 ENDPROC(__iowrite32_copy)
27396diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
27397index 56313a3..9b59269 100644
27398--- a/arch/x86/lib/memcpy_64.S
27399+++ b/arch/x86/lib/memcpy_64.S
27400@@ -24,7 +24,7 @@
27401 * This gets patched over the unrolled variant (below) via the
27402 * alternative instructions framework:
27403 */
27404- .section .altinstr_replacement, "ax", @progbits
27405+ .section .altinstr_replacement, "a", @progbits
27406 .Lmemcpy_c:
27407 movq %rdi, %rax
27408 movq %rdx, %rcx
27409@@ -33,6 +33,7 @@
27410 rep movsq
27411 movl %edx, %ecx
27412 rep movsb
27413+ pax_force_retaddr
27414 ret
27415 .Lmemcpy_e:
27416 .previous
27417@@ -44,11 +45,12 @@
27418 * This gets patched over the unrolled variant (below) via the
27419 * alternative instructions framework:
27420 */
27421- .section .altinstr_replacement, "ax", @progbits
27422+ .section .altinstr_replacement, "a", @progbits
27423 .Lmemcpy_c_e:
27424 movq %rdi, %rax
27425 movq %rdx, %rcx
27426 rep movsb
27427+ pax_force_retaddr
27428 ret
27429 .Lmemcpy_e_e:
27430 .previous
27431@@ -76,13 +78,13 @@ ENTRY(memcpy)
27432 */
27433 movq 0*8(%rsi), %r8
27434 movq 1*8(%rsi), %r9
27435- movq 2*8(%rsi), %r10
27436+ movq 2*8(%rsi), %rcx
27437 movq 3*8(%rsi), %r11
27438 leaq 4*8(%rsi), %rsi
27439
27440 movq %r8, 0*8(%rdi)
27441 movq %r9, 1*8(%rdi)
27442- movq %r10, 2*8(%rdi)
27443+ movq %rcx, 2*8(%rdi)
27444 movq %r11, 3*8(%rdi)
27445 leaq 4*8(%rdi), %rdi
27446 jae .Lcopy_forward_loop
27447@@ -105,12 +107,12 @@ ENTRY(memcpy)
27448 subq $0x20, %rdx
27449 movq -1*8(%rsi), %r8
27450 movq -2*8(%rsi), %r9
27451- movq -3*8(%rsi), %r10
27452+ movq -3*8(%rsi), %rcx
27453 movq -4*8(%rsi), %r11
27454 leaq -4*8(%rsi), %rsi
27455 movq %r8, -1*8(%rdi)
27456 movq %r9, -2*8(%rdi)
27457- movq %r10, -3*8(%rdi)
27458+ movq %rcx, -3*8(%rdi)
27459 movq %r11, -4*8(%rdi)
27460 leaq -4*8(%rdi), %rdi
27461 jae .Lcopy_backward_loop
27462@@ -130,12 +132,13 @@ ENTRY(memcpy)
27463 */
27464 movq 0*8(%rsi), %r8
27465 movq 1*8(%rsi), %r9
27466- movq -2*8(%rsi, %rdx), %r10
27467+ movq -2*8(%rsi, %rdx), %rcx
27468 movq -1*8(%rsi, %rdx), %r11
27469 movq %r8, 0*8(%rdi)
27470 movq %r9, 1*8(%rdi)
27471- movq %r10, -2*8(%rdi, %rdx)
27472+ movq %rcx, -2*8(%rdi, %rdx)
27473 movq %r11, -1*8(%rdi, %rdx)
27474+ pax_force_retaddr
27475 retq
27476 .p2align 4
27477 .Lless_16bytes:
27478@@ -148,6 +151,7 @@ ENTRY(memcpy)
27479 movq -1*8(%rsi, %rdx), %r9
27480 movq %r8, 0*8(%rdi)
27481 movq %r9, -1*8(%rdi, %rdx)
27482+ pax_force_retaddr
27483 retq
27484 .p2align 4
27485 .Lless_8bytes:
27486@@ -161,6 +165,7 @@ ENTRY(memcpy)
27487 movl -4(%rsi, %rdx), %r8d
27488 movl %ecx, (%rdi)
27489 movl %r8d, -4(%rdi, %rdx)
27490+ pax_force_retaddr
27491 retq
27492 .p2align 4
27493 .Lless_3bytes:
27494@@ -179,6 +184,7 @@ ENTRY(memcpy)
27495 movb %cl, (%rdi)
27496
27497 .Lend:
27498+ pax_force_retaddr
27499 retq
27500 CFI_ENDPROC
27501 ENDPROC(memcpy)
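
A pattern worth calling out: throughout memcpy_64.S (and memmove/memset below) the patch retires %r10 as a scratch register in favour of %rax/%rcx/%r9. Under the KERNEXEC plugin's "or" method, %r10 is reserved to hold the return-address mask that pax_force_retaddr ORs into the saved %rip; legitimate kernel addresses already have the top bit set and are unchanged, while a forged userland return target becomes non-canonical and faults. The `pax_force_retaddr 0, 1` form seen earlier in csum-copy_64.S reloads the mask first. A pure-C model of the bit manipulation, assuming the 0x8000000000000000 mask from the PaX macro:

#include <stdint.h>
#include <stdio.h>

#define KERNEXEC_MASK 0x8000000000000000ULL  /* movabs ...,%r10 in the macro */

static uint64_t force_retaddr(uint64_t retaddr)
{
    return retaddr | KERNEXEC_MASK;          /* orq %r10,(%rsp) */
}

int main(void)
{
    /* a kernel text address is unchanged... */
    printf("%#llx\n", (unsigned long long)force_retaddr(0xffffffff81000000ULL));
    /* ...while a userland address becomes non-canonical */
    printf("%#llx\n", (unsigned long long)force_retaddr(0x0000000000401000ULL));
    return 0;
}
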
27502diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
27503index 65268a6..5aa7815 100644
27504--- a/arch/x86/lib/memmove_64.S
27505+++ b/arch/x86/lib/memmove_64.S
27506@@ -61,13 +61,13 @@ ENTRY(memmove)
27507 5:
27508 sub $0x20, %rdx
27509 movq 0*8(%rsi), %r11
27510- movq 1*8(%rsi), %r10
27511+ movq 1*8(%rsi), %rcx
27512 movq 2*8(%rsi), %r9
27513 movq 3*8(%rsi), %r8
27514 leaq 4*8(%rsi), %rsi
27515
27516 movq %r11, 0*8(%rdi)
27517- movq %r10, 1*8(%rdi)
27518+ movq %rcx, 1*8(%rdi)
27519 movq %r9, 2*8(%rdi)
27520 movq %r8, 3*8(%rdi)
27521 leaq 4*8(%rdi), %rdi
27522@@ -81,10 +81,10 @@ ENTRY(memmove)
27523 4:
27524 movq %rdx, %rcx
27525 movq -8(%rsi, %rdx), %r11
27526- lea -8(%rdi, %rdx), %r10
27527+ lea -8(%rdi, %rdx), %r9
27528 shrq $3, %rcx
27529 rep movsq
27530- movq %r11, (%r10)
27531+ movq %r11, (%r9)
27532 jmp 13f
27533 .Lmemmove_end_forward:
27534
27535@@ -95,14 +95,14 @@ ENTRY(memmove)
27536 7:
27537 movq %rdx, %rcx
27538 movq (%rsi), %r11
27539- movq %rdi, %r10
27540+ movq %rdi, %r9
27541 leaq -8(%rsi, %rdx), %rsi
27542 leaq -8(%rdi, %rdx), %rdi
27543 shrq $3, %rcx
27544 std
27545 rep movsq
27546 cld
27547- movq %r11, (%r10)
27548+ movq %r11, (%r9)
27549 jmp 13f
27550
27551 /*
27552@@ -127,13 +127,13 @@ ENTRY(memmove)
27553 8:
27554 subq $0x20, %rdx
27555 movq -1*8(%rsi), %r11
27556- movq -2*8(%rsi), %r10
27557+ movq -2*8(%rsi), %rcx
27558 movq -3*8(%rsi), %r9
27559 movq -4*8(%rsi), %r8
27560 leaq -4*8(%rsi), %rsi
27561
27562 movq %r11, -1*8(%rdi)
27563- movq %r10, -2*8(%rdi)
27564+ movq %rcx, -2*8(%rdi)
27565 movq %r9, -3*8(%rdi)
27566 movq %r8, -4*8(%rdi)
27567 leaq -4*8(%rdi), %rdi
27568@@ -151,11 +151,11 @@ ENTRY(memmove)
27569 * Move data from 16 bytes to 31 bytes.
27570 */
27571 movq 0*8(%rsi), %r11
27572- movq 1*8(%rsi), %r10
27573+ movq 1*8(%rsi), %rcx
27574 movq -2*8(%rsi, %rdx), %r9
27575 movq -1*8(%rsi, %rdx), %r8
27576 movq %r11, 0*8(%rdi)
27577- movq %r10, 1*8(%rdi)
27578+ movq %rcx, 1*8(%rdi)
27579 movq %r9, -2*8(%rdi, %rdx)
27580 movq %r8, -1*8(%rdi, %rdx)
27581 jmp 13f
27582@@ -167,9 +167,9 @@ ENTRY(memmove)
27583 * Move data from 8 bytes to 15 bytes.
27584 */
27585 movq 0*8(%rsi), %r11
27586- movq -1*8(%rsi, %rdx), %r10
27587+ movq -1*8(%rsi, %rdx), %r9
27588 movq %r11, 0*8(%rdi)
27589- movq %r10, -1*8(%rdi, %rdx)
27590+ movq %r9, -1*8(%rdi, %rdx)
27591 jmp 13f
27592 10:
27593 cmpq $4, %rdx
27594@@ -178,9 +178,9 @@ ENTRY(memmove)
27595 * Move data from 4 bytes to 7 bytes.
27596 */
27597 movl (%rsi), %r11d
27598- movl -4(%rsi, %rdx), %r10d
27599+ movl -4(%rsi, %rdx), %r9d
27600 movl %r11d, (%rdi)
27601- movl %r10d, -4(%rdi, %rdx)
27602+ movl %r9d, -4(%rdi, %rdx)
27603 jmp 13f
27604 11:
27605 cmp $2, %rdx
27606@@ -189,9 +189,9 @@ ENTRY(memmove)
27607 * Move data from 2 bytes to 3 bytes.
27608 */
27609 movw (%rsi), %r11w
27610- movw -2(%rsi, %rdx), %r10w
27611+ movw -2(%rsi, %rdx), %r9w
27612 movw %r11w, (%rdi)
27613- movw %r10w, -2(%rdi, %rdx)
27614+ movw %r9w, -2(%rdi, %rdx)
27615 jmp 13f
27616 12:
27617 cmp $1, %rdx
27618@@ -202,14 +202,16 @@ ENTRY(memmove)
27619 movb (%rsi), %r11b
27620 movb %r11b, (%rdi)
27621 13:
27622+ pax_force_retaddr
27623 retq
27624 CFI_ENDPROC
27625
27626- .section .altinstr_replacement,"ax"
27627+ .section .altinstr_replacement,"a"
27628 .Lmemmove_begin_forward_efs:
27629 /* Forward moving data. */
27630 movq %rdx, %rcx
27631 rep movsb
27632+ pax_force_retaddr
27633 retq
27634 .Lmemmove_end_forward_efs:
27635 .previous
27636diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
27637index 2dcb380..50a78bc 100644
27638--- a/arch/x86/lib/memset_64.S
27639+++ b/arch/x86/lib/memset_64.S
27640@@ -16,7 +16,7 @@
27641 *
27642 * rax original destination
27643 */
27644- .section .altinstr_replacement, "ax", @progbits
27645+ .section .altinstr_replacement, "a", @progbits
27646 .Lmemset_c:
27647 movq %rdi,%r9
27648 movq %rdx,%rcx
27649@@ -30,6 +30,7 @@
27650 movl %edx,%ecx
27651 rep stosb
27652 movq %r9,%rax
27653+ pax_force_retaddr
27654 ret
27655 .Lmemset_e:
27656 .previous
27657@@ -45,13 +46,14 @@
27658 *
27659 * rax original destination
27660 */
27661- .section .altinstr_replacement, "ax", @progbits
27662+ .section .altinstr_replacement, "a", @progbits
27663 .Lmemset_c_e:
27664 movq %rdi,%r9
27665 movb %sil,%al
27666 movq %rdx,%rcx
27667 rep stosb
27668 movq %r9,%rax
27669+ pax_force_retaddr
27670 ret
27671 .Lmemset_e_e:
27672 .previous
27673@@ -59,7 +61,7 @@
27674 ENTRY(memset)
27675 ENTRY(__memset)
27676 CFI_STARTPROC
27677- movq %rdi,%r10
27678+ movq %rdi,%r11
27679
27680 /* expand byte value */
27681 movzbl %sil,%ecx
27682@@ -117,7 +119,8 @@ ENTRY(__memset)
27683 jnz .Lloop_1
27684
27685 .Lende:
27686- movq %r10,%rax
27687+ movq %r11,%rax
27688+ pax_force_retaddr
27689 ret
27690
27691 CFI_RESTORE_STATE
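
The repeated section-flag change from "ax" to "a" in memcpy/memmove/memset reflects how alternatives work: the .altinstr_replacement bytes are never executed in place, they are copied over the original instruction site by the alternatives machinery, so the section can stay non-executable, in line with KERNEXEC's aversion to needlessly executable mappings. A toy model of that copy step; the struct layout is invented for the sketch, not the kernel's struct alt_instr.

#include <string.h>

struct alt_entry_model {
    unsigned char *orig;          /* patch site in the text section     */
    const unsigned char *repl;    /* replacement bytes, treated as data */
    unsigned char len;
};

static void apply_alternative_model(struct alt_entry_model *a)
{
    memcpy(a->orig, a->repl, a->len);   /* repl is only ever read, so its
                                           section needs no execute flag */
}
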
27692diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
27693index c9f2d9b..e7fd2c0 100644
27694--- a/arch/x86/lib/mmx_32.c
27695+++ b/arch/x86/lib/mmx_32.c
27696@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
27697 {
27698 void *p;
27699 int i;
27700+ unsigned long cr0;
27701
27702 if (unlikely(in_interrupt()))
27703 return __memcpy(to, from, len);
27704@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
27705 kernel_fpu_begin();
27706
27707 __asm__ __volatile__ (
27708- "1: prefetch (%0)\n" /* This set is 28 bytes */
27709- " prefetch 64(%0)\n"
27710- " prefetch 128(%0)\n"
27711- " prefetch 192(%0)\n"
27712- " prefetch 256(%0)\n"
27713+ "1: prefetch (%1)\n" /* This set is 28 bytes */
27714+ " prefetch 64(%1)\n"
27715+ " prefetch 128(%1)\n"
27716+ " prefetch 192(%1)\n"
27717+ " prefetch 256(%1)\n"
27718 "2: \n"
27719 ".section .fixup, \"ax\"\n"
27720- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
27721+ "3: \n"
27722+
27723+#ifdef CONFIG_PAX_KERNEXEC
27724+ " movl %%cr0, %0\n"
27725+ " movl %0, %%eax\n"
27726+ " andl $0xFFFEFFFF, %%eax\n"
27727+ " movl %%eax, %%cr0\n"
27728+#endif
27729+
27730+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
27731+
27732+#ifdef CONFIG_PAX_KERNEXEC
27733+ " movl %0, %%cr0\n"
27734+#endif
27735+
27736 " jmp 2b\n"
27737 ".previous\n"
27738 _ASM_EXTABLE(1b, 3b)
27739- : : "r" (from));
27740+ : "=&r" (cr0) : "r" (from) : "ax");
27741
27742 for ( ; i > 5; i--) {
27743 __asm__ __volatile__ (
27744- "1: prefetch 320(%0)\n"
27745- "2: movq (%0), %%mm0\n"
27746- " movq 8(%0), %%mm1\n"
27747- " movq 16(%0), %%mm2\n"
27748- " movq 24(%0), %%mm3\n"
27749- " movq %%mm0, (%1)\n"
27750- " movq %%mm1, 8(%1)\n"
27751- " movq %%mm2, 16(%1)\n"
27752- " movq %%mm3, 24(%1)\n"
27753- " movq 32(%0), %%mm0\n"
27754- " movq 40(%0), %%mm1\n"
27755- " movq 48(%0), %%mm2\n"
27756- " movq 56(%0), %%mm3\n"
27757- " movq %%mm0, 32(%1)\n"
27758- " movq %%mm1, 40(%1)\n"
27759- " movq %%mm2, 48(%1)\n"
27760- " movq %%mm3, 56(%1)\n"
27761+ "1: prefetch 320(%1)\n"
27762+ "2: movq (%1), %%mm0\n"
27763+ " movq 8(%1), %%mm1\n"
27764+ " movq 16(%1), %%mm2\n"
27765+ " movq 24(%1), %%mm3\n"
27766+ " movq %%mm0, (%2)\n"
27767+ " movq %%mm1, 8(%2)\n"
27768+ " movq %%mm2, 16(%2)\n"
27769+ " movq %%mm3, 24(%2)\n"
27770+ " movq 32(%1), %%mm0\n"
27771+ " movq 40(%1), %%mm1\n"
27772+ " movq 48(%1), %%mm2\n"
27773+ " movq 56(%1), %%mm3\n"
27774+ " movq %%mm0, 32(%2)\n"
27775+ " movq %%mm1, 40(%2)\n"
27776+ " movq %%mm2, 48(%2)\n"
27777+ " movq %%mm3, 56(%2)\n"
27778 ".section .fixup, \"ax\"\n"
27779- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
27780+ "3:\n"
27781+
27782+#ifdef CONFIG_PAX_KERNEXEC
27783+ " movl %%cr0, %0\n"
27784+ " movl %0, %%eax\n"
27785+ " andl $0xFFFEFFFF, %%eax\n"
27786+ " movl %%eax, %%cr0\n"
27787+#endif
27788+
27789+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
27790+
27791+#ifdef CONFIG_PAX_KERNEXEC
27792+ " movl %0, %%cr0\n"
27793+#endif
27794+
27795 " jmp 2b\n"
27796 ".previous\n"
27797 _ASM_EXTABLE(1b, 3b)
27798- : : "r" (from), "r" (to) : "memory");
27799+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
27800
27801 from += 64;
27802 to += 64;
27803@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
27804 static void fast_copy_page(void *to, void *from)
27805 {
27806 int i;
27807+ unsigned long cr0;
27808
27809 kernel_fpu_begin();
27810
27811@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
27812 * but that is for later. -AV
27813 */
27814 __asm__ __volatile__(
27815- "1: prefetch (%0)\n"
27816- " prefetch 64(%0)\n"
27817- " prefetch 128(%0)\n"
27818- " prefetch 192(%0)\n"
27819- " prefetch 256(%0)\n"
27820+ "1: prefetch (%1)\n"
27821+ " prefetch 64(%1)\n"
27822+ " prefetch 128(%1)\n"
27823+ " prefetch 192(%1)\n"
27824+ " prefetch 256(%1)\n"
27825 "2: \n"
27826 ".section .fixup, \"ax\"\n"
27827- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
27828+ "3: \n"
27829+
27830+#ifdef CONFIG_PAX_KERNEXEC
27831+ " movl %%cr0, %0\n"
27832+ " movl %0, %%eax\n"
27833+ " andl $0xFFFEFFFF, %%eax\n"
27834+ " movl %%eax, %%cr0\n"
27835+#endif
27836+
27837+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
27838+
27839+#ifdef CONFIG_PAX_KERNEXEC
27840+ " movl %0, %%cr0\n"
27841+#endif
27842+
27843 " jmp 2b\n"
27844 ".previous\n"
27845- _ASM_EXTABLE(1b, 3b) : : "r" (from));
27846+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
27847
27848 for (i = 0; i < (4096-320)/64; i++) {
27849 __asm__ __volatile__ (
27850- "1: prefetch 320(%0)\n"
27851- "2: movq (%0), %%mm0\n"
27852- " movntq %%mm0, (%1)\n"
27853- " movq 8(%0), %%mm1\n"
27854- " movntq %%mm1, 8(%1)\n"
27855- " movq 16(%0), %%mm2\n"
27856- " movntq %%mm2, 16(%1)\n"
27857- " movq 24(%0), %%mm3\n"
27858- " movntq %%mm3, 24(%1)\n"
27859- " movq 32(%0), %%mm4\n"
27860- " movntq %%mm4, 32(%1)\n"
27861- " movq 40(%0), %%mm5\n"
27862- " movntq %%mm5, 40(%1)\n"
27863- " movq 48(%0), %%mm6\n"
27864- " movntq %%mm6, 48(%1)\n"
27865- " movq 56(%0), %%mm7\n"
27866- " movntq %%mm7, 56(%1)\n"
27867+ "1: prefetch 320(%1)\n"
27868+ "2: movq (%1), %%mm0\n"
27869+ " movntq %%mm0, (%2)\n"
27870+ " movq 8(%1), %%mm1\n"
27871+ " movntq %%mm1, 8(%2)\n"
27872+ " movq 16(%1), %%mm2\n"
27873+ " movntq %%mm2, 16(%2)\n"
27874+ " movq 24(%1), %%mm3\n"
27875+ " movntq %%mm3, 24(%2)\n"
27876+ " movq 32(%1), %%mm4\n"
27877+ " movntq %%mm4, 32(%2)\n"
27878+ " movq 40(%1), %%mm5\n"
27879+ " movntq %%mm5, 40(%2)\n"
27880+ " movq 48(%1), %%mm6\n"
27881+ " movntq %%mm6, 48(%2)\n"
27882+ " movq 56(%1), %%mm7\n"
27883+ " movntq %%mm7, 56(%2)\n"
27884 ".section .fixup, \"ax\"\n"
27885- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
27886+ "3:\n"
27887+
27888+#ifdef CONFIG_PAX_KERNEXEC
27889+ " movl %%cr0, %0\n"
27890+ " movl %0, %%eax\n"
27891+ " andl $0xFFFEFFFF, %%eax\n"
27892+ " movl %%eax, %%cr0\n"
27893+#endif
27894+
27895+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
27896+
27897+#ifdef CONFIG_PAX_KERNEXEC
27898+ " movl %0, %%cr0\n"
27899+#endif
27900+
27901 " jmp 2b\n"
27902 ".previous\n"
27903- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
27904+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
27905
27906 from += 64;
27907 to += 64;
27908@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
27909 static void fast_copy_page(void *to, void *from)
27910 {
27911 int i;
27912+ unsigned long cr0;
27913
27914 kernel_fpu_begin();
27915
27916 __asm__ __volatile__ (
27917- "1: prefetch (%0)\n"
27918- " prefetch 64(%0)\n"
27919- " prefetch 128(%0)\n"
27920- " prefetch 192(%0)\n"
27921- " prefetch 256(%0)\n"
27922+ "1: prefetch (%1)\n"
27923+ " prefetch 64(%1)\n"
27924+ " prefetch 128(%1)\n"
27925+ " prefetch 192(%1)\n"
27926+ " prefetch 256(%1)\n"
27927 "2: \n"
27928 ".section .fixup, \"ax\"\n"
27929- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
27930+ "3: \n"
27931+
27932+#ifdef CONFIG_PAX_KERNEXEC
27933+ " movl %%cr0, %0\n"
27934+ " movl %0, %%eax\n"
27935+ " andl $0xFFFEFFFF, %%eax\n"
27936+ " movl %%eax, %%cr0\n"
27937+#endif
27938+
27939+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
27940+
27941+#ifdef CONFIG_PAX_KERNEXEC
27942+ " movl %0, %%cr0\n"
27943+#endif
27944+
27945 " jmp 2b\n"
27946 ".previous\n"
27947- _ASM_EXTABLE(1b, 3b) : : "r" (from));
27948+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
27949
27950 for (i = 0; i < 4096/64; i++) {
27951 __asm__ __volatile__ (
27952- "1: prefetch 320(%0)\n"
27953- "2: movq (%0), %%mm0\n"
27954- " movq 8(%0), %%mm1\n"
27955- " movq 16(%0), %%mm2\n"
27956- " movq 24(%0), %%mm3\n"
27957- " movq %%mm0, (%1)\n"
27958- " movq %%mm1, 8(%1)\n"
27959- " movq %%mm2, 16(%1)\n"
27960- " movq %%mm3, 24(%1)\n"
27961- " movq 32(%0), %%mm0\n"
27962- " movq 40(%0), %%mm1\n"
27963- " movq 48(%0), %%mm2\n"
27964- " movq 56(%0), %%mm3\n"
27965- " movq %%mm0, 32(%1)\n"
27966- " movq %%mm1, 40(%1)\n"
27967- " movq %%mm2, 48(%1)\n"
27968- " movq %%mm3, 56(%1)\n"
27969+ "1: prefetch 320(%1)\n"
27970+ "2: movq (%1), %%mm0\n"
27971+ " movq 8(%1), %%mm1\n"
27972+ " movq 16(%1), %%mm2\n"
27973+ " movq 24(%1), %%mm3\n"
27974+ " movq %%mm0, (%2)\n"
27975+ " movq %%mm1, 8(%2)\n"
27976+ " movq %%mm2, 16(%2)\n"
27977+ " movq %%mm3, 24(%2)\n"
27978+ " movq 32(%1), %%mm0\n"
27979+ " movq 40(%1), %%mm1\n"
27980+ " movq 48(%1), %%mm2\n"
27981+ " movq 56(%1), %%mm3\n"
27982+ " movq %%mm0, 32(%2)\n"
27983+ " movq %%mm1, 40(%2)\n"
27984+ " movq %%mm2, 48(%2)\n"
27985+ " movq %%mm3, 56(%2)\n"
27986 ".section .fixup, \"ax\"\n"
27987- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
27988+ "3:\n"
27989+
27990+#ifdef CONFIG_PAX_KERNEXEC
27991+ " movl %%cr0, %0\n"
27992+ " movl %0, %%eax\n"
27993+ " andl $0xFFFEFFFF, %%eax\n"
27994+ " movl %%eax, %%cr0\n"
27995+#endif
27996+
27997+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
27998+
27999+#ifdef CONFIG_PAX_KERNEXEC
28000+ " movl %0, %%cr0\n"
28001+#endif
28002+
28003 " jmp 2b\n"
28004 ".previous\n"
28005 _ASM_EXTABLE(1b, 3b)
28006- : : "r" (from), "r" (to) : "memory");
28007+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
28008
28009 from += 64;
28010 to += 64;
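
The recurring KERNEXEC blocks in mmx_32.c exist because the fixup path self-patches: on a prefetch fault it overwrites the faulting instruction with a short jmp (the word 0x1AEB is EB 1A, i.e. jmp +26, written little-endian; 0x05EB is jmp +5). With kernel text write-protected, that write is wrapped in a CR0.WP toggle (andl $0xFFFEFFFF clears bit 16), which is also why each asm gains a cr0 output operand (renumbering from/to into %1/%2) and an "ax" clobber. An unprivileged C model of the sequence; the global stands in for the real control register.

#include <stdint.h>

#define CR0_WP (1UL << 16)             /* the bit andl $0xFFFEFFFF clears */

static unsigned long cr0 = CR0_WP;     /* stand-in for the real register  */

static void patch_text_model(uint16_t *insn, uint16_t jmp_word)
{
    unsigned long saved = cr0;         /* movl %%cr0, %0                  */
    cr0 = saved & ~CR0_WP;             /* andl $0xFFFEFFFF / movl to cr0  */
    *insn = jmp_word;                  /* movw $0x1AEB, 1b                */
    cr0 = saved;                       /* movl %0, %%cr0                  */
}
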
28011diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
28012index f6d13ee..aca5f0b 100644
28013--- a/arch/x86/lib/msr-reg.S
28014+++ b/arch/x86/lib/msr-reg.S
28015@@ -3,6 +3,7 @@
28016 #include <asm/dwarf2.h>
28017 #include <asm/asm.h>
28018 #include <asm/msr.h>
28019+#include <asm/alternative-asm.h>
28020
28021 #ifdef CONFIG_X86_64
28022 /*
28023@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
28024 CFI_STARTPROC
28025 pushq_cfi %rbx
28026 pushq_cfi %rbp
28027- movq %rdi, %r10 /* Save pointer */
28028+ movq %rdi, %r9 /* Save pointer */
28029 xorl %r11d, %r11d /* Return value */
28030 movl (%rdi), %eax
28031 movl 4(%rdi), %ecx
28032@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
28033 movl 28(%rdi), %edi
28034 CFI_REMEMBER_STATE
28035 1: \op
28036-2: movl %eax, (%r10)
28037+2: movl %eax, (%r9)
28038 movl %r11d, %eax /* Return value */
28039- movl %ecx, 4(%r10)
28040- movl %edx, 8(%r10)
28041- movl %ebx, 12(%r10)
28042- movl %ebp, 20(%r10)
28043- movl %esi, 24(%r10)
28044- movl %edi, 28(%r10)
28045+ movl %ecx, 4(%r9)
28046+ movl %edx, 8(%r9)
28047+ movl %ebx, 12(%r9)
28048+ movl %ebp, 20(%r9)
28049+ movl %esi, 24(%r9)
28050+ movl %edi, 28(%r9)
28051 popq_cfi %rbp
28052 popq_cfi %rbx
28053+ pax_force_retaddr
28054 ret
28055 3:
28056 CFI_RESTORE_STATE
28057diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
28058index fc6ba17..d4d989d 100644
28059--- a/arch/x86/lib/putuser.S
28060+++ b/arch/x86/lib/putuser.S
28061@@ -16,7 +16,9 @@
28062 #include <asm/errno.h>
28063 #include <asm/asm.h>
28064 #include <asm/smap.h>
28065-
28066+#include <asm/segment.h>
28067+#include <asm/pgtable.h>
28068+#include <asm/alternative-asm.h>
28069
28070 /*
28071 * __put_user_X
28072@@ -30,57 +32,125 @@
28073 * as they get called from within inline assembly.
28074 */
28075
28076-#define ENTER CFI_STARTPROC ; \
28077- GET_THREAD_INFO(%_ASM_BX)
28078-#define EXIT ASM_CLAC ; \
28079- ret ; \
28080+#define ENTER CFI_STARTPROC
28081+#define EXIT ASM_CLAC ; \
28082+ pax_force_retaddr ; \
28083+ ret ; \
28084 CFI_ENDPROC
28085
28086+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28087+#define _DEST %_ASM_CX,%_ASM_BX
28088+#else
28089+#define _DEST %_ASM_CX
28090+#endif
28091+
28092+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28093+#define __copyuser_seg gs;
28094+#else
28095+#define __copyuser_seg
28096+#endif
28097+
28098 .text
28099 ENTRY(__put_user_1)
28100 ENTER
28101+
28102+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28103+ GET_THREAD_INFO(%_ASM_BX)
28104 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
28105 jae bad_put_user
28106 ASM_STAC
28107-1: movb %al,(%_ASM_CX)
28108+
28109+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28110+ mov pax_user_shadow_base,%_ASM_BX
28111+ cmp %_ASM_BX,%_ASM_CX
28112+ jb 1234f
28113+ xor %ebx,%ebx
28114+1234:
28115+#endif
28116+
28117+#endif
28118+
28119+1: __copyuser_seg movb %al,(_DEST)
28120 xor %eax,%eax
28121 EXIT
28122 ENDPROC(__put_user_1)
28123
28124 ENTRY(__put_user_2)
28125 ENTER
28126+
28127+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28128+ GET_THREAD_INFO(%_ASM_BX)
28129 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
28130 sub $1,%_ASM_BX
28131 cmp %_ASM_BX,%_ASM_CX
28132 jae bad_put_user
28133 ASM_STAC
28134-2: movw %ax,(%_ASM_CX)
28135+
28136+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28137+ mov pax_user_shadow_base,%_ASM_BX
28138+ cmp %_ASM_BX,%_ASM_CX
28139+ jb 1234f
28140+ xor %ebx,%ebx
28141+1234:
28142+#endif
28143+
28144+#endif
28145+
28146+2: __copyuser_seg movw %ax,(_DEST)
28147 xor %eax,%eax
28148 EXIT
28149 ENDPROC(__put_user_2)
28150
28151 ENTRY(__put_user_4)
28152 ENTER
28153+
28154+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28155+ GET_THREAD_INFO(%_ASM_BX)
28156 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
28157 sub $3,%_ASM_BX
28158 cmp %_ASM_BX,%_ASM_CX
28159 jae bad_put_user
28160 ASM_STAC
28161-3: movl %eax,(%_ASM_CX)
28162+
28163+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28164+ mov pax_user_shadow_base,%_ASM_BX
28165+ cmp %_ASM_BX,%_ASM_CX
28166+ jb 1234f
28167+ xor %ebx,%ebx
28168+1234:
28169+#endif
28170+
28171+#endif
28172+
28173+3: __copyuser_seg movl %eax,(_DEST)
28174 xor %eax,%eax
28175 EXIT
28176 ENDPROC(__put_user_4)
28177
28178 ENTRY(__put_user_8)
28179 ENTER
28180+
28181+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28182+ GET_THREAD_INFO(%_ASM_BX)
28183 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
28184 sub $7,%_ASM_BX
28185 cmp %_ASM_BX,%_ASM_CX
28186 jae bad_put_user
28187 ASM_STAC
28188-4: mov %_ASM_AX,(%_ASM_CX)
28189+
28190+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28191+ mov pax_user_shadow_base,%_ASM_BX
28192+ cmp %_ASM_BX,%_ASM_CX
28193+ jb 1234f
28194+ xor %ebx,%ebx
28195+1234:
28196+#endif
28197+
28198+#endif
28199+
28200+4: __copyuser_seg mov %_ASM_AX,(_DEST)
28201 #ifdef CONFIG_X86_32
28202-5: movl %edx,4(%_ASM_CX)
28203+5: __copyuser_seg movl %edx,4(_DEST)
28204 #endif
28205 xor %eax,%eax
28206 EXIT
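
__put_user_* uses a different rebasing trick from getuser.S: instead of rewriting the pointer, %_ASM_BX is set to either the shadow base or zero, and the store goes through base+index addressing (_DEST expands to %_ASM_CX,%_ASM_BX on amd64 UDEREF), leaving the incoming pointer register untouched. A sketch with an illustrative base value:

#include <stdint.h>

#define SHADOW_BASE 0xffff880000000000ULL     /* stand-in value */

static uint64_t put_user_dest_model(uint64_t addr)
{
    uint64_t off = SHADOW_BASE;        /* mov pax_user_shadow_base,%_ASM_BX */
    if (addr >= off)                   /* cmp %_ASM_BX,%_ASM_CX; jb 1234f   */
        off = 0;                       /* xor %ebx,%ebx                     */
    return addr + off;                 /* mov %_ASM_AX,(_DEST)              */
}
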
28207diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
28208index 1cad221..de671ee 100644
28209--- a/arch/x86/lib/rwlock.S
28210+++ b/arch/x86/lib/rwlock.S
28211@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
28212 FRAME
28213 0: LOCK_PREFIX
28214 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
28215+
28216+#ifdef CONFIG_PAX_REFCOUNT
28217+ jno 1234f
28218+ LOCK_PREFIX
28219+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
28220+ int $4
28221+1234:
28222+ _ASM_EXTABLE(1234b, 1234b)
28223+#endif
28224+
28225 1: rep; nop
28226 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
28227 jne 1b
28228 LOCK_PREFIX
28229 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
28230+
28231+#ifdef CONFIG_PAX_REFCOUNT
28232+ jno 1234f
28233+ LOCK_PREFIX
28234+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
28235+ int $4
28236+1234:
28237+ _ASM_EXTABLE(1234b, 1234b)
28238+#endif
28239+
28240 jnz 0b
28241 ENDFRAME
28242+ pax_force_retaddr
28243 ret
28244 CFI_ENDPROC
28245 END(__write_lock_failed)
28246@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
28247 FRAME
28248 0: LOCK_PREFIX
28249 READ_LOCK_SIZE(inc) (%__lock_ptr)
28250+
28251+#ifdef CONFIG_PAX_REFCOUNT
28252+ jno 1234f
28253+ LOCK_PREFIX
28254+ READ_LOCK_SIZE(dec) (%__lock_ptr)
28255+ int $4
28256+1234:
28257+ _ASM_EXTABLE(1234b, 1234b)
28258+#endif
28259+
28260 1: rep; nop
28261 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
28262 js 1b
28263 LOCK_PREFIX
28264 READ_LOCK_SIZE(dec) (%__lock_ptr)
28265+
28266+#ifdef CONFIG_PAX_REFCOUNT
28267+ jno 1234f
28268+ LOCK_PREFIX
28269+ READ_LOCK_SIZE(inc) (%__lock_ptr)
28270+ int $4
28271+1234:
28272+ _ASM_EXTABLE(1234b, 1234b)
28273+#endif
28274+
28275 js 0b
28276 ENDFRAME
28277+ pax_force_retaddr
28278 ret
28279 CFI_ENDPROC
28280 END(__read_lock_failed)
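
The PAX_REFCOUNT blocks in rwlock.S all follow one pattern: perform the locked update, jno past the handler if the signed-overflow flag is clear, otherwise undo the update and raise int $4 (the overflow exception), with a self-referencing extable entry so execution resumes at the label. A portable C stand-in built on __builtin_add_overflow, which checks before committing rather than add-then-undo as the asm does:

#include <stdio.h>

static int refcount_add_model(int *ctr, int delta)
{
    int res;
    if (__builtin_add_overflow(*ctr, delta, &res)) {
        /* asm equivalent: the add is undone, then int $4 reports it */
        fprintf(stderr, "refcount overflow detected\n");
        return -1;
    }
    *ctr = res;
    return 0;
}

int main(void)
{
    int c = 0x7fffffff;             /* INT_MAX: the next add overflows */
    return refcount_add_model(&c, 1);
}
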
28281diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
28282index 5dff5f0..cadebf4 100644
28283--- a/arch/x86/lib/rwsem.S
28284+++ b/arch/x86/lib/rwsem.S
28285@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
28286 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
28287 CFI_RESTORE __ASM_REG(dx)
28288 restore_common_regs
28289+ pax_force_retaddr
28290 ret
28291 CFI_ENDPROC
28292 ENDPROC(call_rwsem_down_read_failed)
28293@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
28294 movq %rax,%rdi
28295 call rwsem_down_write_failed
28296 restore_common_regs
28297+ pax_force_retaddr
28298 ret
28299 CFI_ENDPROC
28300 ENDPROC(call_rwsem_down_write_failed)
28301@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
28302 movq %rax,%rdi
28303 call rwsem_wake
28304 restore_common_regs
28305-1: ret
28306+1: pax_force_retaddr
28307+ ret
28308 CFI_ENDPROC
28309 ENDPROC(call_rwsem_wake)
28310
28311@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
28312 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
28313 CFI_RESTORE __ASM_REG(dx)
28314 restore_common_regs
28315+ pax_force_retaddr
28316 ret
28317 CFI_ENDPROC
28318 ENDPROC(call_rwsem_downgrade_wake)
28319diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
28320index a63efd6..ccecad8 100644
28321--- a/arch/x86/lib/thunk_64.S
28322+++ b/arch/x86/lib/thunk_64.S
28323@@ -8,6 +8,7 @@
28324 #include <linux/linkage.h>
28325 #include <asm/dwarf2.h>
28326 #include <asm/calling.h>
28327+#include <asm/alternative-asm.h>
28328
28329 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
28330 .macro THUNK name, func, put_ret_addr_in_rdi=0
28331@@ -41,5 +42,6 @@
28332 SAVE_ARGS
28333 restore:
28334 RESTORE_ARGS
28335+ pax_force_retaddr
28336 ret
28337 CFI_ENDPROC
28338diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
28339index 3eb18ac..6890bc3 100644
28340--- a/arch/x86/lib/usercopy_32.c
28341+++ b/arch/x86/lib/usercopy_32.c
28342@@ -42,11 +42,13 @@ do { \
28343 int __d0; \
28344 might_fault(); \
28345 __asm__ __volatile__( \
28346+ __COPYUSER_SET_ES \
28347 ASM_STAC "\n" \
28348 "0: rep; stosl\n" \
28349 " movl %2,%0\n" \
28350 "1: rep; stosb\n" \
28351 "2: " ASM_CLAC "\n" \
28352+ __COPYUSER_RESTORE_ES \
28353 ".section .fixup,\"ax\"\n" \
28354 "3: lea 0(%2,%0,4),%0\n" \
28355 " jmp 2b\n" \
28356@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
28357
28358 #ifdef CONFIG_X86_INTEL_USERCOPY
28359 static unsigned long
28360-__copy_user_intel(void __user *to, const void *from, unsigned long size)
28361+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
28362 {
28363 int d0, d1;
28364 __asm__ __volatile__(
28365@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
28366 " .align 2,0x90\n"
28367 "3: movl 0(%4), %%eax\n"
28368 "4: movl 4(%4), %%edx\n"
28369- "5: movl %%eax, 0(%3)\n"
28370- "6: movl %%edx, 4(%3)\n"
28371+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
28372+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
28373 "7: movl 8(%4), %%eax\n"
28374 "8: movl 12(%4),%%edx\n"
28375- "9: movl %%eax, 8(%3)\n"
28376- "10: movl %%edx, 12(%3)\n"
28377+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
28378+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
28379 "11: movl 16(%4), %%eax\n"
28380 "12: movl 20(%4), %%edx\n"
28381- "13: movl %%eax, 16(%3)\n"
28382- "14: movl %%edx, 20(%3)\n"
28383+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
28384+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
28385 "15: movl 24(%4), %%eax\n"
28386 "16: movl 28(%4), %%edx\n"
28387- "17: movl %%eax, 24(%3)\n"
28388- "18: movl %%edx, 28(%3)\n"
28389+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
28390+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
28391 "19: movl 32(%4), %%eax\n"
28392 "20: movl 36(%4), %%edx\n"
28393- "21: movl %%eax, 32(%3)\n"
28394- "22: movl %%edx, 36(%3)\n"
28395+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
28396+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
28397 "23: movl 40(%4), %%eax\n"
28398 "24: movl 44(%4), %%edx\n"
28399- "25: movl %%eax, 40(%3)\n"
28400- "26: movl %%edx, 44(%3)\n"
28401+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
28402+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
28403 "27: movl 48(%4), %%eax\n"
28404 "28: movl 52(%4), %%edx\n"
28405- "29: movl %%eax, 48(%3)\n"
28406- "30: movl %%edx, 52(%3)\n"
28407+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
28408+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
28409 "31: movl 56(%4), %%eax\n"
28410 "32: movl 60(%4), %%edx\n"
28411- "33: movl %%eax, 56(%3)\n"
28412- "34: movl %%edx, 60(%3)\n"
28413+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
28414+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
28415 " addl $-64, %0\n"
28416 " addl $64, %4\n"
28417 " addl $64, %3\n"
28418@@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
28419 " shrl $2, %0\n"
28420 " andl $3, %%eax\n"
28421 " cld\n"
28422+ __COPYUSER_SET_ES
28423 "99: rep; movsl\n"
28424 "36: movl %%eax, %0\n"
28425 "37: rep; movsb\n"
28426 "100:\n"
28427+ __COPYUSER_RESTORE_ES
28428 ".section .fixup,\"ax\"\n"
28429 "101: lea 0(%%eax,%0,4),%0\n"
28430 " jmp 100b\n"
28431@@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
28432 }
28433
28434 static unsigned long
28435+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
28436+{
28437+ int d0, d1;
28438+ __asm__ __volatile__(
28439+ " .align 2,0x90\n"
28440+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
28441+ " cmpl $67, %0\n"
28442+ " jbe 3f\n"
28443+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
28444+ " .align 2,0x90\n"
28445+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
28446+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
28447+ "5: movl %%eax, 0(%3)\n"
28448+ "6: movl %%edx, 4(%3)\n"
28449+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
28450+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
28451+ "9: movl %%eax, 8(%3)\n"
28452+ "10: movl %%edx, 12(%3)\n"
28453+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
28454+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
28455+ "13: movl %%eax, 16(%3)\n"
28456+ "14: movl %%edx, 20(%3)\n"
28457+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
28458+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
28459+ "17: movl %%eax, 24(%3)\n"
28460+ "18: movl %%edx, 28(%3)\n"
28461+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
28462+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
28463+ "21: movl %%eax, 32(%3)\n"
28464+ "22: movl %%edx, 36(%3)\n"
28465+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
28466+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
28467+ "25: movl %%eax, 40(%3)\n"
28468+ "26: movl %%edx, 44(%3)\n"
28469+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
28470+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
28471+ "29: movl %%eax, 48(%3)\n"
28472+ "30: movl %%edx, 52(%3)\n"
28473+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
28474+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
28475+ "33: movl %%eax, 56(%3)\n"
28476+ "34: movl %%edx, 60(%3)\n"
28477+ " addl $-64, %0\n"
28478+ " addl $64, %4\n"
28479+ " addl $64, %3\n"
28480+ " cmpl $63, %0\n"
28481+ " ja 1b\n"
28482+ "35: movl %0, %%eax\n"
28483+ " shrl $2, %0\n"
28484+ " andl $3, %%eax\n"
28485+ " cld\n"
28486+ "99: rep; "__copyuser_seg" movsl\n"
28487+ "36: movl %%eax, %0\n"
28488+ "37: rep; "__copyuser_seg" movsb\n"
28489+ "100:\n"
28490+ ".section .fixup,\"ax\"\n"
28491+ "101: lea 0(%%eax,%0,4),%0\n"
28492+ " jmp 100b\n"
28493+ ".previous\n"
28494+ _ASM_EXTABLE(1b,100b)
28495+ _ASM_EXTABLE(2b,100b)
28496+ _ASM_EXTABLE(3b,100b)
28497+ _ASM_EXTABLE(4b,100b)
28498+ _ASM_EXTABLE(5b,100b)
28499+ _ASM_EXTABLE(6b,100b)
28500+ _ASM_EXTABLE(7b,100b)
28501+ _ASM_EXTABLE(8b,100b)
28502+ _ASM_EXTABLE(9b,100b)
28503+ _ASM_EXTABLE(10b,100b)
28504+ _ASM_EXTABLE(11b,100b)
28505+ _ASM_EXTABLE(12b,100b)
28506+ _ASM_EXTABLE(13b,100b)
28507+ _ASM_EXTABLE(14b,100b)
28508+ _ASM_EXTABLE(15b,100b)
28509+ _ASM_EXTABLE(16b,100b)
28510+ _ASM_EXTABLE(17b,100b)
28511+ _ASM_EXTABLE(18b,100b)
28512+ _ASM_EXTABLE(19b,100b)
28513+ _ASM_EXTABLE(20b,100b)
28514+ _ASM_EXTABLE(21b,100b)
28515+ _ASM_EXTABLE(22b,100b)
28516+ _ASM_EXTABLE(23b,100b)
28517+ _ASM_EXTABLE(24b,100b)
28518+ _ASM_EXTABLE(25b,100b)
28519+ _ASM_EXTABLE(26b,100b)
28520+ _ASM_EXTABLE(27b,100b)
28521+ _ASM_EXTABLE(28b,100b)
28522+ _ASM_EXTABLE(29b,100b)
28523+ _ASM_EXTABLE(30b,100b)
28524+ _ASM_EXTABLE(31b,100b)
28525+ _ASM_EXTABLE(32b,100b)
28526+ _ASM_EXTABLE(33b,100b)
28527+ _ASM_EXTABLE(34b,100b)
28528+ _ASM_EXTABLE(35b,100b)
28529+ _ASM_EXTABLE(36b,100b)
28530+ _ASM_EXTABLE(37b,100b)
28531+ _ASM_EXTABLE(99b,101b)
28532+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
28533+ : "1"(to), "2"(from), "0"(size)
28534+ : "eax", "edx", "memory");
28535+ return size;
28536+}
28537+
28538+static unsigned long __size_overflow(3)
28539 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
28540 {
28541 int d0, d1;
28542 __asm__ __volatile__(
28543 " .align 2,0x90\n"
28544- "0: movl 32(%4), %%eax\n"
28545+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
28546 " cmpl $67, %0\n"
28547 " jbe 2f\n"
28548- "1: movl 64(%4), %%eax\n"
28549+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
28550 " .align 2,0x90\n"
28551- "2: movl 0(%4), %%eax\n"
28552- "21: movl 4(%4), %%edx\n"
28553+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
28554+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
28555 " movl %%eax, 0(%3)\n"
28556 " movl %%edx, 4(%3)\n"
28557- "3: movl 8(%4), %%eax\n"
28558- "31: movl 12(%4),%%edx\n"
28559+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
28560+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
28561 " movl %%eax, 8(%3)\n"
28562 " movl %%edx, 12(%3)\n"
28563- "4: movl 16(%4), %%eax\n"
28564- "41: movl 20(%4), %%edx\n"
28565+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
28566+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
28567 " movl %%eax, 16(%3)\n"
28568 " movl %%edx, 20(%3)\n"
28569- "10: movl 24(%4), %%eax\n"
28570- "51: movl 28(%4), %%edx\n"
28571+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
28572+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
28573 " movl %%eax, 24(%3)\n"
28574 " movl %%edx, 28(%3)\n"
28575- "11: movl 32(%4), %%eax\n"
28576- "61: movl 36(%4), %%edx\n"
28577+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
28578+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
28579 " movl %%eax, 32(%3)\n"
28580 " movl %%edx, 36(%3)\n"
28581- "12: movl 40(%4), %%eax\n"
28582- "71: movl 44(%4), %%edx\n"
28583+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
28584+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
28585 " movl %%eax, 40(%3)\n"
28586 " movl %%edx, 44(%3)\n"
28587- "13: movl 48(%4), %%eax\n"
28588- "81: movl 52(%4), %%edx\n"
28589+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
28590+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
28591 " movl %%eax, 48(%3)\n"
28592 " movl %%edx, 52(%3)\n"
28593- "14: movl 56(%4), %%eax\n"
28594- "91: movl 60(%4), %%edx\n"
28595+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
28596+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
28597 " movl %%eax, 56(%3)\n"
28598 " movl %%edx, 60(%3)\n"
28599 " addl $-64, %0\n"
28600@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
28601 " shrl $2, %0\n"
28602 " andl $3, %%eax\n"
28603 " cld\n"
28604- "6: rep; movsl\n"
28605+ "6: rep; "__copyuser_seg" movsl\n"
28606 " movl %%eax,%0\n"
28607- "7: rep; movsb\n"
28608+ "7: rep; "__copyuser_seg" movsb\n"
28609 "8:\n"
28610 ".section .fixup,\"ax\"\n"
28611 "9: lea 0(%%eax,%0,4),%0\n"
28612@@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
28613 * hyoshiok@miraclelinux.com
28614 */
28615
28616-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
28617+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
28618 const void __user *from, unsigned long size)
28619 {
28620 int d0, d1;
28621
28622 __asm__ __volatile__(
28623 " .align 2,0x90\n"
28624- "0: movl 32(%4), %%eax\n"
28625+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
28626 " cmpl $67, %0\n"
28627 " jbe 2f\n"
28628- "1: movl 64(%4), %%eax\n"
28629+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
28630 " .align 2,0x90\n"
28631- "2: movl 0(%4), %%eax\n"
28632- "21: movl 4(%4), %%edx\n"
28633+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
28634+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
28635 " movnti %%eax, 0(%3)\n"
28636 " movnti %%edx, 4(%3)\n"
28637- "3: movl 8(%4), %%eax\n"
28638- "31: movl 12(%4),%%edx\n"
28639+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
28640+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
28641 " movnti %%eax, 8(%3)\n"
28642 " movnti %%edx, 12(%3)\n"
28643- "4: movl 16(%4), %%eax\n"
28644- "41: movl 20(%4), %%edx\n"
28645+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
28646+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
28647 " movnti %%eax, 16(%3)\n"
28648 " movnti %%edx, 20(%3)\n"
28649- "10: movl 24(%4), %%eax\n"
28650- "51: movl 28(%4), %%edx\n"
28651+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
28652+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
28653 " movnti %%eax, 24(%3)\n"
28654 " movnti %%edx, 28(%3)\n"
28655- "11: movl 32(%4), %%eax\n"
28656- "61: movl 36(%4), %%edx\n"
28657+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
28658+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
28659 " movnti %%eax, 32(%3)\n"
28660 " movnti %%edx, 36(%3)\n"
28661- "12: movl 40(%4), %%eax\n"
28662- "71: movl 44(%4), %%edx\n"
28663+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
28664+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
28665 " movnti %%eax, 40(%3)\n"
28666 " movnti %%edx, 44(%3)\n"
28667- "13: movl 48(%4), %%eax\n"
28668- "81: movl 52(%4), %%edx\n"
28669+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
28670+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
28671 " movnti %%eax, 48(%3)\n"
28672 " movnti %%edx, 52(%3)\n"
28673- "14: movl 56(%4), %%eax\n"
28674- "91: movl 60(%4), %%edx\n"
28675+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
28676+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
28677 " movnti %%eax, 56(%3)\n"
28678 " movnti %%edx, 60(%3)\n"
28679 " addl $-64, %0\n"
28680@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
28681 " shrl $2, %0\n"
28682 " andl $3, %%eax\n"
28683 " cld\n"
28684- "6: rep; movsl\n"
28685+ "6: rep; "__copyuser_seg" movsl\n"
28686 " movl %%eax,%0\n"
28687- "7: rep; movsb\n"
28688+ "7: rep; "__copyuser_seg" movsb\n"
28689 "8:\n"
28690 ".section .fixup,\"ax\"\n"
28691 "9: lea 0(%%eax,%0,4),%0\n"
28692@@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
28693 return size;
28694 }
28695
28696-static unsigned long __copy_user_intel_nocache(void *to,
28697+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
28698 const void __user *from, unsigned long size)
28699 {
28700 int d0, d1;
28701
28702 __asm__ __volatile__(
28703 " .align 2,0x90\n"
28704- "0: movl 32(%4), %%eax\n"
28705+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
28706 " cmpl $67, %0\n"
28707 " jbe 2f\n"
28708- "1: movl 64(%4), %%eax\n"
28709+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
28710 " .align 2,0x90\n"
28711- "2: movl 0(%4), %%eax\n"
28712- "21: movl 4(%4), %%edx\n"
28713+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
28714+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
28715 " movnti %%eax, 0(%3)\n"
28716 " movnti %%edx, 4(%3)\n"
28717- "3: movl 8(%4), %%eax\n"
28718- "31: movl 12(%4),%%edx\n"
28719+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
28720+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
28721 " movnti %%eax, 8(%3)\n"
28722 " movnti %%edx, 12(%3)\n"
28723- "4: movl 16(%4), %%eax\n"
28724- "41: movl 20(%4), %%edx\n"
28725+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
28726+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
28727 " movnti %%eax, 16(%3)\n"
28728 " movnti %%edx, 20(%3)\n"
28729- "10: movl 24(%4), %%eax\n"
28730- "51: movl 28(%4), %%edx\n"
28731+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
28732+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
28733 " movnti %%eax, 24(%3)\n"
28734 " movnti %%edx, 28(%3)\n"
28735- "11: movl 32(%4), %%eax\n"
28736- "61: movl 36(%4), %%edx\n"
28737+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
28738+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
28739 " movnti %%eax, 32(%3)\n"
28740 " movnti %%edx, 36(%3)\n"
28741- "12: movl 40(%4), %%eax\n"
28742- "71: movl 44(%4), %%edx\n"
28743+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
28744+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
28745 " movnti %%eax, 40(%3)\n"
28746 " movnti %%edx, 44(%3)\n"
28747- "13: movl 48(%4), %%eax\n"
28748- "81: movl 52(%4), %%edx\n"
28749+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
28750+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
28751 " movnti %%eax, 48(%3)\n"
28752 " movnti %%edx, 52(%3)\n"
28753- "14: movl 56(%4), %%eax\n"
28754- "91: movl 60(%4), %%edx\n"
28755+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
28756+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
28757 " movnti %%eax, 56(%3)\n"
28758 " movnti %%edx, 60(%3)\n"
28759 " addl $-64, %0\n"
28760@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
28761 " shrl $2, %0\n"
28762 " andl $3, %%eax\n"
28763 " cld\n"
28764- "6: rep; movsl\n"
28765+ "6: rep; "__copyuser_seg" movsl\n"
28766 " movl %%eax,%0\n"
28767- "7: rep; movsb\n"
28768+ "7: rep; "__copyuser_seg" movsb\n"
28769 "8:\n"
28770 ".section .fixup,\"ax\"\n"
28771 "9: lea 0(%%eax,%0,4),%0\n"
28772@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
28773 */
28774 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
28775 unsigned long size);
28776-unsigned long __copy_user_intel(void __user *to, const void *from,
28777+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
28778+ unsigned long size);
28779+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
28780 unsigned long size);
28781 unsigned long __copy_user_zeroing_intel_nocache(void *to,
28782 const void __user *from, unsigned long size);
28783 #endif /* CONFIG_X86_INTEL_USERCOPY */
28784
28785 /* Generic arbitrary sized copy. */
28786-#define __copy_user(to, from, size) \
28787+#define __copy_user(to, from, size, prefix, set, restore) \
28788 do { \
28789 int __d0, __d1, __d2; \
28790 __asm__ __volatile__( \
28791+ set \
28792 " cmp $7,%0\n" \
28793 " jbe 1f\n" \
28794 " movl %1,%0\n" \
28795 " negl %0\n" \
28796 " andl $7,%0\n" \
28797 " subl %0,%3\n" \
28798- "4: rep; movsb\n" \
28799+ "4: rep; "prefix"movsb\n" \
28800 " movl %3,%0\n" \
28801 " shrl $2,%0\n" \
28802 " andl $3,%3\n" \
28803 " .align 2,0x90\n" \
28804- "0: rep; movsl\n" \
28805+ "0: rep; "prefix"movsl\n" \
28806 " movl %3,%0\n" \
28807- "1: rep; movsb\n" \
28808+ "1: rep; "prefix"movsb\n" \
28809 "2:\n" \
28810+ restore \
28811 ".section .fixup,\"ax\"\n" \
28812 "5: addl %3,%0\n" \
28813 " jmp 2b\n" \
28814@@ -538,14 +650,14 @@ do { \
28815 " negl %0\n" \
28816 " andl $7,%0\n" \
28817 " subl %0,%3\n" \
28818- "4: rep; movsb\n" \
28819+ "4: rep; "__copyuser_seg"movsb\n" \
28820 " movl %3,%0\n" \
28821 " shrl $2,%0\n" \
28822 " andl $3,%3\n" \
28823 " .align 2,0x90\n" \
28824- "0: rep; movsl\n" \
28825+ "0: rep; "__copyuser_seg"movsl\n" \
28826 " movl %3,%0\n" \
28827- "1: rep; movsb\n" \
28828+ "1: rep; "__copyuser_seg"movsb\n" \
28829 "2:\n" \
28830 ".section .fixup,\"ax\"\n" \
28831 "5: addl %3,%0\n" \
28832@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
28833 {
28834 stac();
28835 if (movsl_is_ok(to, from, n))
28836- __copy_user(to, from, n);
28837+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
28838 else
28839- n = __copy_user_intel(to, from, n);
28840+ n = __generic_copy_to_user_intel(to, from, n);
28841 clac();
28842 return n;
28843 }
28844@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
28845 {
28846 stac();
28847 if (movsl_is_ok(to, from, n))
28848- __copy_user(to, from, n);
28849+ __copy_user(to, from, n, __copyuser_seg, "", "");
28850 else
28851- n = __copy_user_intel((void __user *)to,
28852- (const void *)from, n);
28853+ n = __generic_copy_from_user_intel(to, from, n);
28854 clac();
28855 return n;
28856 }
28857@@ -632,60 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
28858 if (n > 64 && cpu_has_xmm2)
28859 n = __copy_user_intel_nocache(to, from, n);
28860 else
28861- __copy_user(to, from, n);
28862+ __copy_user(to, from, n, __copyuser_seg, "", "");
28863 #else
28864- __copy_user(to, from, n);
28865+ __copy_user(to, from, n, __copyuser_seg, "", "");
28866 #endif
28867 clac();
28868 return n;
28869 }
28870 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
28871
28872-/**
28873- * copy_to_user: - Copy a block of data into user space.
28874- * @to: Destination address, in user space.
28875- * @from: Source address, in kernel space.
28876- * @n: Number of bytes to copy.
28877- *
28878- * Context: User context only. This function may sleep.
28879- *
28880- * Copy data from kernel space to user space.
28881- *
28882- * Returns number of bytes that could not be copied.
28883- * On success, this will be zero.
28884- */
28885-unsigned long
28886-copy_to_user(void __user *to, const void *from, unsigned long n)
28887+#ifdef CONFIG_PAX_MEMORY_UDEREF
28888+void __set_fs(mm_segment_t x)
28889 {
28890- if (access_ok(VERIFY_WRITE, to, n))
28891- n = __copy_to_user(to, from, n);
28892- return n;
28893+ switch (x.seg) {
28894+ case 0:
28895+ loadsegment(gs, 0);
28896+ break;
28897+ case TASK_SIZE_MAX:
28898+ loadsegment(gs, __USER_DS);
28899+ break;
28900+ case -1UL:
28901+ loadsegment(gs, __KERNEL_DS);
28902+ break;
28903+ default:
28904+ BUG();
28905+ }
28906 }
28907-EXPORT_SYMBOL(copy_to_user);
28908+EXPORT_SYMBOL(__set_fs);
28909
28910-/**
28911- * copy_from_user: - Copy a block of data from user space.
28912- * @to: Destination address, in kernel space.
28913- * @from: Source address, in user space.
28914- * @n: Number of bytes to copy.
28915- *
28916- * Context: User context only. This function may sleep.
28917- *
28918- * Copy data from user space to kernel space.
28919- *
28920- * Returns number of bytes that could not be copied.
28921- * On success, this will be zero.
28922- *
28923- * If some data could not be copied, this function will pad the copied
28924- * data to the requested size using zero bytes.
28925- */
28926-unsigned long
28927-_copy_from_user(void *to, const void __user *from, unsigned long n)
28928+void set_fs(mm_segment_t x)
28929 {
28930- if (access_ok(VERIFY_READ, from, n))
28931- n = __copy_from_user(to, from, n);
28932- else
28933- memset(to, 0, n);
28934- return n;
28935+ current_thread_info()->addr_limit = x;
28936+ __set_fs(x);
28937 }
28938-EXPORT_SYMBOL(_copy_from_user);
28939+EXPORT_SYMBOL(set_fs);
28940+#endif
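
Summarizing the usercopy_32.c changes: on i386 with UDEREF every user access gains a %gs segment override (__copyuser_seg), the rep-string ops are bracketed by __COPYUSER_SET_ES/__COPYUSER_RESTORE_ES, and the __copy_user_intel paths are split into to/from variants so the override lands only on the userland side of each move. The copy_to_user()/copy_from_user() definitions removed here presumably move elsewhere in the patch; in their place set_fs() is reimplemented so that changing addr_limit also reloads %gs, making the segment limit track the current address space. A sketch of that seg-to-selector mapping; the selector names and the TASK_SIZE_MAX value are stand-ins.

enum seg_model { SEG_NULL, SEG_USER_DS, SEG_KERNEL_DS };

static enum seg_model load_gs_for(unsigned long seg)
{
    if (seg == 0)
        return SEG_NULL;            /* loadsegment(gs, 0)           */
    if (seg == 0xC0000000UL)        /* TASK_SIZE_MAX stand-in       */
        return SEG_USER_DS;         /* loadsegment(gs, __USER_DS)   */
    if (seg == (unsigned long)-1)
        return SEG_KERNEL_DS;       /* loadsegment(gs, __KERNEL_DS) */
    return SEG_NULL;                /* the real code BUG()s here    */
}
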
28941diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
28942index 906fea3..0194a18 100644
28943--- a/arch/x86/lib/usercopy_64.c
28944+++ b/arch/x86/lib/usercopy_64.c
28945@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
28946 might_fault();
28947 /* no memory constraint because it doesn't change any memory gcc knows
28948 about */
28949+ pax_open_userland();
28950 stac();
28951 asm volatile(
28952 " testq %[size8],%[size8]\n"
28953@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
28954 _ASM_EXTABLE(0b,3b)
28955 _ASM_EXTABLE(1b,2b)
28956 : [size8] "=&c"(size), [dst] "=&D" (__d0)
28957- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
28958+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
28959 [zero] "r" (0UL), [eight] "r" (8UL));
28960 clac();
28961+ pax_close_userland();
28962 return size;
28963 }
28964 EXPORT_SYMBOL(__clear_user);
28965@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
28966 }
28967 EXPORT_SYMBOL(clear_user);
28968
28969-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
28970+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
28971 {
28972- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
28973- return copy_user_generic((__force void *)to, (__force void *)from, len);
28974- }
28975- return len;
28976+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
28977+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
28978+ return len;
28979 }
28980 EXPORT_SYMBOL(copy_in_user);
28981
28982@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
28983 * it is not necessary to optimize tail handling.
28984 */
28985 unsigned long
28986-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
28987+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
28988 {
28989 char c;
28990 unsigned zero_len;
28991
28992+ clac();
28993+ pax_close_userland();
28994 for (; len; --len, to++) {
28995 if (__get_user_nocheck(c, from++, sizeof(char)))
28996 break;
28997@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
28998 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
28999 if (__put_user_nocheck(c, to++, sizeof(char)))
29000 break;
29001- clac();
29002 return len;
29003 }
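
copy_user_handle_tail() now runs clac()/pax_close_userland() on entry rather than exit: it is reached from the copy_user fault fixups with the access window still open, and its byte loop goes through the checked per-byte helpers, which manage the window themselves. A simplified sketch (the zero-fill pass is folded out and the helpers are hypothetical stand-ins, not the kernel's __get_user_nocheck/__put_user_nocheck):

static int get_byte_model(char *dst, const char *src) { *dst = *src; return 0; }
static int put_byte_model(char c, char *dst)          { *dst = c;   return 0; }

static unsigned long handle_tail_model(char *to, const char *from,
                                       unsigned long len)
{
    /* clac(); pax_close_userland();  -- moved to function entry */
    for (; len; --len, to++, from++) {
        char c;
        if (get_byte_model(&c, from))
            break;
        if (put_byte_model(c, to))
            break;
    }
    return len;   /* bytes that could not be copied */
}
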
29004diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
29005index 23d8e5f..9ccc13a 100644
29006--- a/arch/x86/mm/Makefile
29007+++ b/arch/x86/mm/Makefile
29008@@ -28,3 +28,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
29009 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
29010
29011 obj-$(CONFIG_MEMTEST) += memtest.o
29012+
29013+quote:="
29014+obj-$(CONFIG_X86_64) += uderef_64.o
29015+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
29016diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
29017index 903ec1e..c4166b2 100644
29018--- a/arch/x86/mm/extable.c
29019+++ b/arch/x86/mm/extable.c
29020@@ -6,12 +6,24 @@
29021 static inline unsigned long
29022 ex_insn_addr(const struct exception_table_entry *x)
29023 {
29024- return (unsigned long)&x->insn + x->insn;
29025+ unsigned long reloc = 0;
29026+
29027+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29028+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29029+#endif
29030+
29031+ return (unsigned long)&x->insn + x->insn + reloc;
29032 }
29033 static inline unsigned long
29034 ex_fixup_addr(const struct exception_table_entry *x)
29035 {
29036- return (unsigned long)&x->fixup + x->fixup;
29037+ unsigned long reloc = 0;
29038+
29039+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29040+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29041+#endif
29042+
29043+ return (unsigned long)&x->fixup + x->fixup + reloc;
29044 }
29045
29046 int fixup_exception(struct pt_regs *regs)
29047@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
29048 unsigned long new_ip;
29049
29050 #ifdef CONFIG_PNPBIOS
29051- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
29052+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
29053 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
29054 extern u32 pnp_bios_is_utter_crap;
29055 pnp_bios_is_utter_crap = 1;
29056@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
29057 i += 4;
29058 p->fixup -= i;
29059 i += 4;
29060+
29061+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29062+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
29063+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29064+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29065+#endif
29066+
29067 }
29068 }
29069
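The two helpers patched above decode x86's self-relative exception table: each 32-bit field stores the distance from its own address to the target, so the table needs no absolute relocations, unless the whole image was moved by a constant delta (the KERNEXEC case), which must then be added back. A runnable userspace demo of the encoding, with illustrative names:

#include <stdio.h>
#include <stdint.h>

struct extable_entry { int32_t insn; int32_t fixup; };

static uintptr_t ex_insn_addr(const struct extable_entry *x, intptr_t reloc)
{
	/* same shape as the kernel helper: field address + stored offset */
	return (uintptr_t)&x->insn + x->insn + reloc;
}

int main(void)
{
	static char code[16];
	static struct extable_entry e;

	/* encode: store the target as an offset from the field itself */
	e.insn = (int32_t)((uintptr_t)&code[4] - (uintptr_t)&e.insn);
	printf("decoded %p, expected %p\n",
	       (void *)ex_insn_addr(&e, 0), (void *)&code[4]);
	return 0;
}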
29070diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
29071index 654be4a..a4a3da1 100644
29072--- a/arch/x86/mm/fault.c
29073+++ b/arch/x86/mm/fault.c
29074@@ -14,11 +14,18 @@
29075 #include <linux/hugetlb.h> /* hstate_index_to_shift */
29076 #include <linux/prefetch.h> /* prefetchw */
29077 #include <linux/context_tracking.h> /* exception_enter(), ... */
29078+#include <linux/unistd.h>
29079+#include <linux/compiler.h>
29080
29081 #include <asm/traps.h> /* dotraplinkage, ... */
29082 #include <asm/pgalloc.h> /* pgd_*(), ... */
29083 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
29084 #include <asm/fixmap.h> /* VSYSCALL_START */
29085+#include <asm/tlbflush.h>
29086+
29087+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29088+#include <asm/stacktrace.h>
29089+#endif
29090
29091 /*
29092 * Page fault error code bits:
29093@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
29094 int ret = 0;
29095
29096 /* kprobe_running() needs smp_processor_id() */
29097- if (kprobes_built_in() && !user_mode_vm(regs)) {
29098+ if (kprobes_built_in() && !user_mode(regs)) {
29099 preempt_disable();
29100 if (kprobe_running() && kprobe_fault_handler(regs, 14))
29101 ret = 1;
29102@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
29103 return !instr_lo || (instr_lo>>1) == 1;
29104 case 0x00:
29105 /* Prefetch instruction is 0x0F0D or 0x0F18 */
29106- if (probe_kernel_address(instr, opcode))
29107+ if (user_mode(regs)) {
29108+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
29109+ return 0;
29110+ } else if (probe_kernel_address(instr, opcode))
29111 return 0;
29112
29113 *prefetch = (instr_lo == 0xF) &&
29114@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
29115 while (instr < max_instr) {
29116 unsigned char opcode;
29117
29118- if (probe_kernel_address(instr, opcode))
29119+ if (user_mode(regs)) {
29120+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
29121+ break;
29122+ } else if (probe_kernel_address(instr, opcode))
29123 break;
29124
29125 instr++;
29126@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
29127 force_sig_info(si_signo, &info, tsk);
29128 }
29129
29130+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
29131+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
29132+#endif
29133+
29134+#ifdef CONFIG_PAX_EMUTRAMP
29135+static int pax_handle_fetch_fault(struct pt_regs *regs);
29136+#endif
29137+
29138+#ifdef CONFIG_PAX_PAGEEXEC
29139+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
29140+{
29141+ pgd_t *pgd;
29142+ pud_t *pud;
29143+ pmd_t *pmd;
29144+
29145+ pgd = pgd_offset(mm, address);
29146+ if (!pgd_present(*pgd))
29147+ return NULL;
29148+ pud = pud_offset(pgd, address);
29149+ if (!pud_present(*pud))
29150+ return NULL;
29151+ pmd = pmd_offset(pud, address);
29152+ if (!pmd_present(*pmd))
29153+ return NULL;
29154+ return pmd;
29155+}
29156+#endif
29157+
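pax_get_pmd() is a plain software walk of the three upper page-table levels that bails out at the first non-present entry. The same pattern as a self-contained radix lookup, using a toy node type with 9 index bits per level and a 12-bit page offset as on x86:

#include <stddef.h>
#include <stdint.h>

#define FANOUT	512			/* 9 bits per level */

struct node { void *slot[FANOUT]; };

static void *walk(struct node *root, uint64_t addr, int levels)
{
	struct node *n = root;
	int level;

	for (level = levels - 1; level > 0; level--) {
		unsigned int idx = (addr >> (12 + 9 * level)) & (FANOUT - 1);
		if (!n->slot[idx])	/* like !pgd_present()/!pud_present() */
			return NULL;
		n = n->slot[idx];
	}
	return n->slot[(addr >> 12) & (FANOUT - 1)];
}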
29158 DEFINE_SPINLOCK(pgd_lock);
29159 LIST_HEAD(pgd_list);
29160
29161@@ -232,10 +273,27 @@ void vmalloc_sync_all(void)
29162 for (address = VMALLOC_START & PMD_MASK;
29163 address >= TASK_SIZE && address < FIXADDR_TOP;
29164 address += PMD_SIZE) {
29165+
29166+#ifdef CONFIG_PAX_PER_CPU_PGD
29167+ unsigned long cpu;
29168+#else
29169 struct page *page;
29170+#endif
29171
29172 spin_lock(&pgd_lock);
29173+
29174+#ifdef CONFIG_PAX_PER_CPU_PGD
29175+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
29176+ pgd_t *pgd = get_cpu_pgd(cpu, user);
29177+ pmd_t *ret;
29178+
29179+ ret = vmalloc_sync_one(pgd, address);
29180+ if (!ret)
29181+ break;
29182+ pgd = get_cpu_pgd(cpu, kernel);
29183+#else
29184 list_for_each_entry(page, &pgd_list, lru) {
29185+ pgd_t *pgd;
29186 spinlock_t *pgt_lock;
29187 pmd_t *ret;
29188
29189@@ -243,8 +301,14 @@ void vmalloc_sync_all(void)
29190 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
29191
29192 spin_lock(pgt_lock);
29193- ret = vmalloc_sync_one(page_address(page), address);
29194+ pgd = page_address(page);
29195+#endif
29196+
29197+ ret = vmalloc_sync_one(pgd, address);
29198+
29199+#ifndef CONFIG_PAX_PER_CPU_PGD
29200 spin_unlock(pgt_lock);
29201+#endif
29202
29203 if (!ret)
29204 break;
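Under CONFIG_PAX_PER_CPU_PGD the loop above iterates every CPU's private page directory instead of the shared pgd_list, copying the freshly created kernel entry into each. The propagation pattern in isolation, with toy directories and made-up sizes:

#include <stdint.h>

#define NR_CPUS		4
#define PTRS_PER_DIR	1024

static uintptr_t master_dir[PTRS_PER_DIR];
static uintptr_t cpu_dir[NR_CPUS][PTRS_PER_DIR];

static void sync_all_dirs(unsigned int idx)
{
	unsigned int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (!cpu_dir[cpu][idx])		/* like pgd_none() */
			cpu_dir[cpu][idx] = master_dir[idx];
}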
29205@@ -278,6 +342,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
29206 * an interrupt in the middle of a task switch..
29207 */
29208 pgd_paddr = read_cr3();
29209+
29210+#ifdef CONFIG_PAX_PER_CPU_PGD
29211+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
29212+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
29213+#endif
29214+
29215 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
29216 if (!pmd_k)
29217 return -1;
29218@@ -373,11 +443,25 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
29219 * happen within a race in page table update. In the latter
29220 * case just flush:
29221 */
29222- pgd = pgd_offset(current->active_mm, address);
29223+
29224 pgd_ref = pgd_offset_k(address);
29225 if (pgd_none(*pgd_ref))
29226 return -1;
29227
29228+#ifdef CONFIG_PAX_PER_CPU_PGD
29229+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
29230+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
29231+ if (pgd_none(*pgd)) {
29232+ set_pgd(pgd, *pgd_ref);
29233+ arch_flush_lazy_mmu_mode();
29234+ } else {
29235+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
29236+ }
29237+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
29238+#else
29239+ pgd = pgd_offset(current->active_mm, address);
29240+#endif
29241+
29242 if (pgd_none(*pgd)) {
29243 set_pgd(pgd, *pgd_ref);
29244 arch_flush_lazy_mmu_mode();
29245@@ -543,7 +627,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
29246 static int is_errata100(struct pt_regs *regs, unsigned long address)
29247 {
29248 #ifdef CONFIG_X86_64
29249- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
29250+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
29251 return 1;
29252 #endif
29253 return 0;
29254@@ -570,7 +654,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
29255 }
29256
29257 static const char nx_warning[] = KERN_CRIT
29258-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
29259+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
29260
29261 static void
29262 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
29263@@ -579,15 +663,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
29264 if (!oops_may_print())
29265 return;
29266
29267- if (error_code & PF_INSTR) {
29268+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
29269 unsigned int level;
29270
29271 pte_t *pte = lookup_address(address, &level);
29272
29273 if (pte && pte_present(*pte) && !pte_exec(*pte))
29274- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
29275+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
29276 }
29277
29278+#ifdef CONFIG_PAX_KERNEXEC
29279+ if (init_mm.start_code <= address && address < init_mm.end_code) {
29280+ if (current->signal->curr_ip)
29281+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
29282+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
29283+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
29284+ else
29285+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
29286+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
29287+ }
29288+#endif
29289+
29290 printk(KERN_ALERT "BUG: unable to handle kernel ");
29291 if (address < PAGE_SIZE)
29292 printk(KERN_CONT "NULL pointer dereference");
29293@@ -750,6 +846,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
29294 return;
29295 }
29296 #endif
29297+
29298+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
29299+ if (pax_is_fetch_fault(regs, error_code, address)) {
29300+
29301+#ifdef CONFIG_PAX_EMUTRAMP
29302+ switch (pax_handle_fetch_fault(regs)) {
29303+ case 2:
29304+ return;
29305+ }
29306+#endif
29307+
29308+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
29309+ do_group_exit(SIGKILL);
29310+ }
29311+#endif
29312+
29313 /* Kernel addresses are always protection faults: */
29314 if (address >= TASK_SIZE)
29315 error_code |= PF_PROT;
29316@@ -835,7 +947,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
29317 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
29318 printk(KERN_ERR
29319 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
29320- tsk->comm, tsk->pid, address);
29321+ tsk->comm, task_pid_nr(tsk), address);
29322 code = BUS_MCEERR_AR;
29323 }
29324 #endif
29325@@ -898,6 +1010,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
29326 return 1;
29327 }
29328
29329+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
29330+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
29331+{
29332+ pte_t *pte;
29333+ pmd_t *pmd;
29334+ spinlock_t *ptl;
29335+ unsigned char pte_mask;
29336+
29337+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
29338+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
29339+ return 0;
29340+
29341+ /* PaX: it's our fault, let's handle it if we can */
29342+
29343+ /* PaX: take a look at read faults before acquiring any locks */
29344+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
29345+ /* instruction fetch attempt from a protected page in user mode */
29346+ up_read(&mm->mmap_sem);
29347+
29348+#ifdef CONFIG_PAX_EMUTRAMP
29349+ switch (pax_handle_fetch_fault(regs)) {
29350+ case 2:
29351+ return 1;
29352+ }
29353+#endif
29354+
29355+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
29356+ do_group_exit(SIGKILL);
29357+ }
29358+
29359+ pmd = pax_get_pmd(mm, address);
29360+ if (unlikely(!pmd))
29361+ return 0;
29362+
29363+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
29364+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
29365+ pte_unmap_unlock(pte, ptl);
29366+ return 0;
29367+ }
29368+
29369+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
29370+ /* write attempt to a protected page in user mode */
29371+ pte_unmap_unlock(pte, ptl);
29372+ return 0;
29373+ }
29374+
29375+#ifdef CONFIG_SMP
29376+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
29377+#else
29378+ if (likely(address > get_limit(regs->cs)))
29379+#endif
29380+ {
29381+ set_pte(pte, pte_mkread(*pte));
29382+ __flush_tlb_one(address);
29383+ pte_unmap_unlock(pte, ptl);
29384+ up_read(&mm->mmap_sem);
29385+ return 1;
29386+ }
29387+
29388+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
29389+
29390+ /*
29391+ * PaX: fill DTLB with user rights and retry
29392+ */
29393+ __asm__ __volatile__ (
29394+ "orb %2,(%1)\n"
29395+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
29396+/*
29397+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
29398+ * (and AMD's) TLBs: namely, they do not cache PTEs that would raise *any*
29399+ * page fault when examined during a TLB load attempt. This is true not only
29400+ * for PTEs holding a non-present entry but also for present entries that will
29401+ * raise a page fault (such as those set up by PaX, or the copy-on-write
29402+ * mechanism). In effect it means that we do *not* need to flush the TLBs
29403+ * for our target pages since their PTEs are simply not in the TLBs at all.
29404+ *
29405+ * The best thing about omitting it is that we gain around 15-20% speed in the
29406+ * fast path of the page fault handler and can get rid of tracing since we
29407+ * can no longer flush unintended entries.
29408+ */
29409+ "invlpg (%0)\n"
29410+#endif
29411+ __copyuser_seg"testb $0,(%0)\n"
29412+ "xorb %3,(%1)\n"
29413+ :
29414+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
29415+ : "memory", "cc");
29416+ pte_unmap_unlock(pte, ptl);
29417+ up_read(&mm->mmap_sem);
29418+ return 1;
29419+}
29420+#endif
29421+
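One detail worth unpacking from pax_handle_pageexec_fault(): PF_WRITE is bit 1 of the fault error code and _PAGE_DIRTY is bit 6 of a PTE, so shifting the write bit left by _PAGE_BIT_DIRTY - 1 = 5 turns "the fault was a write" directly into the dirty bit with no branch. The arithmetic, checked:

#include <stdio.h>

#define PF_WRITE	(1 << 1)	/* page fault error code: write access */
#define _PAGE_BIT_DIRTY	6
#define _PAGE_ACCESSED	(1 << 5)
#define _PAGE_USER	(1 << 2)

int main(void)
{
	unsigned long error_code = PF_WRITE;
	unsigned char pte_mask = _PAGE_ACCESSED | _PAGE_USER |
				 ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY - 1));

	printf("pte_mask = %#x, dirty bit %s\n", pte_mask,
	       (pte_mask & (1 << _PAGE_BIT_DIRTY)) ? "set" : "clear");
	return 0;
}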
29422 /*
29423 * Handle a spurious fault caused by a stale TLB entry.
29424 *
29425@@ -964,6 +1169,9 @@ int show_unhandled_signals = 1;
29426 static inline int
29427 access_error(unsigned long error_code, struct vm_area_struct *vma)
29428 {
29429+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
29430+ return 1;
29431+
29432 if (error_code & PF_WRITE) {
29433 /* write, present and write, not present: */
29434 if (unlikely(!(vma->vm_flags & VM_WRITE)))
29435@@ -992,7 +1200,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
29436 if (error_code & PF_USER)
29437 return false;
29438
29439- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
29440+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
29441 return false;
29442
29443 return true;
29444@@ -1008,18 +1216,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
29445 {
29446 struct vm_area_struct *vma;
29447 struct task_struct *tsk;
29448- unsigned long address;
29449 struct mm_struct *mm;
29450 int fault;
29451 int write = error_code & PF_WRITE;
29452 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
29453 (write ? FAULT_FLAG_WRITE : 0);
29454
29455- tsk = current;
29456- mm = tsk->mm;
29457-
29458 /* Get the faulting address: */
29459- address = read_cr2();
29460+ unsigned long address = read_cr2();
29461+
29462+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29463+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
29464+ if (!search_exception_tables(regs->ip)) {
29465+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
29466+ bad_area_nosemaphore(regs, error_code, address);
29467+ return;
29468+ }
29469+ if (address < pax_user_shadow_base) {
29470+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
29471+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
29472+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
29473+ } else
29474+ address -= pax_user_shadow_base;
29475+ }
29476+#endif
29477+
29478+ tsk = current;
29479+ mm = tsk->mm;
29480
29481 /*
29482 * Detect and handle instructions that would cause a page fault for
29483@@ -1080,7 +1303,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
29484 * User-mode registers count as a user access even for any
29485 * potential system fault or CPU buglet:
29486 */
29487- if (user_mode_vm(regs)) {
29488+ if (user_mode(regs)) {
29489 local_irq_enable();
29490 error_code |= PF_USER;
29491 } else {
29492@@ -1142,6 +1365,11 @@ retry:
29493 might_sleep();
29494 }
29495
29496+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
29497+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
29498+ return;
29499+#endif
29500+
29501 vma = find_vma(mm, address);
29502 if (unlikely(!vma)) {
29503 bad_area(regs, error_code, address);
29504@@ -1153,18 +1381,24 @@ retry:
29505 bad_area(regs, error_code, address);
29506 return;
29507 }
29508- if (error_code & PF_USER) {
29509- /*
29510- * Accessing the stack below %sp is always a bug.
29511- * The large cushion allows instructions like enter
29512- * and pusha to work. ("enter $65535, $31" pushes
29513- * 32 pointers and then decrements %sp by 65535.)
29514- */
29515- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
29516- bad_area(regs, error_code, address);
29517- return;
29518- }
29519+ /*
29520+ * Accessing the stack below %sp is always a bug.
29521+ * The large cushion allows instructions like enter
29522+ * and pusha to work. ("enter $65535, $31" pushes
29523+ * 32 pointers and then decrements %sp by 65535.)
29524+ */
29525+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
29526+ bad_area(regs, error_code, address);
29527+ return;
29528 }
29529+
29530+#ifdef CONFIG_PAX_SEGMEXEC
29531+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
29532+ bad_area(regs, error_code, address);
29533+ return;
29534+ }
29535+#endif
29536+
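The cushion check above, now measured against task_pt_regs(tsk)->sp, rejects fault addresses more than 65536 bytes plus 32 pointer slots below the stack pointer as bugs rather than stack growth; "enter $65535, $31" can legitimately push 32 pointers and then drop %sp by 65535. As a standalone predicate:

#include <stdbool.h>

static bool below_stack_cushion(unsigned long address, unsigned long sp)
{
	/* true when the address is too far below %sp to be stack growth */
	return address + 65536 + 32 * sizeof(unsigned long) < sp;
}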
29537 if (unlikely(expand_stack(vma, address))) {
29538 bad_area(regs, error_code, address);
29539 return;
29540@@ -1230,3 +1464,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
29541 __do_page_fault(regs, error_code);
29542 exception_exit(prev_state);
29543 }
29544+
29545+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
29546+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
29547+{
29548+ struct mm_struct *mm = current->mm;
29549+ unsigned long ip = regs->ip;
29550+
29551+ if (v8086_mode(regs))
29552+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
29553+
29554+#ifdef CONFIG_PAX_PAGEEXEC
29555+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
29556+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
29557+ return true;
29558+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
29559+ return true;
29560+ return false;
29561+ }
29562+#endif
29563+
29564+#ifdef CONFIG_PAX_SEGMEXEC
29565+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
29566+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
29567+ return true;
29568+ return false;
29569+ }
29570+#endif
29571+
29572+ return false;
29573+}
29574+#endif
29575+
29576+#ifdef CONFIG_PAX_EMUTRAMP
29577+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
29578+{
29579+ int err;
29580+
29581+ do { /* PaX: libffi trampoline emulation */
29582+ unsigned char mov, jmp;
29583+ unsigned int addr1, addr2;
29584+
29585+#ifdef CONFIG_X86_64
29586+ if ((regs->ip + 9) >> 32)
29587+ break;
29588+#endif
29589+
29590+ err = get_user(mov, (unsigned char __user *)regs->ip);
29591+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
29592+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
29593+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
29594+
29595+ if (err)
29596+ break;
29597+
29598+ if (mov == 0xB8 && jmp == 0xE9) {
29599+ regs->ax = addr1;
29600+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
29601+ return 2;
29602+ }
29603+ } while (0);
29604+
29605+ do { /* PaX: gcc trampoline emulation #1 */
29606+ unsigned char mov1, mov2;
29607+ unsigned short jmp;
29608+ unsigned int addr1, addr2;
29609+
29610+#ifdef CONFIG_X86_64
29611+ if ((regs->ip + 11) >> 32)
29612+ break;
29613+#endif
29614+
29615+ err = get_user(mov1, (unsigned char __user *)regs->ip);
29616+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
29617+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
29618+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
29619+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
29620+
29621+ if (err)
29622+ break;
29623+
29624+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
29625+ regs->cx = addr1;
29626+ regs->ax = addr2;
29627+ regs->ip = addr2;
29628+ return 2;
29629+ }
29630+ } while (0);
29631+
29632+ do { /* PaX: gcc trampoline emulation #2 */
29633+ unsigned char mov, jmp;
29634+ unsigned int addr1, addr2;
29635+
29636+#ifdef CONFIG_X86_64
29637+ if ((regs->ip + 9) >> 32)
29638+ break;
29639+#endif
29640+
29641+ err = get_user(mov, (unsigned char __user *)regs->ip);
29642+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
29643+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
29644+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
29645+
29646+ if (err)
29647+ break;
29648+
29649+ if (mov == 0xB9 && jmp == 0xE9) {
29650+ regs->cx = addr1;
29651+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
29652+ return 2;
29653+ }
29654+ } while (0);
29655+
29656+ return 1; /* PaX in action */
29657+}
29658+
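The emulations above match exact byte sequences; for example the 32-bit libffi trampoline is the 10-byte "mov $imm32,%eax; jmp rel32" pair (opcodes B8 and E9). A userspace sketch of that single match, with a hypothetical cpu struct standing in for pt_regs:

#include <stdint.h>
#include <string.h>

struct cpu { uint32_t ax, ip; };	/* hypothetical stand-in for pt_regs */

static int emulate_libffi_tramp32(struct cpu *r, const uint8_t *insn)
{
	uint32_t addr1, addr2;

	if (insn[0] != 0xB8 || insn[5] != 0xE9)
		return 0;			/* not this trampoline */
	memcpy(&addr1, insn + 1, 4);		/* mov immediate */
	memcpy(&addr2, insn + 6, 4);		/* jmp rel32 displacement */
	r->ax = addr1;
	r->ip = r->ip + addr2 + 10;		/* rel32 is relative to the next insn */
	return 2;				/* same code the kernel uses for "emulated" */
}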
29659+#ifdef CONFIG_X86_64
29660+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
29661+{
29662+ int err;
29663+
29664+ do { /* PaX: libffi trampoline emulation */
29665+ unsigned short mov1, mov2, jmp1;
29666+ unsigned char stcclc, jmp2;
29667+ unsigned long addr1, addr2;
29668+
29669+ err = get_user(mov1, (unsigned short __user *)regs->ip);
29670+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
29671+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
29672+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
29673+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
29674+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
29675+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
29676+
29677+ if (err)
29678+ break;
29679+
29680+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
29681+ regs->r11 = addr1;
29682+ regs->r10 = addr2;
29683+ if (stcclc == 0xF8)
29684+ regs->flags &= ~X86_EFLAGS_CF;
29685+ else
29686+ regs->flags |= X86_EFLAGS_CF;
29687+ regs->ip = addr1;
29688+ return 2;
29689+ }
29690+ } while (0);
29691+
29692+ do { /* PaX: gcc trampoline emulation #1 */
29693+ unsigned short mov1, mov2, jmp1;
29694+ unsigned char jmp2;
29695+ unsigned int addr1;
29696+ unsigned long addr2;
29697+
29698+ err = get_user(mov1, (unsigned short __user *)regs->ip);
29699+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
29700+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
29701+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
29702+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
29703+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
29704+
29705+ if (err)
29706+ break;
29707+
29708+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
29709+ regs->r11 = addr1;
29710+ regs->r10 = addr2;
29711+ regs->ip = addr1;
29712+ return 2;
29713+ }
29714+ } while (0);
29715+
29716+ do { /* PaX: gcc trampoline emulation #2 */
29717+ unsigned short mov1, mov2, jmp1;
29718+ unsigned char jmp2;
29719+ unsigned long addr1, addr2;
29720+
29721+ err = get_user(mov1, (unsigned short __user *)regs->ip);
29722+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
29723+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
29724+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
29725+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
29726+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
29727+
29728+ if (err)
29729+ break;
29730+
29731+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
29732+ regs->r11 = addr1;
29733+ regs->r10 = addr2;
29734+ regs->ip = addr1;
29735+ return 2;
29736+ }
29737+ } while (0);
29738+
29739+ return 1; /* PaX in action */
29740+}
29741+#endif
29742+
29743+/*
29744+ * PaX: decide what to do with offenders (regs->ip = fault address)
29745+ *
29746+ * returns 1 when task should be killed
29747+ * 2 when gcc trampoline was detected
29748+ */
29749+static int pax_handle_fetch_fault(struct pt_regs *regs)
29750+{
29751+ if (v8086_mode(regs))
29752+ return 1;
29753+
29754+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
29755+ return 1;
29756+
29757+#ifdef CONFIG_X86_32
29758+ return pax_handle_fetch_fault_32(regs);
29759+#else
29760+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
29761+ return pax_handle_fetch_fault_32(regs);
29762+ else
29763+ return pax_handle_fetch_fault_64(regs);
29764+#endif
29765+}
29766+#endif
29767+
29768+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
29769+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
29770+{
29771+ long i;
29772+
29773+ printk(KERN_ERR "PAX: bytes at PC: ");
29774+ for (i = 0; i < 20; i++) {
29775+ unsigned char c;
29776+ if (get_user(c, (unsigned char __force_user *)pc+i))
29777+ printk(KERN_CONT "?? ");
29778+ else
29779+ printk(KERN_CONT "%02x ", c);
29780+ }
29781+ printk("\n");
29782+
29783+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
29784+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
29785+ unsigned long c;
29786+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
29787+#ifdef CONFIG_X86_32
29788+ printk(KERN_CONT "???????? ");
29789+#else
29790+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
29791+ printk(KERN_CONT "???????? ???????? ");
29792+ else
29793+ printk(KERN_CONT "???????????????? ");
29794+#endif
29795+ } else {
29796+#ifdef CONFIG_X86_64
29797+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
29798+ printk(KERN_CONT "%08x ", (unsigned int)c);
29799+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
29800+ } else
29801+#endif
29802+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
29803+ }
29804+ }
29805+ printk("\n");
29806+}
29807+#endif
29808+
29809+/**
29810+ * probe_kernel_write(): safely attempt to write to a location
29811+ * @dst: address to write to
29812+ * @src: pointer to the data that shall be written
29813+ * @size: size of the data chunk
29814+ *
29815+ * Safely write to address @dst from the buffer at @src. If a kernel fault
29816+ * happens, handle that and return -EFAULT.
29817+ */
29818+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
29819+{
29820+ long ret;
29821+ mm_segment_t old_fs = get_fs();
29822+
29823+ set_fs(KERNEL_DS);
29824+ pagefault_disable();
29825+ pax_open_kernel();
29826+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
29827+ pax_close_kernel();
29828+ pagefault_enable();
29829+ set_fs(old_fs);
29830+
29831+ return ret ? -EFAULT : 0;
29832+}
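probe_kernel_write() turns a faulting copy into -EFAULT via the exception tables instead of oopsing. A rough userspace analogue can be built from SIGSEGV and sigsetjmp; this is a sketch only, as signal-based recovery is not robust enough for production use:

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

static sigjmp_buf fault_env;

static void on_segv(int sig)
{
	(void)sig;
	siglongjmp(fault_env, 1);
}

static long probe_write(void *dst, const void *src, size_t size)
{
	struct sigaction sa = { .sa_handler = on_segv }, old;
	volatile long ret = 0;

	sigaction(SIGSEGV, &sa, &old);
	if (sigsetjmp(fault_env, 1))
		ret = -14;			/* -EFAULT */
	else
		memcpy(dst, src, size);
	sigaction(SIGSEGV, &old, NULL);
	return ret;
}

int main(void)
{
	char buf[8];

	printf("ok: %ld\n", probe_write(buf, "hi", 3));		/* 0 */
	printf("bad: %ld\n", probe_write((void *)1, "hi", 3));	/* -14 */
	return 0;
}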
29833diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
29834index dd74e46..7d26398 100644
29835--- a/arch/x86/mm/gup.c
29836+++ b/arch/x86/mm/gup.c
29837@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
29838 addr = start;
29839 len = (unsigned long) nr_pages << PAGE_SHIFT;
29840 end = start + len;
29841- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
29842+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
29843 (void __user *)start, len)))
29844 return 0;
29845
29846diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
29847index 252b8f5..4dcfdc1 100644
29848--- a/arch/x86/mm/highmem_32.c
29849+++ b/arch/x86/mm/highmem_32.c
29850@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
29851 idx = type + KM_TYPE_NR*smp_processor_id();
29852 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
29853 BUG_ON(!pte_none(*(kmap_pte-idx)));
29854+
29855+ pax_open_kernel();
29856 set_pte(kmap_pte-idx, mk_pte(page, prot));
29857+ pax_close_kernel();
29858+
29859 arch_flush_lazy_mmu_mode();
29860
29861 return (void *)vaddr;
29862diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
29863index ae1aa71..d9bea75 100644
29864--- a/arch/x86/mm/hugetlbpage.c
29865+++ b/arch/x86/mm/hugetlbpage.c
29866@@ -271,23 +271,30 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
29867 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
29868 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
29869 unsigned long addr, unsigned long len,
29870- unsigned long pgoff, unsigned long flags)
29871+ unsigned long pgoff, unsigned long flags, unsigned long offset)
29872 {
29873 struct hstate *h = hstate_file(file);
29874 struct vm_unmapped_area_info info;
29875-
29876+
29877 info.flags = 0;
29878 info.length = len;
29879 info.low_limit = TASK_UNMAPPED_BASE;
29880+
29881+#ifdef CONFIG_PAX_RANDMMAP
29882+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
29883+ info.low_limit += current->mm->delta_mmap;
29884+#endif
29885+
29886 info.high_limit = TASK_SIZE;
29887 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
29888 info.align_offset = 0;
29889+ info.threadstack_offset = offset;
29890 return vm_unmapped_area(&info);
29891 }
29892
29893 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
29894 unsigned long addr0, unsigned long len,
29895- unsigned long pgoff, unsigned long flags)
29896+ unsigned long pgoff, unsigned long flags, unsigned long offset)
29897 {
29898 struct hstate *h = hstate_file(file);
29899 struct vm_unmapped_area_info info;
29900@@ -299,6 +306,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
29901 info.high_limit = current->mm->mmap_base;
29902 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
29903 info.align_offset = 0;
29904+ info.threadstack_offset = offset;
29905 addr = vm_unmapped_area(&info);
29906
29907 /*
29908@@ -311,6 +319,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
29909 VM_BUG_ON(addr != -ENOMEM);
29910 info.flags = 0;
29911 info.low_limit = TASK_UNMAPPED_BASE;
29912+
29913+#ifdef CONFIG_PAX_RANDMMAP
29914+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
29915+ info.low_limit += current->mm->delta_mmap;
29916+#endif
29917+
29918 info.high_limit = TASK_SIZE;
29919 addr = vm_unmapped_area(&info);
29920 }
29921@@ -325,10 +339,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
29922 struct hstate *h = hstate_file(file);
29923 struct mm_struct *mm = current->mm;
29924 struct vm_area_struct *vma;
29925+ unsigned long pax_task_size = TASK_SIZE;
29926+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
29927
29928 if (len & ~huge_page_mask(h))
29929 return -EINVAL;
29930- if (len > TASK_SIZE)
29931+
29932+#ifdef CONFIG_PAX_SEGMEXEC
29933+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
29934+ pax_task_size = SEGMEXEC_TASK_SIZE;
29935+#endif
29936+
29937+ pax_task_size -= PAGE_SIZE;
29938+
29939+ if (len > pax_task_size)
29940 return -ENOMEM;
29941
29942 if (flags & MAP_FIXED) {
29943@@ -337,19 +361,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
29944 return addr;
29945 }
29946
29947+#ifdef CONFIG_PAX_RANDMMAP
29948+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
29949+#endif
29950+
29951 if (addr) {
29952 addr = ALIGN(addr, huge_page_size(h));
29953 vma = find_vma(mm, addr);
29954- if (TASK_SIZE - len >= addr &&
29955- (!vma || addr + len <= vma->vm_start))
29956+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
29957 return addr;
29958 }
29959 if (mm->get_unmapped_area == arch_get_unmapped_area)
29960 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
29961- pgoff, flags);
29962+ pgoff, flags, offset);
29963 else
29964 return hugetlb_get_unmapped_area_topdown(file, addr, len,
29965- pgoff, flags);
29966+ pgoff, flags, offset);
29967 }
29968
29969 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
29970diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
29971index 1f34e92..c97b98f 100644
29972--- a/arch/x86/mm/init.c
29973+++ b/arch/x86/mm/init.c
29974@@ -4,6 +4,7 @@
29975 #include <linux/swap.h>
29976 #include <linux/memblock.h>
29977 #include <linux/bootmem.h> /* for max_low_pfn */
29978+#include <linux/tboot.h>
29979
29980 #include <asm/cacheflush.h>
29981 #include <asm/e820.h>
29982@@ -17,6 +18,8 @@
29983 #include <asm/proto.h>
29984 #include <asm/dma.h> /* for MAX_DMA_PFN */
29985 #include <asm/microcode.h>
29986+#include <asm/desc.h>
29987+#include <asm/bios_ebda.h>
29988
29989 #include "mm_internal.h"
29990
29991@@ -465,7 +468,18 @@ void __init init_mem_mapping(void)
29992 early_ioremap_page_table_range_init();
29993 #endif
29994
29995+#ifdef CONFIG_PAX_PER_CPU_PGD
29996+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
29997+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
29998+ KERNEL_PGD_PTRS);
29999+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
30000+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
30001+ KERNEL_PGD_PTRS);
30002+ load_cr3(get_cpu_pgd(0, kernel));
30003+#else
30004 load_cr3(swapper_pg_dir);
30005+#endif
30006+
30007 __flush_tlb_all();
30008
30009 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
30010@@ -481,10 +495,40 @@ void __init init_mem_mapping(void)
30011 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
30012 * mmio resources as well as potential bios/acpi data regions.
30013 */
30014+
30015+#ifdef CONFIG_GRKERNSEC_KMEM
30016+static unsigned int ebda_start __read_only;
30017+static unsigned int ebda_end __read_only;
30018+#endif
30019+
30020 int devmem_is_allowed(unsigned long pagenr)
30021 {
30022- if (pagenr < 256)
30023+#ifdef CONFIG_GRKERNSEC_KMEM
30024+ /* allow BDA */
30025+ if (!pagenr)
30026 return 1;
30027+ /* allow EBDA */
30028+ if (pagenr >= ebda_start && pagenr < ebda_end)
30029+ return 1;
30030+ /* if tboot is in use, allow access to its hardcoded serial log range */
30031+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
30032+ return 1;
30033+#else
30034+ if (!pagenr)
30035+ return 1;
30036+#ifdef CONFIG_VM86
30037+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
30038+ return 1;
30039+#endif
30040+#endif
30041+
30042+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
30043+ return 1;
30044+#ifdef CONFIG_GRKERNSEC_KMEM
30045+ /* throw out everything else below 1MB */
30046+ if (pagenr <= 256)
30047+ return 0;
30048+#endif
30049 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
30050 return 0;
30051 if (!page_is_ram(pagenr))
30052@@ -538,8 +582,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
30053 #endif
30054 }
30055
30056+#ifdef CONFIG_GRKERNSEC_KMEM
30057+static inline void gr_init_ebda(void)
30058+{
30059+ unsigned int ebda_addr;
30060+ unsigned int ebda_size = 0;
30061+
30062+ ebda_addr = get_bios_ebda();
30063+ if (ebda_addr) {
30064+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
30065+ ebda_size <<= 10;
30066+ }
30067+ if (ebda_addr && ebda_size) {
30068+ ebda_start = ebda_addr >> PAGE_SHIFT;
30069+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
30070+ } else {
30071+ ebda_start = 0x9f000 >> PAGE_SHIFT;
30072+ ebda_end = 0xa0000 >> PAGE_SHIFT;
30073+ }
30074+}
30075+#else
30076+static inline void gr_init_ebda(void) { }
30077+#endif
30078+
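gr_init_ebda() derives the EBDA page range: get_bios_ebda() returns the EBDA physical address (the segment word at BDA offset 0x40E shifted left by 4), the first EBDA byte holds the area's size in KiB, and the range is clamped below 0xa0000 with a 0x9f000-0xa0000 fallback. The arithmetic alone, with a made-up BIOS value:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_ALIGN(x) (((x) + 4095u) & ~4095u)

int main(void)
{
	unsigned int ebda_seg = 0x9FC0;			/* hypothetical word at BDA 0x40E */
	unsigned int ebda_addr = ebda_seg << 4;		/* real-mode segment -> physical */
	unsigned int ebda_size = 1u << 10;		/* first EBDA byte = 1 KiB, assumed */

	unsigned int start = ebda_addr >> PAGE_SHIFT;
	unsigned int end = PAGE_ALIGN(ebda_addr + ebda_size);
	if (end > 0xa0000)
		end = 0xa0000;
	end >>= PAGE_SHIFT;

	printf("EBDA pages: %#x-%#x\n", start, end);
	return 0;
}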
30079 void free_initmem(void)
30080 {
30081+#ifdef CONFIG_PAX_KERNEXEC
30082+#ifdef CONFIG_X86_32
30083+ /* PaX: limit KERNEL_CS to actual size */
30084+ unsigned long addr, limit;
30085+ struct desc_struct d;
30086+ int cpu;
30087+#else
30088+ pgd_t *pgd;
30089+ pud_t *pud;
30090+ pmd_t *pmd;
30091+ unsigned long addr, end;
30092+#endif
30093+#endif
30094+
30095+ gr_init_ebda();
30096+
30097+#ifdef CONFIG_PAX_KERNEXEC
30098+#ifdef CONFIG_X86_32
30099+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
30100+ limit = (limit - 1UL) >> PAGE_SHIFT;
30101+
30102+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
30103+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
30104+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
30105+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
30106+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
30107+ }
30108+
30109+ /* PaX: make KERNEL_CS read-only */
30110+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
30111+ if (!paravirt_enabled())
30112+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
30113+/*
30114+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
30115+ pgd = pgd_offset_k(addr);
30116+ pud = pud_offset(pgd, addr);
30117+ pmd = pmd_offset(pud, addr);
30118+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
30119+ }
30120+*/
30121+#ifdef CONFIG_X86_PAE
30122+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
30123+/*
30124+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
30125+ pgd = pgd_offset_k(addr);
30126+ pud = pud_offset(pgd, addr);
30127+ pmd = pmd_offset(pud, addr);
30128+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
30129+ }
30130+*/
30131+#endif
30132+
30133+#ifdef CONFIG_MODULES
30134+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
30135+#endif
30136+
30137+#else
30138+ /* PaX: make kernel code/rodata read-only, rest non-executable */
30139+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
30140+ pgd = pgd_offset_k(addr);
30141+ pud = pud_offset(pgd, addr);
30142+ pmd = pmd_offset(pud, addr);
30143+ if (!pmd_present(*pmd))
30144+ continue;
30145+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
30146+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
30147+ else
30148+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
30149+ }
30150+
30151+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
30152+ end = addr + KERNEL_IMAGE_SIZE;
30153+ for (; addr < end; addr += PMD_SIZE) {
30154+ pgd = pgd_offset_k(addr);
30155+ pud = pud_offset(pgd, addr);
30156+ pmd = pmd_offset(pud, addr);
30157+ if (!pmd_present(*pmd))
30158+ continue;
30159+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
30160+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
30161+ }
30162+#endif
30163+
30164+ flush_tlb_all();
30165+#endif
30166+
30167 free_init_pages("unused kernel memory",
30168 (unsigned long)(&__init_begin),
30169 (unsigned long)(&__init_end));
30170diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
30171index 3ac7e31..89611b7 100644
30172--- a/arch/x86/mm/init_32.c
30173+++ b/arch/x86/mm/init_32.c
30174@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
30175 bool __read_mostly __vmalloc_start_set = false;
30176
30177 /*
30178- * Creates a middle page table and puts a pointer to it in the
30179- * given global directory entry. This only returns the gd entry
30180- * in non-PAE compilation mode, since the middle layer is folded.
30181- */
30182-static pmd_t * __init one_md_table_init(pgd_t *pgd)
30183-{
30184- pud_t *pud;
30185- pmd_t *pmd_table;
30186-
30187-#ifdef CONFIG_X86_PAE
30188- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
30189- pmd_table = (pmd_t *)alloc_low_page();
30190- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
30191- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
30192- pud = pud_offset(pgd, 0);
30193- BUG_ON(pmd_table != pmd_offset(pud, 0));
30194-
30195- return pmd_table;
30196- }
30197-#endif
30198- pud = pud_offset(pgd, 0);
30199- pmd_table = pmd_offset(pud, 0);
30200-
30201- return pmd_table;
30202-}
30203-
30204-/*
30205 * Create a page table and place a pointer to it in a middle page
30206 * directory entry:
30207 */
30208@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
30209 pte_t *page_table = (pte_t *)alloc_low_page();
30210
30211 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
30212+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30213+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
30214+#else
30215 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
30216+#endif
30217 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
30218 }
30219
30220 return pte_offset_kernel(pmd, 0);
30221 }
30222
30223+static pmd_t * __init one_md_table_init(pgd_t *pgd)
30224+{
30225+ pud_t *pud;
30226+ pmd_t *pmd_table;
30227+
30228+ pud = pud_offset(pgd, 0);
30229+ pmd_table = pmd_offset(pud, 0);
30230+
30231+ return pmd_table;
30232+}
30233+
30234 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
30235 {
30236 int pgd_idx = pgd_index(vaddr);
30237@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
30238 int pgd_idx, pmd_idx;
30239 unsigned long vaddr;
30240 pgd_t *pgd;
30241+ pud_t *pud;
30242 pmd_t *pmd;
30243 pte_t *pte = NULL;
30244 unsigned long count = page_table_range_init_count(start, end);
30245@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
30246 pgd = pgd_base + pgd_idx;
30247
30248 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
30249- pmd = one_md_table_init(pgd);
30250- pmd = pmd + pmd_index(vaddr);
30251+ pud = pud_offset(pgd, vaddr);
30252+ pmd = pmd_offset(pud, vaddr);
30253+
30254+#ifdef CONFIG_X86_PAE
30255+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
30256+#endif
30257+
30258 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
30259 pmd++, pmd_idx++) {
30260 pte = page_table_kmap_check(one_page_table_init(pmd),
30261@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
30262 }
30263 }
30264
30265-static inline int is_kernel_text(unsigned long addr)
30266+static inline int is_kernel_text(unsigned long start, unsigned long end)
30267 {
30268- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
30269- return 1;
30270- return 0;
30271+ if ((start > ktla_ktva((unsigned long)_etext) ||
30272+ end <= ktla_ktva((unsigned long)_stext)) &&
30273+ (start > ktla_ktva((unsigned long)_einittext) ||
30274+ end <= ktla_ktva((unsigned long)_sinittext)) &&
30275+
30276+#ifdef CONFIG_ACPI_SLEEP
30277+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
30278+#endif
30279+
30280+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
30281+ return 0;
30282+ return 1;
30283 }
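The rewritten is_kernel_text() is essentially an interval test: the candidate [start, end) counts as kernel text iff it overlaps any protected range (text, inittext, the ACPI wakeup area, the legacy BIOS window). The underlying half-open overlap predicate, for reference:

#include <stdbool.h>

static bool ranges_overlap(unsigned long s1, unsigned long e1,
			   unsigned long s2, unsigned long e2)
{
	/* half-open intervals [s1,e1) and [s2,e2) intersect */
	return s1 < e2 && s2 < e1;
}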
30284
30285 /*
30286@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
30287 unsigned long last_map_addr = end;
30288 unsigned long start_pfn, end_pfn;
30289 pgd_t *pgd_base = swapper_pg_dir;
30290- int pgd_idx, pmd_idx, pte_ofs;
30291+ unsigned int pgd_idx, pmd_idx, pte_ofs;
30292 unsigned long pfn;
30293 pgd_t *pgd;
30294+ pud_t *pud;
30295 pmd_t *pmd;
30296 pte_t *pte;
30297 unsigned pages_2m, pages_4k;
30298@@ -291,8 +295,13 @@ repeat:
30299 pfn = start_pfn;
30300 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
30301 pgd = pgd_base + pgd_idx;
30302- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
30303- pmd = one_md_table_init(pgd);
30304+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
30305+ pud = pud_offset(pgd, 0);
30306+ pmd = pmd_offset(pud, 0);
30307+
30308+#ifdef CONFIG_X86_PAE
30309+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
30310+#endif
30311
30312 if (pfn >= end_pfn)
30313 continue;
30314@@ -304,14 +313,13 @@ repeat:
30315 #endif
30316 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
30317 pmd++, pmd_idx++) {
30318- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
30319+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
30320
30321 /*
30322 * Map with big pages if possible, otherwise
30323 * create normal page tables:
30324 */
30325 if (use_pse) {
30326- unsigned int addr2;
30327 pgprot_t prot = PAGE_KERNEL_LARGE;
30328 /*
30329 * first pass will use the same initial
30330@@ -322,11 +330,7 @@ repeat:
30331 _PAGE_PSE);
30332
30333 pfn &= PMD_MASK >> PAGE_SHIFT;
30334- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
30335- PAGE_OFFSET + PAGE_SIZE-1;
30336-
30337- if (is_kernel_text(addr) ||
30338- is_kernel_text(addr2))
30339+ if (is_kernel_text(address, address + PMD_SIZE))
30340 prot = PAGE_KERNEL_LARGE_EXEC;
30341
30342 pages_2m++;
30343@@ -343,7 +347,7 @@ repeat:
30344 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
30345 pte += pte_ofs;
30346 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
30347- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
30348+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
30349 pgprot_t prot = PAGE_KERNEL;
30350 /*
30351 * first pass will use the same initial
30352@@ -351,7 +355,7 @@ repeat:
30353 */
30354 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
30355
30356- if (is_kernel_text(addr))
30357+ if (is_kernel_text(address, address + PAGE_SIZE))
30358 prot = PAGE_KERNEL_EXEC;
30359
30360 pages_4k++;
30361@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
30362
30363 pud = pud_offset(pgd, va);
30364 pmd = pmd_offset(pud, va);
30365- if (!pmd_present(*pmd))
30366+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
30367 break;
30368
30369 /* should not be large page here */
30370@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
30371
30372 static void __init pagetable_init(void)
30373 {
30374- pgd_t *pgd_base = swapper_pg_dir;
30375-
30376- permanent_kmaps_init(pgd_base);
30377+ permanent_kmaps_init(swapper_pg_dir);
30378 }
30379
30380-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
30381+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
30382 EXPORT_SYMBOL_GPL(__supported_pte_mask);
30383
30384 /* user-defined highmem size */
30385@@ -772,7 +774,7 @@ void __init mem_init(void)
30386 after_bootmem = 1;
30387
30388 codesize = (unsigned long) &_etext - (unsigned long) &_text;
30389- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
30390+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
30391 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
30392
30393 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
30394@@ -813,10 +815,10 @@ void __init mem_init(void)
30395 ((unsigned long)&__init_end -
30396 (unsigned long)&__init_begin) >> 10,
30397
30398- (unsigned long)&_etext, (unsigned long)&_edata,
30399- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
30400+ (unsigned long)&_sdata, (unsigned long)&_edata,
30401+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
30402
30403- (unsigned long)&_text, (unsigned long)&_etext,
30404+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
30405 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
30406
30407 /*
30408@@ -906,6 +908,7 @@ void set_kernel_text_rw(void)
30409 if (!kernel_set_to_readonly)
30410 return;
30411
30412+ start = ktla_ktva(start);
30413 pr_debug("Set kernel text: %lx - %lx for read write\n",
30414 start, start+size);
30415
30416@@ -920,6 +923,7 @@ void set_kernel_text_ro(void)
30417 if (!kernel_set_to_readonly)
30418 return;
30419
30420+ start = ktla_ktva(start);
30421 pr_debug("Set kernel text: %lx - %lx for read only\n",
30422 start, start+size);
30423
30424@@ -948,6 +952,7 @@ void mark_rodata_ro(void)
30425 unsigned long start = PFN_ALIGN(_text);
30426 unsigned long size = PFN_ALIGN(_etext) - start;
30427
30428+ start = ktla_ktva(start);
30429 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
30430 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
30431 size >> 10);
30432diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
30433index bb00c46..bf91a67 100644
30434--- a/arch/x86/mm/init_64.c
30435+++ b/arch/x86/mm/init_64.c
30436@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
30437 * around without checking the pgd every time.
30438 */
30439
30440-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
30441+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
30442 EXPORT_SYMBOL_GPL(__supported_pte_mask);
30443
30444 int force_personality32;
30445@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
30446
30447 for (address = start; address <= end; address += PGDIR_SIZE) {
30448 const pgd_t *pgd_ref = pgd_offset_k(address);
30449+
30450+#ifdef CONFIG_PAX_PER_CPU_PGD
30451+ unsigned long cpu;
30452+#else
30453 struct page *page;
30454+#endif
30455
30456 if (pgd_none(*pgd_ref))
30457 continue;
30458
30459 spin_lock(&pgd_lock);
30460+
30461+#ifdef CONFIG_PAX_PER_CPU_PGD
30462+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
30463+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
30464+
30465+ if (pgd_none(*pgd))
30466+ set_pgd(pgd, *pgd_ref);
30467+ else
30468+ BUG_ON(pgd_page_vaddr(*pgd)
30469+ != pgd_page_vaddr(*pgd_ref));
30470+ pgd = pgd_offset_cpu(cpu, kernel, address);
30471+#else
30472 list_for_each_entry(page, &pgd_list, lru) {
30473 pgd_t *pgd;
30474 spinlock_t *pgt_lock;
30475@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
30476 /* the pgt_lock only for Xen */
30477 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
30478 spin_lock(pgt_lock);
30479+#endif
30480
30481 if (pgd_none(*pgd))
30482 set_pgd(pgd, *pgd_ref);
30483@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
30484 BUG_ON(pgd_page_vaddr(*pgd)
30485 != pgd_page_vaddr(*pgd_ref));
30486
30487+#ifndef CONFIG_PAX_PER_CPU_PGD
30488 spin_unlock(pgt_lock);
30489+#endif
30490+
30491 }
30492 spin_unlock(&pgd_lock);
30493 }
30494@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
30495 {
30496 if (pgd_none(*pgd)) {
30497 pud_t *pud = (pud_t *)spp_getpage();
30498- pgd_populate(&init_mm, pgd, pud);
30499+ pgd_populate_kernel(&init_mm, pgd, pud);
30500 if (pud != pud_offset(pgd, 0))
30501 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
30502 pud, pud_offset(pgd, 0));
30503@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
30504 {
30505 if (pud_none(*pud)) {
30506 pmd_t *pmd = (pmd_t *) spp_getpage();
30507- pud_populate(&init_mm, pud, pmd);
30508+ pud_populate_kernel(&init_mm, pud, pmd);
30509 if (pmd != pmd_offset(pud, 0))
30510 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
30511 pmd, pmd_offset(pud, 0));
30512@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
30513 pmd = fill_pmd(pud, vaddr);
30514 pte = fill_pte(pmd, vaddr);
30515
30516+ pax_open_kernel();
30517 set_pte(pte, new_pte);
30518+ pax_close_kernel();
30519
30520 /*
30521 * It's enough to flush this one mapping.
30522@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
30523 pgd = pgd_offset_k((unsigned long)__va(phys));
30524 if (pgd_none(*pgd)) {
30525 pud = (pud_t *) spp_getpage();
30526- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
30527- _PAGE_USER));
30528+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
30529 }
30530 pud = pud_offset(pgd, (unsigned long)__va(phys));
30531 if (pud_none(*pud)) {
30532 pmd = (pmd_t *) spp_getpage();
30533- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
30534- _PAGE_USER));
30535+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
30536 }
30537 pmd = pmd_offset(pud, phys);
30538 BUG_ON(!pmd_none(*pmd));
30539@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
30540 prot);
30541
30542 spin_lock(&init_mm.page_table_lock);
30543- pud_populate(&init_mm, pud, pmd);
30544+ pud_populate_kernel(&init_mm, pud, pmd);
30545 spin_unlock(&init_mm.page_table_lock);
30546 }
30547 __flush_tlb_all();
30548@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
30549 page_size_mask);
30550
30551 spin_lock(&init_mm.page_table_lock);
30552- pgd_populate(&init_mm, pgd, pud);
30553+ pgd_populate_kernel(&init_mm, pgd, pud);
30554 spin_unlock(&init_mm.page_table_lock);
30555 pgd_changed = true;
30556 }
30557@@ -1221,8 +1242,8 @@ int kern_addr_valid(unsigned long addr)
30558 static struct vm_area_struct gate_vma = {
30559 .vm_start = VSYSCALL_START,
30560 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
30561- .vm_page_prot = PAGE_READONLY_EXEC,
30562- .vm_flags = VM_READ | VM_EXEC
30563+ .vm_page_prot = PAGE_READONLY,
30564+ .vm_flags = VM_READ
30565 };
30566
30567 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
30568@@ -1256,7 +1277,7 @@ int in_gate_area_no_mm(unsigned long addr)
30569
30570 const char *arch_vma_name(struct vm_area_struct *vma)
30571 {
30572- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
30573+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
30574 return "[vdso]";
30575 if (vma == &gate_vma)
30576 return "[vsyscall]";
30577diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
30578index 7b179b4..6bd17777 100644
30579--- a/arch/x86/mm/iomap_32.c
30580+++ b/arch/x86/mm/iomap_32.c
30581@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
30582 type = kmap_atomic_idx_push();
30583 idx = type + KM_TYPE_NR * smp_processor_id();
30584 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
30585+
30586+ pax_open_kernel();
30587 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
30588+ pax_close_kernel();
30589+
30590 arch_flush_lazy_mmu_mode();
30591
30592 return (void *)vaddr;
30593diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
30594index 9a1e658..da003f3 100644
30595--- a/arch/x86/mm/ioremap.c
30596+++ b/arch/x86/mm/ioremap.c
30597@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
30598 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
30599 int is_ram = page_is_ram(pfn);
30600
30601- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
30602+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
30603 return NULL;
30604 WARN_ON_ONCE(is_ram);
30605 }
30606@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
30607 *
30608 * Caller must ensure there is only one unmapping for the same pointer.
30609 */
30610-void iounmap(volatile void __iomem *addr)
30611+void iounmap(const volatile void __iomem *addr)
30612 {
30613 struct vm_struct *p, *o;
30614
30615@@ -310,6 +310,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
30616
30617 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
30618 if (page_is_ram(start >> PAGE_SHIFT))
30619+#ifdef CONFIG_HIGHMEM
30620+ if ((start >> PAGE_SHIFT) < max_low_pfn)
30621+#endif
30622 return __va(phys);
30623
30624 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
30625@@ -322,6 +325,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
30626 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
30627 {
30628 if (page_is_ram(phys >> PAGE_SHIFT))
30629+#ifdef CONFIG_HIGHMEM
30630+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
30631+#endif
30632 return;
30633
30634 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
30635@@ -339,7 +345,7 @@ static int __init early_ioremap_debug_setup(char *str)
30636 early_param("early_ioremap_debug", early_ioremap_debug_setup);
30637
30638 static __initdata int after_paging_init;
30639-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
30640+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
30641
30642 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
30643 {
30644@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
30645 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
30646
30647 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
30648- memset(bm_pte, 0, sizeof(bm_pte));
30649- pmd_populate_kernel(&init_mm, pmd, bm_pte);
30650+ pmd_populate_user(&init_mm, pmd, bm_pte);
30651
30652 /*
30653 * The boot-ioremap range spans multiple pmds, for which
30654diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
30655index d87dd6d..bf3fa66 100644
30656--- a/arch/x86/mm/kmemcheck/kmemcheck.c
30657+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
30658@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
30659 * memory (e.g. tracked pages)? For now, we need this to avoid
30660 * invoking kmemcheck for PnP BIOS calls.
30661 */
30662- if (regs->flags & X86_VM_MASK)
30663+ if (v8086_mode(regs))
30664 return false;
30665- if (regs->cs != __KERNEL_CS)
30666+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
30667 return false;
30668
30669 pte = kmemcheck_pte_lookup(address);
30670diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
30671index 845df68..1d8d29f 100644
30672--- a/arch/x86/mm/mmap.c
30673+++ b/arch/x86/mm/mmap.c
30674@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
30675 * Leave an at least ~128 MB hole with possible stack randomization.
30676 */
30677 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
30678-#define MAX_GAP (TASK_SIZE/6*5)
30679+#define MAX_GAP (pax_task_size/6*5)
30680
30681 static int mmap_is_legacy(void)
30682 {
30683@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
30684 return rnd << PAGE_SHIFT;
30685 }
30686
30687-static unsigned long mmap_base(void)
30688+static unsigned long mmap_base(struct mm_struct *mm)
30689 {
30690 unsigned long gap = rlimit(RLIMIT_STACK);
30691+ unsigned long pax_task_size = TASK_SIZE;
30692+
30693+#ifdef CONFIG_PAX_SEGMEXEC
30694+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
30695+ pax_task_size = SEGMEXEC_TASK_SIZE;
30696+#endif
30697
30698 if (gap < MIN_GAP)
30699 gap = MIN_GAP;
30700 else if (gap > MAX_GAP)
30701 gap = MAX_GAP;
30702
30703- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
30704+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
30705 }
30706
30707 /*
30708 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
30709 * does, but not when emulating X86_32
30710 */
30711-static unsigned long mmap_legacy_base(void)
30712+static unsigned long mmap_legacy_base(struct mm_struct *mm)
30713 {
30714- if (mmap_is_ia32())
30715+ if (mmap_is_ia32()) {
30716+
30717+#ifdef CONFIG_PAX_SEGMEXEC
30718+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
30719+ return SEGMEXEC_TASK_UNMAPPED_BASE;
30720+ else
30721+#endif
30722+
30723 return TASK_UNMAPPED_BASE;
30724- else
30725+ } else
30726 return TASK_UNMAPPED_BASE + mmap_rnd();
30727 }
30728
30729@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
30730 void arch_pick_mmap_layout(struct mm_struct *mm)
30731 {
30732 if (mmap_is_legacy()) {
30733- mm->mmap_base = mmap_legacy_base();
30734+ mm->mmap_base = mmap_legacy_base(mm);
30735+
30736+#ifdef CONFIG_PAX_RANDMMAP
30737+ if (mm->pax_flags & MF_PAX_RANDMMAP)
30738+ mm->mmap_base += mm->delta_mmap;
30739+#endif
30740+
30741 mm->get_unmapped_area = arch_get_unmapped_area;
30742 mm->unmap_area = arch_unmap_area;
30743 } else {
30744- mm->mmap_base = mmap_base();
30745+ mm->mmap_base = mmap_base(mm);
30746+
30747+#ifdef CONFIG_PAX_RANDMMAP
30748+ if (mm->pax_flags & MF_PAX_RANDMMAP)
30749+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
30750+#endif
30751+
30752 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
30753 mm->unmap_area = arch_unmap_area_topdown;
30754 }
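
The mmap.c changes thread the mm through mmap_base() and mmap_legacy_base() so that SEGMEXEC tasks, whose usable address space is halved, and RANDMMAP deltas shift the layout accordingly. A standalone model of the top-down base computation, assuming the usual 3 GiB i386 split and its SEGMEXEC half for the illustrative task sizes:

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define MIN_GAP         (128UL * 1024 * 1024)

static unsigned long mmap_base(unsigned long task_size,
                               unsigned long stack_rlimit,
                               unsigned long rnd)
{
        unsigned long gap = stack_rlimit;
        unsigned long max_gap = task_size / 6 * 5;  /* MAX_GAP over pax_task_size */

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > max_gap)
                gap = max_gap;
        return PAGE_ALIGN(task_size - gap - rnd);
}

int main(void)
{
        unsigned long rlim = 8UL << 20;         /* 8 MiB stack limit */

        printf("plain:    %#lx\n", mmap_base(0xC0000000UL, rlim, 0));
        printf("segmexec: %#lx\n", mmap_base(0x60000000UL, rlim, 0));
        return 0;
}
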
30755diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
30756index dc0b727..f612039 100644
30757--- a/arch/x86/mm/mmio-mod.c
30758+++ b/arch/x86/mm/mmio-mod.c
30759@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
30760 break;
30761 default:
30762 {
30763- unsigned char *ip = (unsigned char *)instptr;
30764+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
30765 my_trace->opcode = MMIO_UNKNOWN_OP;
30766 my_trace->width = 0;
30767 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
30768@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
30769 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
30770 void __iomem *addr)
30771 {
30772- static atomic_t next_id;
30773+ static atomic_unchecked_t next_id;
30774 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
30775 /* These are page-unaligned. */
30776 struct mmiotrace_map map = {
30777@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
30778 .private = trace
30779 },
30780 .phys = offset,
30781- .id = atomic_inc_return(&next_id)
30782+ .id = atomic_inc_return_unchecked(&next_id)
30783 };
30784 map.map_id = trace->id;
30785
30786@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
30787 ioremap_trace_core(offset, size, addr);
30788 }
30789
30790-static void iounmap_trace_core(volatile void __iomem *addr)
30791+static void iounmap_trace_core(const volatile void __iomem *addr)
30792 {
30793 struct mmiotrace_map map = {
30794 .phys = 0,
30795@@ -328,7 +328,7 @@ not_enabled:
30796 }
30797 }
30798
30799-void mmiotrace_iounmap(volatile void __iomem *addr)
30800+void mmiotrace_iounmap(const volatile void __iomem *addr)
30801 {
30802 might_sleep();
30803 if (is_enabled()) /* recheck and proper locking in *_core() */
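
Converting next_id to atomic_unchecked_t is the standard companion to PaX REFCOUNT: checked atomics detect overflow to stop reference-count wraps, so counters where wrapping is harmless, like this trace id generator, move to the _unchecked variants. A standalone model of the distinction; the function names below are mine, not the kernel's:

#include <limits.h>
#include <stdio.h>

/* checked: detect overflow (in the kernel: trap and undo the increment) */
static int inc_checked(int *v)
{
        int n;
        if (__builtin_add_overflow(*v, 1, &n)) {
                puts("overflow detected: PAX_REFCOUNT would trap here");
                return *v;
        }
        return *v = n;
}

/* unchecked: plain wrapping increment, fine for ids and statistics */
static unsigned int inc_unchecked(unsigned int *v)
{
        return ++*v;
}

int main(void)
{
        unsigned int id = UINT_MAX;
        int refs = INT_MAX;

        printf("id wraps to %u (harmless)\n", inc_unchecked(&id));
        inc_checked(&refs);     /* a real refcount wrap would be a bug */
        return 0;
}
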
30804diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
30805index a71c4e2..301ae44 100644
30806--- a/arch/x86/mm/numa.c
30807+++ b/arch/x86/mm/numa.c
30808@@ -474,7 +474,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
30809 return true;
30810 }
30811
30812-static int __init numa_register_memblks(struct numa_meminfo *mi)
30813+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
30814 {
30815 unsigned long uninitialized_var(pfn_align);
30816 int i, nid;
30817diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
30818index d0b1773..4c3327c 100644
30819--- a/arch/x86/mm/pageattr-test.c
30820+++ b/arch/x86/mm/pageattr-test.c
30821@@ -36,7 +36,7 @@ enum {
30822
30823 static int pte_testbit(pte_t pte)
30824 {
30825- return pte_flags(pte) & _PAGE_UNUSED1;
30826+ return pte_flags(pte) & _PAGE_CPA_TEST;
30827 }
30828
30829 struct split_state {
30830diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
30831index bb32480..75f2f5e 100644
30832--- a/arch/x86/mm/pageattr.c
30833+++ b/arch/x86/mm/pageattr.c
30834@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
30835 */
30836 #ifdef CONFIG_PCI_BIOS
30837 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
30838- pgprot_val(forbidden) |= _PAGE_NX;
30839+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
30840 #endif
30841
30842 /*
30843@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
30844 * Does not cover __inittext since that is gone later on. On
30845 * 64bit we do not enforce !NX on the low mapping
30846 */
30847- if (within(address, (unsigned long)_text, (unsigned long)_etext))
30848- pgprot_val(forbidden) |= _PAGE_NX;
30849+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
30850+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
30851
30852+#ifdef CONFIG_DEBUG_RODATA
30853 /*
30854 * The .rodata section needs to be read-only. Using the pfn
30855 * catches all aliases.
30856@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
30857 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
30858 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
30859 pgprot_val(forbidden) |= _PAGE_RW;
30860+#endif
30861
30862 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
30863 /*
30864@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
30865 }
30866 #endif
30867
30868+#ifdef CONFIG_PAX_KERNEXEC
30869+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
30870+ pgprot_val(forbidden) |= _PAGE_RW;
30871+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
30872+ }
30873+#endif
30874+
30875 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
30876
30877 return prot;
30878@@ -400,23 +409,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
30879 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
30880 {
30881 /* change init_mm */
30882+ pax_open_kernel();
30883 set_pte_atomic(kpte, pte);
30884+
30885 #ifdef CONFIG_X86_32
30886 if (!SHARED_KERNEL_PMD) {
30887+
30888+#ifdef CONFIG_PAX_PER_CPU_PGD
30889+ unsigned long cpu;
30890+#else
30891 struct page *page;
30892+#endif
30893
30894+#ifdef CONFIG_PAX_PER_CPU_PGD
30895+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
30896+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
30897+#else
30898 list_for_each_entry(page, &pgd_list, lru) {
30899- pgd_t *pgd;
30900+ pgd_t *pgd = (pgd_t *)page_address(page);
30901+#endif
30902+
30903 pud_t *pud;
30904 pmd_t *pmd;
30905
30906- pgd = (pgd_t *)page_address(page) + pgd_index(address);
30907+ pgd += pgd_index(address);
30908 pud = pud_offset(pgd, address);
30909 pmd = pmd_offset(pud, address);
30910 set_pte_atomic((pte_t *)pmd, pte);
30911 }
30912 }
30913 #endif
30914+ pax_close_kernel();
30915 }
30916
30917 static int
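
A recurring idiom in the pageattr.c hunks is replacing bare _PAGE_NX with "_PAGE_NX & __supported_pte_mask": on CPUs without NX support (or with it disabled) the mask clears the bit, so the code never sets a PTE bit the hardware would reject. A standalone check of the idiom, using an illustrative mask layout:

#include <assert.h>
#include <stdint.h>

#define _PAGE_NX (1ULL << 63)

int main(void)
{
        uint64_t mask_with_nx    = ~0ULL;               /* NX supported   */
        uint64_t mask_without_nx = ~_PAGE_NX;           /* NX unavailable */

        assert((_PAGE_NX & mask_with_nx)    == _PAGE_NX);
        assert((_PAGE_NX & mask_without_nx) == 0);      /* bit dropped */
        return 0;
}
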
30918diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
30919index 6574388..87e9bef 100644
30920--- a/arch/x86/mm/pat.c
30921+++ b/arch/x86/mm/pat.c
30922@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
30923
30924 if (!entry) {
30925 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
30926- current->comm, current->pid, start, end - 1);
30927+ current->comm, task_pid_nr(current), start, end - 1);
30928 return -EINVAL;
30929 }
30930
30931@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
30932
30933 while (cursor < to) {
30934 if (!devmem_is_allowed(pfn)) {
30935- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
30936- current->comm, from, to - 1);
30937+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
30938+ current->comm, from, to - 1, cursor);
30939 return 0;
30940 }
30941 cursor += PAGE_SIZE;
30942@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
30943 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
30944 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
30945 "for [mem %#010Lx-%#010Lx]\n",
30946- current->comm, current->pid,
30947+ current->comm, task_pid_nr(current),
30948 cattr_name(flags),
30949 base, (unsigned long long)(base + size-1));
30950 return -EINVAL;
30951@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
30952 flags = lookup_memtype(paddr);
30953 if (want_flags != flags) {
30954 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
30955- current->comm, current->pid,
30956+ current->comm, task_pid_nr(current),
30957 cattr_name(want_flags),
30958 (unsigned long long)paddr,
30959 (unsigned long long)(paddr + size - 1),
30960@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
30961 free_memtype(paddr, paddr + size);
30962 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
30963 " for [mem %#010Lx-%#010Lx], got %s\n",
30964- current->comm, current->pid,
30965+ current->comm, task_pid_nr(current),
30966 cattr_name(want_flags),
30967 (unsigned long long)paddr,
30968 (unsigned long long)(paddr + size - 1),
30969diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
30970index 415f6c4..d319983 100644
30971--- a/arch/x86/mm/pat_rbtree.c
30972+++ b/arch/x86/mm/pat_rbtree.c
30973@@ -160,7 +160,7 @@ success:
30974
30975 failure:
30976 printk(KERN_INFO "%s:%d conflicting memory types "
30977- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
30978+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
30979 end, cattr_name(found_type), cattr_name(match->type));
30980 return -EBUSY;
30981 }
30982diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
30983index 9f0614d..92ae64a 100644
30984--- a/arch/x86/mm/pf_in.c
30985+++ b/arch/x86/mm/pf_in.c
30986@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
30987 int i;
30988 enum reason_type rv = OTHERS;
30989
30990- p = (unsigned char *)ins_addr;
30991+ p = (unsigned char *)ktla_ktva(ins_addr);
30992 p += skip_prefix(p, &prf);
30993 p += get_opcode(p, &opcode);
30994
30995@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
30996 struct prefix_bits prf;
30997 int i;
30998
30999- p = (unsigned char *)ins_addr;
31000+ p = (unsigned char *)ktla_ktva(ins_addr);
31001 p += skip_prefix(p, &prf);
31002 p += get_opcode(p, &opcode);
31003
31004@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
31005 struct prefix_bits prf;
31006 int i;
31007
31008- p = (unsigned char *)ins_addr;
31009+ p = (unsigned char *)ktla_ktva(ins_addr);
31010 p += skip_prefix(p, &prf);
31011 p += get_opcode(p, &opcode);
31012
31013@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
31014 struct prefix_bits prf;
31015 int i;
31016
31017- p = (unsigned char *)ins_addr;
31018+ p = (unsigned char *)ktla_ktva(ins_addr);
31019 p += skip_prefix(p, &prf);
31020 p += get_opcode(p, &opcode);
31021 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
31022@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
31023 struct prefix_bits prf;
31024 int i;
31025
31026- p = (unsigned char *)ins_addr;
31027+ p = (unsigned char *)ktla_ktva(ins_addr);
31028 p += skip_prefix(p, &prf);
31029 p += get_opcode(p, &opcode);
31030 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
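
Every decoder entry point in pf_in.c now rebases ins_addr through ktla_ktva() before reading instruction bytes. Under KERNEXEC on i386 the kernel text is reachable at two addresses separated by a fixed delta, and pointers derived from executing code (a faulting EIP, a probe address) must be shifted into the readable alias before being dereferenced. A standalone model of the round-trip; the delta is made up, not the real layout:

#include <assert.h>
#include <stdint.h>

#define KTEXT_DELTA     0x01000000UL    /* hypothetical text-to-alias offset */
#define ktla_ktva(addr) ((addr) + KTEXT_DELTA)
#define ktva_ktla(addr) ((addr) - KTEXT_DELTA)

int main(void)
{
        uintptr_t ins_addr = 0xc1000000UL;      /* e.g. a reported EIP */
        /* decoders above do: p = (unsigned char *)ktla_ktva(ins_addr); */
        uintptr_t readable = ktla_ktva(ins_addr);

        assert(ktva_ktla(readable) == ins_addr);        /* round-trips */
        return 0;
}
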
31031diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
31032index 17fda6a..f7d54a0 100644
31033--- a/arch/x86/mm/pgtable.c
31034+++ b/arch/x86/mm/pgtable.c
31035@@ -91,10 +91,67 @@ static inline void pgd_list_del(pgd_t *pgd)
31036 list_del(&page->lru);
31037 }
31038
31039-#define UNSHARED_PTRS_PER_PGD \
31040- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
31041+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31042+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
31043
31044+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
31045+{
31046+ unsigned int count = USER_PGD_PTRS;
31047
31048+ if (!pax_user_shadow_base)
31049+ return;
31050+
31051+ while (count--)
31052+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
31053+}
31054+#endif
31055+
31056+#ifdef CONFIG_PAX_PER_CPU_PGD
31057+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
31058+{
31059+ unsigned int count = USER_PGD_PTRS;
31060+
31061+ while (count--) {
31062+ pgd_t pgd;
31063+
31064+#ifdef CONFIG_X86_64
31065+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
31066+#else
31067+ pgd = *src++;
31068+#endif
31069+
31070+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31071+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
31072+#endif
31073+
31074+ *dst++ = pgd;
31075+ }
31076+
31077+}
31078+#endif
31079+
31080+#ifdef CONFIG_X86_64
31081+#define pxd_t pud_t
31082+#define pyd_t pgd_t
31083+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
31084+#define pxd_free(mm, pud) pud_free((mm), (pud))
31085+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
31086+#define pyd_offset(mm, address) pgd_offset((mm), (address))
31087+#define PYD_SIZE PGDIR_SIZE
31088+#else
31089+#define pxd_t pmd_t
31090+#define pyd_t pud_t
31091+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
31092+#define pxd_free(mm, pud) pmd_free((mm), (pud))
31093+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
31094+#define pyd_offset(mm, address) pud_offset((mm), (address))
31095+#define PYD_SIZE PUD_SIZE
31096+#endif
31097+
31098+#ifdef CONFIG_PAX_PER_CPU_PGD
31099+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
31100+static inline void pgd_dtor(pgd_t *pgd) {}
31101+#else
31102 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
31103 {
31104 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
31105@@ -135,6 +192,7 @@ static void pgd_dtor(pgd_t *pgd)
31106 pgd_list_del(pgd);
31107 spin_unlock(&pgd_lock);
31108 }
31109+#endif
31110
31111 /*
31112 * List of all pgd's needed for non-PAE so it can invalidate entries
31113@@ -147,7 +205,7 @@ static void pgd_dtor(pgd_t *pgd)
31114 * -- nyc
31115 */
31116
31117-#ifdef CONFIG_X86_PAE
31118+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
31119 /*
31120 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
31121 * updating the top-level pagetable entries to guarantee the
31122@@ -159,7 +217,7 @@ static void pgd_dtor(pgd_t *pgd)
31123 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
31124 * and initialize the kernel pmds here.
31125 */
31126-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
31127+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
31128
31129 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
31130 {
31131@@ -177,36 +235,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
31132 */
31133 flush_tlb_mm(mm);
31134 }
31135+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
31136+#define PREALLOCATED_PXDS USER_PGD_PTRS
31137 #else /* !CONFIG_X86_PAE */
31138
31139 /* No need to prepopulate any pagetable entries in non-PAE modes. */
31140-#define PREALLOCATED_PMDS 0
31141+#define PREALLOCATED_PXDS 0
31142
31143 #endif /* CONFIG_X86_PAE */
31144
31145-static void free_pmds(pmd_t *pmds[])
31146+static void free_pxds(pxd_t *pxds[])
31147 {
31148 int i;
31149
31150- for(i = 0; i < PREALLOCATED_PMDS; i++)
31151- if (pmds[i])
31152- free_page((unsigned long)pmds[i]);
31153+ for(i = 0; i < PREALLOCATED_PXDS; i++)
31154+ if (pxds[i])
31155+ free_page((unsigned long)pxds[i]);
31156 }
31157
31158-static int preallocate_pmds(pmd_t *pmds[])
31159+static int preallocate_pxds(pxd_t *pxds[])
31160 {
31161 int i;
31162 bool failed = false;
31163
31164- for(i = 0; i < PREALLOCATED_PMDS; i++) {
31165- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
31166- if (pmd == NULL)
31167+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
31168+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
31169+ if (pxd == NULL)
31170 failed = true;
31171- pmds[i] = pmd;
31172+ pxds[i] = pxd;
31173 }
31174
31175 if (failed) {
31176- free_pmds(pmds);
31177+ free_pxds(pxds);
31178 return -ENOMEM;
31179 }
31180
31181@@ -219,51 +279,55 @@ static int preallocate_pmds(pmd_t *pmds[])
31182 * preallocate which never got a corresponding vma will need to be
31183 * freed manually.
31184 */
31185-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
31186+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
31187 {
31188 int i;
31189
31190- for(i = 0; i < PREALLOCATED_PMDS; i++) {
31191+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
31192 pgd_t pgd = pgdp[i];
31193
31194 if (pgd_val(pgd) != 0) {
31195- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
31196+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
31197
31198- pgdp[i] = native_make_pgd(0);
31199+ set_pgd(pgdp + i, native_make_pgd(0));
31200
31201- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
31202- pmd_free(mm, pmd);
31203+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
31204+ pxd_free(mm, pxd);
31205 }
31206 }
31207 }
31208
31209-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
31210+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
31211 {
31212- pud_t *pud;
31213+ pyd_t *pyd;
31214 unsigned long addr;
31215 int i;
31216
31217- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
31218+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
31219 return;
31220
31221- pud = pud_offset(pgd, 0);
31222+#ifdef CONFIG_X86_64
31223+ pyd = pyd_offset(mm, 0L);
31224+#else
31225+ pyd = pyd_offset(pgd, 0L);
31226+#endif
31227
31228- for (addr = i = 0; i < PREALLOCATED_PMDS;
31229- i++, pud++, addr += PUD_SIZE) {
31230- pmd_t *pmd = pmds[i];
31231+ for (addr = i = 0; i < PREALLOCATED_PXDS;
31232+ i++, pyd++, addr += PYD_SIZE) {
31233+ pxd_t *pxd = pxds[i];
31234
31235 if (i >= KERNEL_PGD_BOUNDARY)
31236- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
31237- sizeof(pmd_t) * PTRS_PER_PMD);
31238+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
31239+ sizeof(pxd_t) * PTRS_PER_PMD);
31240
31241- pud_populate(mm, pud, pmd);
31242+ pyd_populate(mm, pyd, pxd);
31243 }
31244 }
31245
31246 pgd_t *pgd_alloc(struct mm_struct *mm)
31247 {
31248 pgd_t *pgd;
31249- pmd_t *pmds[PREALLOCATED_PMDS];
31250+ pxd_t *pxds[PREALLOCATED_PXDS];
31251
31252 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
31253
31254@@ -272,11 +336,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
31255
31256 mm->pgd = pgd;
31257
31258- if (preallocate_pmds(pmds) != 0)
31259+ if (preallocate_pxds(pxds) != 0)
31260 goto out_free_pgd;
31261
31262 if (paravirt_pgd_alloc(mm) != 0)
31263- goto out_free_pmds;
31264+ goto out_free_pxds;
31265
31266 /*
31267 * Make sure that pre-populating the pmds is atomic with
31268@@ -286,14 +350,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
31269 spin_lock(&pgd_lock);
31270
31271 pgd_ctor(mm, pgd);
31272- pgd_prepopulate_pmd(mm, pgd, pmds);
31273+ pgd_prepopulate_pxd(mm, pgd, pxds);
31274
31275 spin_unlock(&pgd_lock);
31276
31277 return pgd;
31278
31279-out_free_pmds:
31280- free_pmds(pmds);
31281+out_free_pxds:
31282+ free_pxds(pxds);
31283 out_free_pgd:
31284 free_page((unsigned long)pgd);
31285 out:
31286@@ -302,7 +366,7 @@ out:
31287
31288 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
31289 {
31290- pgd_mop_up_pmds(mm, pgd);
31291+ pgd_mop_up_pxds(mm, pgd);
31292 pgd_dtor(pgd);
31293 paravirt_pgd_free(mm, pgd);
31294 free_page((unsigned long)pgd);
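
The bulk of the pgtable.c rewrite is mechanical: the preallocation helpers are renamed from pmd-specific names to a pxd/pyd abstraction, and one macro table picks the level pair per configuration (pxd=pmd under pyd=pud on 32-bit PAE, pxd=pud under pyd=pgd on amd64 with PER_CPU_PGD), so a single loop body populates the correct level. A toy standalone demo of the same macro-aliasing technique; the types and step sizes below are illustrative:

#include <stdio.h>

typedef struct { unsigned long v; } pmd_t;
typedef struct { unsigned long v; } pud_t;

#ifdef SIXTYFOUR                        /* build with cc -DSIXTYFOUR ... */
#define pxd_t           pud_t
#define PYD_SIZE        (1UL << 30)     /* stand-in for PGDIR_SIZE */
#else
#define pxd_t           pmd_t
#define PYD_SIZE        (1UL << 21)     /* stand-in for PUD_SIZE   */
#endif

/* one body, compiled against whichever level the macros select */
static void prepopulate(pxd_t *pxds, int n)
{
        unsigned long addr = 0;
        for (int i = 0; i < n; i++, addr += PYD_SIZE)
                printf("entry %d (%p) covers %#lx\n", i, (void *)&pxds[i], addr);
}

int main(void)
{
        pxd_t table[4] = { { 0 } };

        prepopulate(table, 4);
        return 0;
}
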
31295diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
31296index a69bcb8..19068ab 100644
31297--- a/arch/x86/mm/pgtable_32.c
31298+++ b/arch/x86/mm/pgtable_32.c
31299@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
31300 return;
31301 }
31302 pte = pte_offset_kernel(pmd, vaddr);
31303+
31304+ pax_open_kernel();
31305 if (pte_val(pteval))
31306 set_pte_at(&init_mm, vaddr, pte, pteval);
31307 else
31308 pte_clear(&init_mm, vaddr, pte);
31309+ pax_close_kernel();
31310
31311 /*
31312 * It's enough to flush this one mapping.
31313diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
31314index e666cbb..61788c45 100644
31315--- a/arch/x86/mm/physaddr.c
31316+++ b/arch/x86/mm/physaddr.c
31317@@ -10,7 +10,7 @@
31318 #ifdef CONFIG_X86_64
31319
31320 #ifdef CONFIG_DEBUG_VIRTUAL
31321-unsigned long __phys_addr(unsigned long x)
31322+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
31323 {
31324 unsigned long y = x - __START_KERNEL_map;
31325
31326@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
31327 #else
31328
31329 #ifdef CONFIG_DEBUG_VIRTUAL
31330-unsigned long __phys_addr(unsigned long x)
31331+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
31332 {
31333 unsigned long phys_addr = x - PAGE_OFFSET;
31334 /* VMALLOC_* aren't constants */
31335diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
31336index 410531d..0f16030 100644
31337--- a/arch/x86/mm/setup_nx.c
31338+++ b/arch/x86/mm/setup_nx.c
31339@@ -5,8 +5,10 @@
31340 #include <asm/pgtable.h>
31341 #include <asm/proto.h>
31342
31343+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
31344 static int disable_nx __cpuinitdata;
31345
31346+#ifndef CONFIG_PAX_PAGEEXEC
31347 /*
31348 * noexec = on|off
31349 *
31350@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
31351 return 0;
31352 }
31353 early_param("noexec", noexec_setup);
31354+#endif
31355+
31356+#endif
31357
31358 void __cpuinit x86_configure_nx(void)
31359 {
31360+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
31361 if (cpu_has_nx && !disable_nx)
31362 __supported_pte_mask |= _PAGE_NX;
31363 else
31364+#endif
31365 __supported_pte_mask &= ~_PAGE_NX;
31366 }
31367
31368diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
31369index 282375f..e03a98f 100644
31370--- a/arch/x86/mm/tlb.c
31371+++ b/arch/x86/mm/tlb.c
31372@@ -48,7 +48,11 @@ void leave_mm(int cpu)
31373 BUG();
31374 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
31375 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
31376+
31377+#ifndef CONFIG_PAX_PER_CPU_PGD
31378 load_cr3(swapper_pg_dir);
31379+#endif
31380+
31381 }
31382 }
31383 EXPORT_SYMBOL_GPL(leave_mm);
31384diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
31385new file mode 100644
31386index 0000000..dace51c
31387--- /dev/null
31388+++ b/arch/x86/mm/uderef_64.c
31389@@ -0,0 +1,37 @@
31390+#include <linux/mm.h>
31391+#include <asm/pgtable.h>
31392+#include <asm/uaccess.h>
31393+
31394+#ifdef CONFIG_PAX_MEMORY_UDEREF
31395+/* PaX: due to the special call convention these functions must
31396+ * - remain leaf functions under all configurations,
31397+ * - never be called directly, only dereferenced from the wrappers.
31398+ */
31399+void __pax_open_userland(void)
31400+{
31401+ unsigned int cpu;
31402+
31403+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
31404+ return;
31405+
31406+ cpu = raw_get_cpu();
31407+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
31408+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
31409+ raw_put_cpu_no_resched();
31410+}
31411+EXPORT_SYMBOL(__pax_open_userland);
31412+
31413+void __pax_close_userland(void)
31414+{
31415+ unsigned int cpu;
31416+
31417+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
31418+ return;
31419+
31420+ cpu = raw_get_cpu();
31421+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
31422+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
31423+ raw_put_cpu_no_resched();
31424+}
31425+EXPORT_SYMBOL(__pax_close_userland);
31426+#endif
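
The new uderef_64.c supplies the slow path of UDEREF on amd64: userland is normally unreachable through the active kernel pgd, and these leaf functions flip CR3 between the per-cpu kernel and user pgds with PCID_NOFLUSH so the window opens and closes without a TLB flush. They are only reached through wrapper macros; a kernel-style sketch of the intended usage shape, hedged, with wrapper names following the PaX convention rather than code taken from this hunk:

/* conceptual shape inside a uaccess primitive; not standalone code */
pax_open_userland();                    /* CR3 -> per-cpu user pgd    */
ret = __copy_from_user(dst, src, len);  /* user half now addressable  */
pax_close_userland();                   /* CR3 -> per-cpu kernel pgd  */
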
31427diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
31428index 877b9a1..a8ecf42 100644
31429--- a/arch/x86/net/bpf_jit.S
31430+++ b/arch/x86/net/bpf_jit.S
31431@@ -9,6 +9,7 @@
31432 */
31433 #include <linux/linkage.h>
31434 #include <asm/dwarf2.h>
31435+#include <asm/alternative-asm.h>
31436
31437 /*
31438 * Calling convention :
31439@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
31440 jle bpf_slow_path_word
31441 mov (SKBDATA,%rsi),%eax
31442 bswap %eax /* ntohl() */
31443+ pax_force_retaddr
31444 ret
31445
31446 sk_load_half:
31447@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
31448 jle bpf_slow_path_half
31449 movzwl (SKBDATA,%rsi),%eax
31450 rol $8,%ax # ntohs()
31451+ pax_force_retaddr
31452 ret
31453
31454 sk_load_byte:
31455@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
31456 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
31457 jle bpf_slow_path_byte
31458 movzbl (SKBDATA,%rsi),%eax
31459+ pax_force_retaddr
31460 ret
31461
31462 /**
31463@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
31464 movzbl (SKBDATA,%rsi),%ebx
31465 and $15,%bl
31466 shl $2,%bl
31467+ pax_force_retaddr
31468 ret
31469
31470 /* rsi contains offset and can be scratched */
31471@@ -109,6 +114,7 @@ bpf_slow_path_word:
31472 js bpf_error
31473 mov -12(%rbp),%eax
31474 bswap %eax
31475+ pax_force_retaddr
31476 ret
31477
31478 bpf_slow_path_half:
31479@@ -117,12 +123,14 @@ bpf_slow_path_half:
31480 mov -12(%rbp),%ax
31481 rol $8,%ax
31482 movzwl %ax,%eax
31483+ pax_force_retaddr
31484 ret
31485
31486 bpf_slow_path_byte:
31487 bpf_slow_path_common(1)
31488 js bpf_error
31489 movzbl -12(%rbp),%eax
31490+ pax_force_retaddr
31491 ret
31492
31493 bpf_slow_path_byte_msh:
31494@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
31495 and $15,%al
31496 shl $2,%al
31497 xchg %eax,%ebx
31498+ pax_force_retaddr
31499 ret
31500
31501 #define sk_negative_common(SIZE) \
31502@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
31503 sk_negative_common(4)
31504 mov (%rax), %eax
31505 bswap %eax
31506+ pax_force_retaddr
31507 ret
31508
31509 bpf_slow_path_half_neg:
31510@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
31511 mov (%rax),%ax
31512 rol $8,%ax
31513 movzwl %ax,%eax
31514+ pax_force_retaddr
31515 ret
31516
31517 bpf_slow_path_byte_neg:
31518@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
31519 .globl sk_load_byte_negative_offset
31520 sk_negative_common(1)
31521 movzbl (%rax), %eax
31522+ pax_force_retaddr
31523 ret
31524
31525 bpf_slow_path_byte_msh_neg:
31526@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
31527 and $15,%al
31528 shl $2,%al
31529 xchg %eax,%ebx
31530+ pax_force_retaddr
31531 ret
31532
31533 bpf_error:
31534@@ -197,4 +210,5 @@ bpf_error:
31535 xor %eax,%eax
31536 mov -8(%rbp),%rbx
31537 leaveq
31538+ pax_force_retaddr
31539 ret
31540diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
31541index f66b540..3e88dfb 100644
31542--- a/arch/x86/net/bpf_jit_comp.c
31543+++ b/arch/x86/net/bpf_jit_comp.c
31544@@ -12,6 +12,7 @@
31545 #include <linux/netdevice.h>
31546 #include <linux/filter.h>
31547 #include <linux/if_vlan.h>
31548+#include <linux/random.h>
31549
31550 /*
31551 * Conventions :
31552@@ -49,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
31553 return ptr + len;
31554 }
31555
31556+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
31557+#define MAX_INSTR_CODE_SIZE 96
31558+#else
31559+#define MAX_INSTR_CODE_SIZE 64
31560+#endif
31561+
31562 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
31563
31564 #define EMIT1(b1) EMIT(b1, 1)
31565 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
31566 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
31567 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
31568+
31569+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
31570+/* original constant will appear in ecx */
31571+#define DILUTE_CONST_SEQUENCE(_off, _key) \
31572+do { \
31573+ /* mov ecx, randkey */ \
31574+ EMIT1(0xb9); \
31575+ EMIT(_key, 4); \
31576+ /* xor ecx, randkey ^ off */ \
31577+ EMIT2(0x81, 0xf1); \
31578+ EMIT((_key) ^ (_off), 4); \
31579+} while (0)
31580+
31581+#define EMIT1_off32(b1, _off) \
31582+do { \
31583+ switch (b1) { \
31584+ case 0x05: /* add eax, imm32 */ \
31585+ case 0x2d: /* sub eax, imm32 */ \
31586+ case 0x25: /* and eax, imm32 */ \
31587+ case 0x0d: /* or eax, imm32 */ \
31588+ case 0xb8: /* mov eax, imm32 */ \
31589+ case 0x35: /* xor eax, imm32 */ \
31590+ case 0x3d: /* cmp eax, imm32 */ \
31591+ case 0xa9: /* test eax, imm32 */ \
31592+ DILUTE_CONST_SEQUENCE(_off, randkey); \
31593+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
31594+ break; \
31595+ case 0xbb: /* mov ebx, imm32 */ \
31596+ DILUTE_CONST_SEQUENCE(_off, randkey); \
31597+ /* mov ebx, ecx */ \
31598+ EMIT2(0x89, 0xcb); \
31599+ break; \
31600+ case 0xbe: /* mov esi, imm32 */ \
31601+ DILUTE_CONST_SEQUENCE(_off, randkey); \
31602+ /* mov esi, ecx */ \
31603+ EMIT2(0x89, 0xce); \
31604+ break; \
31605+ case 0xe8: /* call rel imm32, always to known funcs */ \
31606+ EMIT1(b1); \
31607+ EMIT(_off, 4); \
31608+ break; \
31609+ case 0xe9: /* jmp rel imm32 */ \
31610+ EMIT1(b1); \
31611+ EMIT(_off, 4); \
31612+ /* prevent fall-through, we're not called if off = 0 */ \
31613+ EMIT(0xcccccccc, 4); \
31614+ EMIT(0xcccccccc, 4); \
31615+ break; \
31616+ default: \
31617+ BUILD_BUG(); \
31618+ } \
31619+} while (0)
31620+
31621+#define EMIT2_off32(b1, b2, _off) \
31622+do { \
31623+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
31624+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
31625+ EMIT(randkey, 4); \
31626+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
31627+ EMIT((_off) - randkey, 4); \
31628+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
31629+ DILUTE_CONST_SEQUENCE(_off, randkey); \
31630+ /* imul eax, ecx */ \
31631+ EMIT3(0x0f, 0xaf, 0xc1); \
31632+ } else { \
31633+ BUILD_BUG(); \
31634+ } \
31635+} while (0)
31636+#else
31637 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
31638+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
31639+#endif
31640
31641 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
31642 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
31643@@ -90,6 +168,24 @@ do { \
31644 #define X86_JBE 0x76
31645 #define X86_JA 0x77
31646
31647+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
31648+#define APPEND_FLOW_VERIFY() \
31649+do { \
31650+ /* mov ecx, randkey */ \
31651+ EMIT1(0xb9); \
31652+ EMIT(randkey, 4); \
31653+ /* cmp ecx, randkey */ \
31654+ EMIT2(0x81, 0xf9); \
31655+ EMIT(randkey, 4); \
31656+ /* jz after 8 int 3s */ \
31657+ EMIT2(0x74, 0x08); \
31658+ EMIT(0xcccccccc, 4); \
31659+ EMIT(0xcccccccc, 4); \
31660+} while (0)
31661+#else
31662+#define APPEND_FLOW_VERIFY() do { } while (0)
31663+#endif
31664+
31665 #define EMIT_COND_JMP(op, offset) \
31666 do { \
31667 if (is_near(offset)) \
31668@@ -97,6 +193,7 @@ do { \
31669 else { \
31670 EMIT2(0x0f, op + 0x10); \
31671 EMIT(offset, 4); /* jxx .+off32 */ \
31672+ APPEND_FLOW_VERIFY(); \
31673 } \
31674 } while (0)
31675
31676@@ -121,6 +218,11 @@ static inline void bpf_flush_icache(void *start, void *end)
31677 set_fs(old_fs);
31678 }
31679
31680+struct bpf_jit_work {
31681+ struct work_struct work;
31682+ void *image;
31683+};
31684+
31685 #define CHOOSE_LOAD_FUNC(K, func) \
31686 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
31687
31688@@ -146,7 +248,7 @@ static int pkt_type_offset(void)
31689
31690 void bpf_jit_compile(struct sk_filter *fp)
31691 {
31692- u8 temp[64];
31693+ u8 temp[MAX_INSTR_CODE_SIZE];
31694 u8 *prog;
31695 unsigned int proglen, oldproglen = 0;
31696 int ilen, i;
31697@@ -159,6 +261,9 @@ void bpf_jit_compile(struct sk_filter *fp)
31698 unsigned int *addrs;
31699 const struct sock_filter *filter = fp->insns;
31700 int flen = fp->len;
31701+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
31702+ unsigned int randkey;
31703+#endif
31704
31705 if (!bpf_jit_enable)
31706 return;
31707@@ -167,11 +272,19 @@ void bpf_jit_compile(struct sk_filter *fp)
31708 if (addrs == NULL)
31709 return;
31710
31711+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
31712+ if (!fp->work)
31713+ goto out;
31714+
31715+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
31716+ randkey = get_random_int();
31717+#endif
31718+
31719 /* Before first pass, make a rough estimation of addrs[]
31720- * each bpf instruction is translated to less than 64 bytes
31721+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
31722 */
31723 for (proglen = 0, i = 0; i < flen; i++) {
31724- proglen += 64;
31725+ proglen += MAX_INSTR_CODE_SIZE;
31726 addrs[i] = proglen;
31727 }
31728 cleanup_addr = proglen; /* epilogue address */
31729@@ -282,10 +395,8 @@ void bpf_jit_compile(struct sk_filter *fp)
31730 case BPF_S_ALU_MUL_K: /* A *= K */
31731 if (is_imm8(K))
31732 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
31733- else {
31734- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
31735- EMIT(K, 4);
31736- }
31737+ else
31738+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
31739 break;
31740 case BPF_S_ALU_DIV_X: /* A /= X; */
31741 seen |= SEEN_XREG;
31742@@ -325,13 +436,23 @@ void bpf_jit_compile(struct sk_filter *fp)
31743 break;
31744 case BPF_S_ALU_MOD_K: /* A %= K; */
31745 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
31746+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
31747+ DILUTE_CONST_SEQUENCE(K, randkey);
31748+#else
31749 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
31750+#endif
31751 EMIT2(0xf7, 0xf1); /* div %ecx */
31752 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
31753 break;
31754 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
31755+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
31756+ DILUTE_CONST_SEQUENCE(K, randkey);
31757+ // imul rax, rcx
31758+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
31759+#else
31760 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
31761 EMIT(K, 4);
31762+#endif
31763 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
31764 break;
31765 case BPF_S_ALU_AND_X:
31766@@ -602,8 +723,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
31767 if (is_imm8(K)) {
31768 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
31769 } else {
31770- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
31771- EMIT(K, 4);
31772+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
31773 }
31774 } else {
31775 EMIT2(0x89,0xde); /* mov %ebx,%esi */
31776@@ -686,17 +806,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
31777 break;
31778 default:
31779 /* hmm, too complex filter, give up with jit compiler */
31780- goto out;
31781+ goto error;
31782 }
31783 ilen = prog - temp;
31784 if (image) {
31785 if (unlikely(proglen + ilen > oldproglen)) {
31786 pr_err("bpb_jit_compile fatal error\n");
31787- kfree(addrs);
31788- module_free(NULL, image);
31789- return;
31790+ module_free_exec(NULL, image);
31791+ goto error;
31792 }
31793+ pax_open_kernel();
31794 memcpy(image + proglen, temp, ilen);
31795+ pax_close_kernel();
31796 }
31797 proglen += ilen;
31798 addrs[i] = proglen;
31799@@ -717,11 +838,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
31800 break;
31801 }
31802 if (proglen == oldproglen) {
31803- image = module_alloc(max_t(unsigned int,
31804- proglen,
31805- sizeof(struct work_struct)));
31806+ image = module_alloc_exec(proglen);
31807 if (!image)
31808- goto out;
31809+ goto error;
31810 }
31811 oldproglen = proglen;
31812 }
31813@@ -732,7 +851,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
31814 if (image) {
31815 bpf_flush_icache(image, image + proglen);
31816 fp->bpf_func = (void *)image;
31817- }
31818+ } else
31819+error:
31820+ kfree(fp->work);
31821+
31822 out:
31823 kfree(addrs);
31824 return;
31825@@ -740,18 +862,20 @@ out:
31826
31827 static void jit_free_defer(struct work_struct *arg)
31828 {
31829- module_free(NULL, arg);
31830+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
31831+ kfree(arg);
31832 }
31833
31834 /* run from softirq, we must use a work_struct to call
31835- * module_free() from process context
31836+ * module_free_exec() from process context
31837 */
31838 void bpf_jit_free(struct sk_filter *fp)
31839 {
31840 if (fp->bpf_func != sk_run_filter) {
31841- struct work_struct *work = (struct work_struct *)fp->bpf_func;
31842+ struct work_struct *work = &fp->work->work;
31843
31844 INIT_WORK(work, jit_free_defer);
31845+ fp->work->image = fp->bpf_func;
31846 schedule_work(work);
31847 }
31848 }
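
The GRKERNSEC_JIT_HARDEN machinery above is constant blinding against JIT spraying: the 32-bit immediates in a BPF filter are attacker-chosen, so DILUTE_CONST_SEQUENCE never emits K literally; it emits "mov ecx, randkey" followed by "xor ecx, randkey ^ K", leaving only two unpredictable values in executable memory while %ecx still ends up holding K (hence the larger MAX_INSTR_CODE_SIZE per-instruction estimate). A standalone check of the reconstruction:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

/* model of DILUTE_CONST_SEQUENCE: the two emitted immediates are
 * randkey and randkey ^ K; neither equals K, yet the register does. */
static uint32_t blind_load(uint32_t K, uint32_t randkey)
{
        uint32_t ecx = randkey;         /* mov ecx, randkey     */
        ecx ^= randkey ^ K;             /* xor ecx, randkey ^ K */
        return ecx;
}

int main(void)
{
        uint32_t K = 0xdeadbeef;        /* attacker-chosen immediate */
        uint32_t key;

        srand((unsigned)time(NULL));
        key = (uint32_t)rand() << 16 ^ (uint32_t)rand();

        assert(blind_load(K, key) == K);
        /* the bytes in the JIT stream are key and key ^ K; with a
         * secret key, K itself never appears literally. */
        return 0;
}
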
31849diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
31850index d6aa6e8..266395a 100644
31851--- a/arch/x86/oprofile/backtrace.c
31852+++ b/arch/x86/oprofile/backtrace.c
31853@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
31854 struct stack_frame_ia32 *fp;
31855 unsigned long bytes;
31856
31857- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
31858+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
31859 if (bytes != sizeof(bufhead))
31860 return NULL;
31861
31862- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
31863+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
31864
31865 oprofile_add_trace(bufhead[0].return_address);
31866
31867@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
31868 struct stack_frame bufhead[2];
31869 unsigned long bytes;
31870
31871- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
31872+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
31873 if (bytes != sizeof(bufhead))
31874 return NULL;
31875
31876@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
31877 {
31878 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
31879
31880- if (!user_mode_vm(regs)) {
31881+ if (!user_mode(regs)) {
31882 unsigned long stack = kernel_stack_pointer(regs);
31883 if (depth)
31884 dump_trace(NULL, regs, (unsigned long *)stack, 0,
31885diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
31886index 48768df..ba9143c 100644
31887--- a/arch/x86/oprofile/nmi_int.c
31888+++ b/arch/x86/oprofile/nmi_int.c
31889@@ -23,6 +23,7 @@
31890 #include <asm/nmi.h>
31891 #include <asm/msr.h>
31892 #include <asm/apic.h>
31893+#include <asm/pgtable.h>
31894
31895 #include "op_counter.h"
31896 #include "op_x86_model.h"
31897@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
31898 if (ret)
31899 return ret;
31900
31901- if (!model->num_virt_counters)
31902- model->num_virt_counters = model->num_counters;
31903+ if (!model->num_virt_counters) {
31904+ pax_open_kernel();
31905+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
31906+ pax_close_kernel();
31907+ }
31908
31909 mux_init(ops);
31910
31911diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
31912index b2b9443..be58856 100644
31913--- a/arch/x86/oprofile/op_model_amd.c
31914+++ b/arch/x86/oprofile/op_model_amd.c
31915@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
31916 num_counters = AMD64_NUM_COUNTERS;
31917 }
31918
31919- op_amd_spec.num_counters = num_counters;
31920- op_amd_spec.num_controls = num_counters;
31921- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
31922+ pax_open_kernel();
31923+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
31924+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
31925+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
31926+ pax_close_kernel();
31927
31928 return 0;
31929 }
31930diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
31931index d90528e..0127e2b 100644
31932--- a/arch/x86/oprofile/op_model_ppro.c
31933+++ b/arch/x86/oprofile/op_model_ppro.c
31934@@ -19,6 +19,7 @@
31935 #include <asm/msr.h>
31936 #include <asm/apic.h>
31937 #include <asm/nmi.h>
31938+#include <asm/pgtable.h>
31939
31940 #include "op_x86_model.h"
31941 #include "op_counter.h"
31942@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
31943
31944 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
31945
31946- op_arch_perfmon_spec.num_counters = num_counters;
31947- op_arch_perfmon_spec.num_controls = num_counters;
31948+ pax_open_kernel();
31949+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
31950+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
31951+ pax_close_kernel();
31952 }
31953
31954 static int arch_perfmon_init(struct oprofile_operations *ignore)
31955diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
31956index 71e8a67..6a313bb 100644
31957--- a/arch/x86/oprofile/op_x86_model.h
31958+++ b/arch/x86/oprofile/op_x86_model.h
31959@@ -52,7 +52,7 @@ struct op_x86_model_spec {
31960 void (*switch_ctrl)(struct op_x86_model_spec const *model,
31961 struct op_msrs const * const msrs);
31962 #endif
31963-};
31964+} __do_const;
31965
31966 struct op_counter_config;
31967
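
__do_const is a marker consumed by PaX's constify gcc plugin: a structure of function pointers tagged with it is treated as const and placed in read-only memory even when not declared const, which is why the oprofile hunks earlier write the num_*counters fields through explicit casts inside a pax_open_kernel()/pax_close_kernel() window. A kernel-style sketch of that pattern; the struct is abbreviated and the attribute and wrappers are PaX's:

/* sketch: constified ops table plus the sanctioned write pattern */
struct op_x86_model_spec {
        unsigned int num_counters;
        unsigned int num_virt_counters;
        int (*init)(struct oprofile_operations *ops);
        /* ... */
} __do_const;                           /* plugin forces this into .rodata */

static struct op_x86_model_spec op_amd_spec = { /* ... */ };

/* rare, deliberate init-time write through the const qualifier: */
pax_open_kernel();
*(unsigned int *)&op_amd_spec.num_counters = num_counters;
pax_close_kernel();
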
31968diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
31969index e9e6ed5..e47ae67 100644
31970--- a/arch/x86/pci/amd_bus.c
31971+++ b/arch/x86/pci/amd_bus.c
31972@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
31973 return NOTIFY_OK;
31974 }
31975
31976-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
31977+static struct notifier_block amd_cpu_notifier = {
31978 .notifier_call = amd_cpu_notify,
31979 };
31980
31981diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
31982index 372e9b8..e775a6c 100644
31983--- a/arch/x86/pci/irq.c
31984+++ b/arch/x86/pci/irq.c
31985@@ -50,7 +50,7 @@ struct irq_router {
31986 struct irq_router_handler {
31987 u16 vendor;
31988 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
31989-};
31990+} __do_const;
31991
31992 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
31993 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
31994@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
31995 return 0;
31996 }
31997
31998-static __initdata struct irq_router_handler pirq_routers[] = {
31999+static __initconst const struct irq_router_handler pirq_routers[] = {
32000 { PCI_VENDOR_ID_INTEL, intel_router_probe },
32001 { PCI_VENDOR_ID_AL, ali_router_probe },
32002 { PCI_VENDOR_ID_ITE, ite_router_probe },
32003@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
32004 static void __init pirq_find_router(struct irq_router *r)
32005 {
32006 struct irq_routing_table *rt = pirq_table;
32007- struct irq_router_handler *h;
32008+ const struct irq_router_handler *h;
32009
32010 #ifdef CONFIG_PCI_BIOS
32011 if (!rt->signature) {
32012@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
32013 return 0;
32014 }
32015
32016-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
32017+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
32018 {
32019 .callback = fix_broken_hp_bios_irq9,
32020 .ident = "HP Pavilion N5400 Series Laptop",
32021diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
32022index 6eb18c4..20d83de 100644
32023--- a/arch/x86/pci/mrst.c
32024+++ b/arch/x86/pci/mrst.c
32025@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
32026 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
32027 pci_mmcfg_late_init();
32028 pcibios_enable_irq = mrst_pci_irq_enable;
32029- pci_root_ops = pci_mrst_ops;
32030+ pax_open_kernel();
32031+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
32032+ pax_close_kernel();
32033 pci_soc_mode = 1;
32034 /* Continue with standard init */
32035 return 1;
32036diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
32037index c77b24a..c979855 100644
32038--- a/arch/x86/pci/pcbios.c
32039+++ b/arch/x86/pci/pcbios.c
32040@@ -79,7 +79,7 @@ union bios32 {
32041 static struct {
32042 unsigned long address;
32043 unsigned short segment;
32044-} bios32_indirect = { 0, __KERNEL_CS };
32045+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
32046
32047 /*
32048 * Returns the entry point for the given service, NULL on error
32049@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
32050 unsigned long length; /* %ecx */
32051 unsigned long entry; /* %edx */
32052 unsigned long flags;
32053+ struct desc_struct d, *gdt;
32054
32055 local_irq_save(flags);
32056- __asm__("lcall *(%%edi); cld"
32057+
32058+ gdt = get_cpu_gdt_table(smp_processor_id());
32059+
32060+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
32061+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
32062+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
32063+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
32064+
32065+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
32066 : "=a" (return_code),
32067 "=b" (address),
32068 "=c" (length),
32069 "=d" (entry)
32070 : "0" (service),
32071 "1" (0),
32072- "D" (&bios32_indirect));
32073+ "D" (&bios32_indirect),
32074+ "r"(__PCIBIOS_DS)
32075+ : "memory");
32076+
32077+ pax_open_kernel();
32078+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
32079+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
32080+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
32081+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
32082+ pax_close_kernel();
32083+
32084 local_irq_restore(flags);
32085
32086 switch (return_code) {
32087- case 0:
32088- return address + entry;
32089- case 0x80: /* Not present */
32090- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
32091- return 0;
32092- default: /* Shouldn't happen */
32093- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
32094- service, return_code);
32095+ case 0: {
32096+ int cpu;
32097+ unsigned char flags;
32098+
32099+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
32100+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
32101+ printk(KERN_WARNING "bios32_service: not valid\n");
32102 return 0;
32103+ }
32104+ address = address + PAGE_OFFSET;
32105+ length += 16UL; /* some BIOSs underreport this... */
32106+ flags = 4;
32107+ if (length >= 64*1024*1024) {
32108+ length >>= PAGE_SHIFT;
32109+ flags |= 8;
32110+ }
32111+
32112+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32113+ gdt = get_cpu_gdt_table(cpu);
32114+ pack_descriptor(&d, address, length, 0x9b, flags);
32115+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
32116+ pack_descriptor(&d, address, length, 0x93, flags);
32117+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
32118+ }
32119+ return entry;
32120+ }
32121+ case 0x80: /* Not present */
32122+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
32123+ return 0;
32124+ default: /* Shouldn't happen */
32125+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
32126+ service, return_code);
32127+ return 0;
32128 }
32129 }
32130
32131 static struct {
32132 unsigned long address;
32133 unsigned short segment;
32134-} pci_indirect = { 0, __KERNEL_CS };
32135+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
32136
32137-static int pci_bios_present;
32138+static int pci_bios_present __read_only;
32139
32140 static int check_pcibios(void)
32141 {
32142@@ -131,11 +174,13 @@ static int check_pcibios(void)
32143 unsigned long flags, pcibios_entry;
32144
32145 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
32146- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
32147+ pci_indirect.address = pcibios_entry;
32148
32149 local_irq_save(flags);
32150- __asm__(
32151- "lcall *(%%edi); cld\n\t"
32152+ __asm__("movw %w6, %%ds\n\t"
32153+ "lcall *%%ss:(%%edi); cld\n\t"
32154+ "push %%ss\n\t"
32155+ "pop %%ds\n\t"
32156 "jc 1f\n\t"
32157 "xor %%ah, %%ah\n"
32158 "1:"
32159@@ -144,7 +189,8 @@ static int check_pcibios(void)
32160 "=b" (ebx),
32161 "=c" (ecx)
32162 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
32163- "D" (&pci_indirect)
32164+ "D" (&pci_indirect),
32165+ "r" (__PCIBIOS_DS)
32166 : "memory");
32167 local_irq_restore(flags);
32168
32169@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32170
32171 switch (len) {
32172 case 1:
32173- __asm__("lcall *(%%esi); cld\n\t"
32174+ __asm__("movw %w6, %%ds\n\t"
32175+ "lcall *%%ss:(%%esi); cld\n\t"
32176+ "push %%ss\n\t"
32177+ "pop %%ds\n\t"
32178 "jc 1f\n\t"
32179 "xor %%ah, %%ah\n"
32180 "1:"
32181@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32182 : "1" (PCIBIOS_READ_CONFIG_BYTE),
32183 "b" (bx),
32184 "D" ((long)reg),
32185- "S" (&pci_indirect));
32186+ "S" (&pci_indirect),
32187+ "r" (__PCIBIOS_DS));
32188 /*
32189 * Zero-extend the result beyond 8 bits, do not trust the
32190 * BIOS having done it:
32191@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32192 *value &= 0xff;
32193 break;
32194 case 2:
32195- __asm__("lcall *(%%esi); cld\n\t"
32196+ __asm__("movw %w6, %%ds\n\t"
32197+ "lcall *%%ss:(%%esi); cld\n\t"
32198+ "push %%ss\n\t"
32199+ "pop %%ds\n\t"
32200 "jc 1f\n\t"
32201 "xor %%ah, %%ah\n"
32202 "1:"
32203@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32204 : "1" (PCIBIOS_READ_CONFIG_WORD),
32205 "b" (bx),
32206 "D" ((long)reg),
32207- "S" (&pci_indirect));
32208+ "S" (&pci_indirect),
32209+ "r" (__PCIBIOS_DS));
32210 /*
32211 * Zero-extend the result beyond 16 bits, do not trust the
32212 * BIOS having done it:
32213@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32214 *value &= 0xffff;
32215 break;
32216 case 4:
32217- __asm__("lcall *(%%esi); cld\n\t"
32218+ __asm__("movw %w6, %%ds\n\t"
32219+ "lcall *%%ss:(%%esi); cld\n\t"
32220+ "push %%ss\n\t"
32221+ "pop %%ds\n\t"
32222 "jc 1f\n\t"
32223 "xor %%ah, %%ah\n"
32224 "1:"
32225@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32226 : "1" (PCIBIOS_READ_CONFIG_DWORD),
32227 "b" (bx),
32228 "D" ((long)reg),
32229- "S" (&pci_indirect));
32230+ "S" (&pci_indirect),
32231+ "r" (__PCIBIOS_DS));
32232 break;
32233 }
32234
32235@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
32236
32237 switch (len) {
32238 case 1:
32239- __asm__("lcall *(%%esi); cld\n\t"
32240+ __asm__("movw %w6, %%ds\n\t"
32241+ "lcall *%%ss:(%%esi); cld\n\t"
32242+ "push %%ss\n\t"
32243+ "pop %%ds\n\t"
32244 "jc 1f\n\t"
32245 "xor %%ah, %%ah\n"
32246 "1:"
32247@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
32248 "c" (value),
32249 "b" (bx),
32250 "D" ((long)reg),
32251- "S" (&pci_indirect));
32252+ "S" (&pci_indirect),
32253+ "r" (__PCIBIOS_DS));
32254 break;
32255 case 2:
32256- __asm__("lcall *(%%esi); cld\n\t"
32257+ __asm__("movw %w6, %%ds\n\t"
32258+ "lcall *%%ss:(%%esi); cld\n\t"
32259+ "push %%ss\n\t"
32260+ "pop %%ds\n\t"
32261 "jc 1f\n\t"
32262 "xor %%ah, %%ah\n"
32263 "1:"
32264@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
32265 "c" (value),
32266 "b" (bx),
32267 "D" ((long)reg),
32268- "S" (&pci_indirect));
32269+ "S" (&pci_indirect),
32270+ "r" (__PCIBIOS_DS));
32271 break;
32272 case 4:
32273- __asm__("lcall *(%%esi); cld\n\t"
32274+ __asm__("movw %w6, %%ds\n\t"
32275+ "lcall *%%ss:(%%esi); cld\n\t"
32276+ "push %%ss\n\t"
32277+ "pop %%ds\n\t"
32278 "jc 1f\n\t"
32279 "xor %%ah, %%ah\n"
32280 "1:"
32281@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
32282 "c" (value),
32283 "b" (bx),
32284 "D" ((long)reg),
32285- "S" (&pci_indirect));
32286+ "S" (&pci_indirect),
32287+ "r" (__PCIBIOS_DS));
32288 break;
32289 }
32290
32291@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
32292
32293 DBG("PCI: Fetching IRQ routing table... ");
32294 __asm__("push %%es\n\t"
32295+ "movw %w8, %%ds\n\t"
32296 "push %%ds\n\t"
32297 "pop %%es\n\t"
32298- "lcall *(%%esi); cld\n\t"
32299+ "lcall *%%ss:(%%esi); cld\n\t"
32300 "pop %%es\n\t"
32301+ "push %%ss\n\t"
32302+ "pop %%ds\n"
32303 "jc 1f\n\t"
32304 "xor %%ah, %%ah\n"
32305 "1:"
32306@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
32307 "1" (0),
32308 "D" ((long) &opt),
32309 "S" (&pci_indirect),
32310- "m" (opt)
32311+ "m" (opt),
32312+ "r" (__PCIBIOS_DS)
32313 : "memory");
32314 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
32315 if (ret & 0xff00)
32316@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
32317 {
32318 int ret;
32319
32320- __asm__("lcall *(%%esi); cld\n\t"
32321+ __asm__("movw %w5, %%ds\n\t"
32322+ "lcall *%%ss:(%%esi); cld\n\t"
32323+ "push %%ss\n\t"
32324+ "pop %%ds\n"
32325 "jc 1f\n\t"
32326 "xor %%ah, %%ah\n"
32327 "1:"
32328@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
32329 : "0" (PCIBIOS_SET_PCI_HW_INT),
32330 "b" ((dev->bus->number << 8) | dev->devfn),
32331 "c" ((irq << 8) | (pin + 10)),
32332- "S" (&pci_indirect));
32333+ "S" (&pci_indirect),
32334+ "r" (__PCIBIOS_DS));
32335 return !(ret & 0xff00);
32336 }
32337 EXPORT_SYMBOL(pcibios_set_irq_routing);
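
The pcbios.c rewrite stops executing the 32-bit BIOS through the flat __KERNEL_CS/__KERNEL_DS: bios32_service() now validates the advertised base and length, installs dedicated __PCIBIOS_CS/__PCIBIOS_DS descriptors limited to the BIOS region on every CPU, and each lcall swaps %ds to __PCIBIOS_DS around the call, so a misbehaving BIOS can only address its own segment. The limit encoding deserves spelling out: a 20-bit descriptor limit covers at most 1 MiB byte-granular, so large regions flip to 4 KiB granularity. A standalone model of that switch from the success path, with a hypothetical region size:

#include <assert.h>

int main(void)
{
        /* hypothetical region reported by the BIOS32 directory */
        unsigned long length = 80UL * 1024 * 1024;
        unsigned char flags  = 4;               /* 32-bit segment */

        /* large regions switch to page granularity (the "flags |= 8" hunk) */
        if (length >= 64UL * 1024 * 1024) {
                length >>= 12;                  /* PAGE_SHIFT: count pages   */
                flags |= 8;                     /* G bit: limit counts pages */
        }

        assert(flags == 0xc);
        assert(length == (80UL * 1024 * 1024) >> 12);
        return 0;
}
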
32338diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
32339index 40e4469..d915bf9 100644
32340--- a/arch/x86/platform/efi/efi_32.c
32341+++ b/arch/x86/platform/efi/efi_32.c
32342@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
32343 {
32344 struct desc_ptr gdt_descr;
32345
32346+#ifdef CONFIG_PAX_KERNEXEC
32347+ struct desc_struct d;
32348+#endif
32349+
32350 local_irq_save(efi_rt_eflags);
32351
32352 load_cr3(initial_page_table);
32353 __flush_tlb_all();
32354
32355+#ifdef CONFIG_PAX_KERNEXEC
32356+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
32357+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
32358+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
32359+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
32360+#endif
32361+
32362 gdt_descr.address = __pa(get_cpu_gdt_table(0));
32363 gdt_descr.size = GDT_SIZE - 1;
32364 load_gdt(&gdt_descr);
32365@@ -58,11 +69,24 @@ void efi_call_phys_epilog(void)
32366 {
32367 struct desc_ptr gdt_descr;
32368
32369+#ifdef CONFIG_PAX_KERNEXEC
32370+ struct desc_struct d;
32371+
32372+ memset(&d, 0, sizeof d);
32373+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
32374+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
32375+#endif
32376+
32377 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
32378 gdt_descr.size = GDT_SIZE - 1;
32379 load_gdt(&gdt_descr);
32380
32381+#ifdef CONFIG_PAX_PER_CPU_PGD
32382+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
32383+#else
32384 load_cr3(swapper_pg_dir);
32385+#endif
32386+
32387 __flush_tlb_all();
32388
32389 local_irq_restore(efi_rt_eflags);
32390diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
32391index 39a0e7f1..872396e 100644
32392--- a/arch/x86/platform/efi/efi_64.c
32393+++ b/arch/x86/platform/efi/efi_64.c
32394@@ -76,6 +76,11 @@ void __init efi_call_phys_prelog(void)
32395 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
32396 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
32397 }
32398+
32399+#ifdef CONFIG_PAX_PER_CPU_PGD
32400+ load_cr3(swapper_pg_dir);
32401+#endif
32402+
32403 __flush_tlb_all();
32404 }
32405
32406@@ -89,6 +94,11 @@ void __init efi_call_phys_epilog(void)
32407 for (pgd = 0; pgd < n_pgds; pgd++)
32408 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
32409 kfree(save_pgd);
32410+
32411+#ifdef CONFIG_PAX_PER_CPU_PGD
32412+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
32413+#endif
32414+
32415 __flush_tlb_all();
32416 local_irq_restore(efi_flags);
32417 early_code_mapping_set_exec(0);
32418diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
32419index fbe66e6..eae5e38 100644
32420--- a/arch/x86/platform/efi/efi_stub_32.S
32421+++ b/arch/x86/platform/efi/efi_stub_32.S
32422@@ -6,7 +6,9 @@
32423 */
32424
32425 #include <linux/linkage.h>
32426+#include <linux/init.h>
32427 #include <asm/page_types.h>
32428+#include <asm/segment.h>
32429
32430 /*
32431 * efi_call_phys(void *, ...) is a function with variable parameters.
32432@@ -20,7 +22,7 @@
32433 * service functions will comply with gcc calling convention, too.
32434 */
32435
32436-.text
32437+__INIT
32438 ENTRY(efi_call_phys)
32439 /*
32440 * 0. The function can only be called in Linux kernel. So CS has been
32441@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
32442 * The mapping of lower virtual memory has been created in prelog and
32443 * epilog.
32444 */
32445- movl $1f, %edx
32446- subl $__PAGE_OFFSET, %edx
32447- jmp *%edx
32448+#ifdef CONFIG_PAX_KERNEXEC
32449+ movl $(__KERNEXEC_EFI_DS), %edx
32450+ mov %edx, %ds
32451+ mov %edx, %es
32452+ mov %edx, %ss
32453+ addl $2f,(1f)
32454+ ljmp *(1f)
32455+
32456+__INITDATA
32457+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
32458+.previous
32459+
32460+2:
32461+ subl $2b,(1b)
32462+#else
32463+ jmp 1f-__PAGE_OFFSET
32464 1:
32465+#endif
32466
32467 /*
32468 * 2. Now on the top of stack is the return
32469@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
32470 * parameter 2, ..., param n. To make things easy, we save the return
32471 * address of efi_call_phys in a global variable.
32472 */
32473- popl %edx
32474- movl %edx, saved_return_addr
32475- /* get the function pointer into ECX*/
32476- popl %ecx
32477- movl %ecx, efi_rt_function_ptr
32478- movl $2f, %edx
32479- subl $__PAGE_OFFSET, %edx
32480- pushl %edx
32481+ popl (saved_return_addr)
32482+ popl (efi_rt_function_ptr)
32483
32484 /*
32485 * 3. Clear PG bit in %CR0.
32486@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
32487 /*
32488 * 5. Call the physical function.
32489 */
32490- jmp *%ecx
32491+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
32492
32493-2:
32494 /*
32495 * 6. After EFI runtime service returns, control will return to
32496 * following instruction. We'd better readjust stack pointer first.
32497@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
32498 movl %cr0, %edx
32499 orl $0x80000000, %edx
32500 movl %edx, %cr0
32501- jmp 1f
32502-1:
32503+
32504 /*
32505 * 8. Now restore the virtual mode from flat mode by
32506 * adding EIP with PAGE_OFFSET.
32507 */
32508- movl $1f, %edx
32509- jmp *%edx
32510+#ifdef CONFIG_PAX_KERNEXEC
32511+ movl $(__KERNEL_DS), %edx
32512+ mov %edx, %ds
32513+ mov %edx, %es
32514+ mov %edx, %ss
32515+ ljmp $(__KERNEL_CS),$1f
32516+#else
32517+ jmp 1f+__PAGE_OFFSET
32518+#endif
32519 1:
32520
32521 /*
32522 * 9. Balance the stack. And because EAX contain the return value,
32523 * we'd better not clobber it.
32524 */
32525- leal efi_rt_function_ptr, %edx
32526- movl (%edx), %ecx
32527- pushl %ecx
32528+ pushl (efi_rt_function_ptr)
32529
32530 /*
32531- * 10. Push the saved return address onto the stack and return.
32532+ * 10. Return to the saved return address.
32533 */
32534- leal saved_return_addr, %edx
32535- movl (%edx), %ecx
32536- pushl %ecx
32537- ret
32538+ jmpl *(saved_return_addr)
32539 ENDPROC(efi_call_phys)
32540 .previous
32541
32542-.data
32543+__INITDATA
32544 saved_return_addr:
32545 .long 0
32546 efi_rt_function_ptr:
32547diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
32548index 4c07cca..2c8427d 100644
32549--- a/arch/x86/platform/efi/efi_stub_64.S
32550+++ b/arch/x86/platform/efi/efi_stub_64.S
32551@@ -7,6 +7,7 @@
32552 */
32553
32554 #include <linux/linkage.h>
32555+#include <asm/alternative-asm.h>
32556
32557 #define SAVE_XMM \
32558 mov %rsp, %rax; \
32559@@ -40,6 +41,7 @@ ENTRY(efi_call0)
32560 call *%rdi
32561 addq $32, %rsp
32562 RESTORE_XMM
32563+ pax_force_retaddr 0, 1
32564 ret
32565 ENDPROC(efi_call0)
32566
32567@@ -50,6 +52,7 @@ ENTRY(efi_call1)
32568 call *%rdi
32569 addq $32, %rsp
32570 RESTORE_XMM
32571+ pax_force_retaddr 0, 1
32572 ret
32573 ENDPROC(efi_call1)
32574
32575@@ -60,6 +63,7 @@ ENTRY(efi_call2)
32576 call *%rdi
32577 addq $32, %rsp
32578 RESTORE_XMM
32579+ pax_force_retaddr 0, 1
32580 ret
32581 ENDPROC(efi_call2)
32582
32583@@ -71,6 +75,7 @@ ENTRY(efi_call3)
32584 call *%rdi
32585 addq $32, %rsp
32586 RESTORE_XMM
32587+ pax_force_retaddr 0, 1
32588 ret
32589 ENDPROC(efi_call3)
32590
32591@@ -83,6 +88,7 @@ ENTRY(efi_call4)
32592 call *%rdi
32593 addq $32, %rsp
32594 RESTORE_XMM
32595+ pax_force_retaddr 0, 1
32596 ret
32597 ENDPROC(efi_call4)
32598
32599@@ -96,6 +102,7 @@ ENTRY(efi_call5)
32600 call *%rdi
32601 addq $48, %rsp
32602 RESTORE_XMM
32603+ pax_force_retaddr 0, 1
32604 ret
32605 ENDPROC(efi_call5)
32606
32607@@ -112,5 +119,6 @@ ENTRY(efi_call6)
32608 call *%rdi
32609 addq $48, %rsp
32610 RESTORE_XMM
32611+ pax_force_retaddr 0, 1
32612 ret
32613 ENDPROC(efi_call6)
32614diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
32615index a0a0a43..a48e233 100644
32616--- a/arch/x86/platform/mrst/mrst.c
32617+++ b/arch/x86/platform/mrst/mrst.c
32618@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
32619 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
32620 int sfi_mrtc_num;
32621
32622-static void mrst_power_off(void)
32623+static __noreturn void mrst_power_off(void)
32624 {
32625+ BUG();
32626 }
32627
32628-static void mrst_reboot(void)
32629+static __noreturn void mrst_reboot(void)
32630 {
32631 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
32632+ BUG();
32633 }
32634
32635 /* parse all the mtimer info to a static mtimer array */
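
Note: grsecurity declares the machine power-off/reboot hooks __noreturn, so each implementation must provably never return to its caller; the BUG() calls added above keep that contract even when the SCU IPC request fails to halt the machine. A userspace analogue, assuming a hypothetical platform_poweroff() stub:

    #include <stdlib.h>

    static void platform_poweroff(void) { /* hypothetical; may fail to halt */ }

    __attribute__((noreturn)) static void power_off(void)
    {
        platform_poweroff();
        abort();   /* guarantees we never fall out of a noreturn function */
    }
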
32636diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
32637index d6ee929..3637cb5 100644
32638--- a/arch/x86/platform/olpc/olpc_dt.c
32639+++ b/arch/x86/platform/olpc/olpc_dt.c
32640@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
32641 return res;
32642 }
32643
32644-static struct of_pdt_ops prom_olpc_ops __initdata = {
32645+static struct of_pdt_ops prom_olpc_ops __initconst = {
32646 .nextprop = olpc_dt_nextprop,
32647 .getproplen = olpc_dt_getproplen,
32648 .getproperty = olpc_dt_getproperty,
32649diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
32650index 1cf5b30..fd45732 100644
32651--- a/arch/x86/power/cpu.c
32652+++ b/arch/x86/power/cpu.c
32653@@ -137,11 +137,8 @@ static void do_fpu_end(void)
32654 static void fix_processor_context(void)
32655 {
32656 int cpu = smp_processor_id();
32657- struct tss_struct *t = &per_cpu(init_tss, cpu);
32658-#ifdef CONFIG_X86_64
32659- struct desc_struct *desc = get_cpu_gdt_table(cpu);
32660- tss_desc tss;
32661-#endif
32662+ struct tss_struct *t = init_tss + cpu;
32663+
32664 set_tss_desc(cpu, t); /*
32665 * This just modifies memory; should not be
32666 * necessary. But... This is necessary, because
32667@@ -150,10 +147,6 @@ static void fix_processor_context(void)
32668 */
32669
32670 #ifdef CONFIG_X86_64
32671- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
32672- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
32673- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
32674-
32675 syscall_init(); /* This sets MSR_*STAR and related */
32676 #endif
32677 load_TR_desc(); /* This does ltr */
32678diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
32679index a44f457..9140171 100644
32680--- a/arch/x86/realmode/init.c
32681+++ b/arch/x86/realmode/init.c
32682@@ -70,7 +70,13 @@ void __init setup_real_mode(void)
32683 __va(real_mode_header->trampoline_header);
32684
32685 #ifdef CONFIG_X86_32
32686- trampoline_header->start = __pa_symbol(startup_32_smp);
32687+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
32688+
32689+#ifdef CONFIG_PAX_KERNEXEC
32690+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
32691+#endif
32692+
32693+ trampoline_header->boot_cs = __BOOT_CS;
32694 trampoline_header->gdt_limit = __BOOT_DS + 7;
32695 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
32696 #else
32697@@ -86,7 +92,7 @@ void __init setup_real_mode(void)
32698 *trampoline_cr4_features = read_cr4();
32699
32700 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
32701- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
32702+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
32703 trampoline_pgd[511] = init_level4_pgt[511].pgd;
32704 #endif
32705 }
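
Note: the 64-bit trampoline PGD entry is copied from the kernel's direct mapping, whose upper-level entry may carry the no-execute bit under KERNEXEC; secondary CPUs fetch code through this mapping during bring-up, so the hunk masks NX off the copy. Hedged illustration (NX is bit 63 of an x86-64 paging entry):

    #include <stdint.h>

    #define PAGE_NX (1ULL << 63)   /* x86-64 no-execute bit */

    static uint64_t make_executable(uint64_t pgd_entry)
    {
        return pgd_entry & ~PAGE_NX;   /* allow instruction fetch via this entry */
    }
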
32706diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
32707index 8869287..d577672 100644
32708--- a/arch/x86/realmode/rm/Makefile
32709+++ b/arch/x86/realmode/rm/Makefile
32710@@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
32711 $(call cc-option, -fno-unit-at-a-time)) \
32712 $(call cc-option, -fno-stack-protector) \
32713 $(call cc-option, -mpreferred-stack-boundary=2)
32714+ifdef CONSTIFY_PLUGIN
32715+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
32716+endif
32717 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
32718 GCOV_PROFILE := n
32719diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
32720index a28221d..93c40f1 100644
32721--- a/arch/x86/realmode/rm/header.S
32722+++ b/arch/x86/realmode/rm/header.S
32723@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
32724 #endif
32725 /* APM/BIOS reboot */
32726 .long pa_machine_real_restart_asm
32727-#ifdef CONFIG_X86_64
32728+#ifdef CONFIG_X86_32
32729+ .long __KERNEL_CS
32730+#else
32731 .long __KERNEL32_CS
32732 #endif
32733 END(real_mode_header)
32734diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
32735index c1b2791..f9e31c7 100644
32736--- a/arch/x86/realmode/rm/trampoline_32.S
32737+++ b/arch/x86/realmode/rm/trampoline_32.S
32738@@ -25,6 +25,12 @@
32739 #include <asm/page_types.h>
32740 #include "realmode.h"
32741
32742+#ifdef CONFIG_PAX_KERNEXEC
32743+#define ta(X) (X)
32744+#else
32745+#define ta(X) (pa_ ## X)
32746+#endif
32747+
32748 .text
32749 .code16
32750
32751@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
32752
32753 cli # We should be safe anyway
32754
32755- movl tr_start, %eax # where we need to go
32756-
32757 movl $0xA5A5A5A5, trampoline_status
32758 # write marker for master knows we're running
32759
32760@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
32761 movw $1, %dx # protected mode (PE) bit
32762 lmsw %dx # into protected mode
32763
32764- ljmpl $__BOOT_CS, $pa_startup_32
32765+ ljmpl *(trampoline_header)
32766
32767 .section ".text32","ax"
32768 .code32
32769@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
32770 .balign 8
32771 GLOBAL(trampoline_header)
32772 tr_start: .space 4
32773- tr_gdt_pad: .space 2
32774+ tr_boot_cs: .space 2
32775 tr_gdt: .space 6
32776 END(trampoline_header)
32777
32778diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
32779index bb360dc..d0fd8f8 100644
32780--- a/arch/x86/realmode/rm/trampoline_64.S
32781+++ b/arch/x86/realmode/rm/trampoline_64.S
32782@@ -94,6 +94,7 @@ ENTRY(startup_32)
32783 movl %edx, %gs
32784
32785 movl pa_tr_cr4, %eax
32786+ andl $~X86_CR4_PCIDE, %eax
32787 movl %eax, %cr4 # Enable PAE mode
32788
32789 # Setup trampoline 4 level pagetables
32790@@ -107,7 +108,7 @@ ENTRY(startup_32)
32791 wrmsr
32792
32793 # Enable paging and in turn activate Long Mode
32794- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
32795+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
32796 movl %eax, %cr0
32797
32798 /*
32799diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
32800index e812034..c747134 100644
32801--- a/arch/x86/tools/Makefile
32802+++ b/arch/x86/tools/Makefile
32803@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
32804
32805 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
32806
32807-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
32808+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
32809 hostprogs-y += relocs
32810 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
32811 relocs: $(obj)/relocs
32812diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
32813index f7bab68..b6d9886 100644
32814--- a/arch/x86/tools/relocs.c
32815+++ b/arch/x86/tools/relocs.c
32816@@ -1,5 +1,7 @@
32817 /* This is included from relocs_32/64.c */
32818
32819+#include "../../../include/generated/autoconf.h"
32820+
32821 #define ElfW(type) _ElfW(ELF_BITS, type)
32822 #define _ElfW(bits, type) __ElfW(bits, type)
32823 #define __ElfW(bits, type) Elf##bits##_##type
32824@@ -11,6 +13,7 @@
32825 #define Elf_Sym ElfW(Sym)
32826
32827 static Elf_Ehdr ehdr;
32828+static Elf_Phdr *phdr;
32829
32830 struct relocs {
32831 uint32_t *offset;
32832@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
32833 }
32834 }
32835
32836+static void read_phdrs(FILE *fp)
32837+{
32838+ unsigned int i;
32839+
32840+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
32841+ if (!phdr) {
32842+ die("Unable to allocate %d program headers\n",
32843+ ehdr.e_phnum);
32844+ }
32845+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
32846+ die("Seek to %d failed: %s\n",
32847+ ehdr.e_phoff, strerror(errno));
32848+ }
32849+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
32850+ die("Cannot read ELF program headers: %s\n",
32851+ strerror(errno));
32852+ }
32853+ for(i = 0; i < ehdr.e_phnum; i++) {
32854+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
32855+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
32856+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
32857+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
32858+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
32859+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
32860+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
32861+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
32862+ }
32863+
32864+}
32865+
32866 static void read_shdrs(FILE *fp)
32867 {
32868- int i;
32869+ unsigned int i;
32870 Elf_Shdr shdr;
32871
32872 secs = calloc(ehdr.e_shnum, sizeof(struct section));
32873@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
32874
32875 static void read_strtabs(FILE *fp)
32876 {
32877- int i;
32878+ unsigned int i;
32879 for (i = 0; i < ehdr.e_shnum; i++) {
32880 struct section *sec = &secs[i];
32881 if (sec->shdr.sh_type != SHT_STRTAB) {
32882@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
32883
32884 static void read_symtabs(FILE *fp)
32885 {
32886- int i,j;
32887+ unsigned int i,j;
32888 for (i = 0; i < ehdr.e_shnum; i++) {
32889 struct section *sec = &secs[i];
32890 if (sec->shdr.sh_type != SHT_SYMTAB) {
32891@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
32892 }
32893
32894
32895-static void read_relocs(FILE *fp)
32896+static void read_relocs(FILE *fp, int use_real_mode)
32897 {
32898- int i,j;
32899+ unsigned int i,j;
32900+ uint32_t base;
32901+
32902 for (i = 0; i < ehdr.e_shnum; i++) {
32903 struct section *sec = &secs[i];
32904 if (sec->shdr.sh_type != SHT_REL_TYPE) {
32905@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
32906 die("Cannot read symbol table: %s\n",
32907 strerror(errno));
32908 }
32909+ base = 0;
32910+
32911+#ifdef CONFIG_X86_32
32912+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
32913+ if (phdr[j].p_type != PT_LOAD )
32914+ continue;
32915+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
32916+ continue;
32917+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
32918+ break;
32919+ }
32920+#endif
32921+
32922 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
32923 Elf_Rel *rel = &sec->reltab[j];
32924- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
32925+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
32926 rel->r_info = elf_xword_to_cpu(rel->r_info);
32927 #if (SHT_REL_TYPE == SHT_RELA)
32928 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
32929@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
32930
32931 static void print_absolute_symbols(void)
32932 {
32933- int i;
32934+ unsigned int i;
32935 const char *format;
32936
32937 if (ELF_BITS == 64)
32938@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
32939 for (i = 0; i < ehdr.e_shnum; i++) {
32940 struct section *sec = &secs[i];
32941 char *sym_strtab;
32942- int j;
32943+ unsigned int j;
32944
32945 if (sec->shdr.sh_type != SHT_SYMTAB) {
32946 continue;
32947@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
32948
32949 static void print_absolute_relocs(void)
32950 {
32951- int i, printed = 0;
32952+ unsigned int i, printed = 0;
32953 const char *format;
32954
32955 if (ELF_BITS == 64)
32956@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
32957 struct section *sec_applies, *sec_symtab;
32958 char *sym_strtab;
32959 Elf_Sym *sh_symtab;
32960- int j;
32961+ unsigned int j;
32962 if (sec->shdr.sh_type != SHT_REL_TYPE) {
32963 continue;
32964 }
32965@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
32966 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
32967 Elf_Sym *sym, const char *symname))
32968 {
32969- int i;
32970+ unsigned int i;
32971 /* Walk through the relocations */
32972 for (i = 0; i < ehdr.e_shnum; i++) {
32973 char *sym_strtab;
32974 Elf_Sym *sh_symtab;
32975 struct section *sec_applies, *sec_symtab;
32976- int j;
32977+ unsigned int j;
32978 struct section *sec = &secs[i];
32979
32980 if (sec->shdr.sh_type != SHT_REL_TYPE) {
32981@@ -812,6 +860,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
32982 {
32983 unsigned r_type = ELF32_R_TYPE(rel->r_info);
32984 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
32985+ char *sym_strtab = sec->link->link->strtab;
32986+
32987+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
32988+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
32989+ return 0;
32990+
32991+#ifdef CONFIG_PAX_KERNEXEC
32992+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
32993+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
32994+ return 0;
32995+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
32996+ return 0;
32997+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
32998+ return 0;
32999+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
33000+ return 0;
33001+#endif
33002
33003 switch (r_type) {
33004 case R_386_NONE:
33005@@ -950,7 +1015,7 @@ static int write32_as_text(uint32_t v, FILE *f)
33006
33007 static void emit_relocs(int as_text, int use_real_mode)
33008 {
33009- int i;
33010+ unsigned int i;
33011 int (*write_reloc)(uint32_t, FILE *) = write32;
33012 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
33013 const char *symname);
33014@@ -1026,10 +1091,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
33015 {
33016 regex_init(use_real_mode);
33017 read_ehdr(fp);
33018+ read_phdrs(fp);
33019 read_shdrs(fp);
33020 read_strtabs(fp);
33021 read_symtabs(fp);
33022- read_relocs(fp);
33023+ read_relocs(fp, use_real_mode);
33024 if (ELF_BITS == 64)
33025 percpu_init();
33026 if (show_absolute_syms) {
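
Note: the relocs changes teach the host tool to read the ELF program headers and, for CONFIG_X86_32 kernels, to rebase relocation offsets by CONFIG_PAGE_OFFSET + p_paddr - p_vaddr, with PaX-specific exclusions for per-cpu symbols and KERNEXEC-relocated text; the signed loop counters also become unsigned to match the e_shnum/e_phnum fields they iterate over. A self-contained userspace sketch of the read_phdrs() pattern, assuming a 32-bit ELF and omitting the endianness conversion:

    #include <elf.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Read the program-header table of an already-parsed ELF file. */
    static Elf32_Phdr *read_phdrs(FILE *fp, const Elf32_Ehdr *ehdr)
    {
        Elf32_Phdr *phdr = calloc(ehdr->e_phnum, sizeof(*phdr));
        if (!phdr)
            return NULL;
        if (fseek(fp, ehdr->e_phoff, SEEK_SET) < 0 ||
            fread(phdr, sizeof(*phdr), ehdr->e_phnum, fp) != ehdr->e_phnum) {
            free(phdr);
            return NULL;   /* caller reports the error */
        }
        return phdr;   /* entries still need elf_*_to_cpu() on foreign-endian hosts */
    }
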
33027diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
33028index 80ffa5b..a33bd15 100644
33029--- a/arch/x86/um/tls_32.c
33030+++ b/arch/x86/um/tls_32.c
33031@@ -260,7 +260,7 @@ out:
33032 if (unlikely(task == current &&
33033 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
33034 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
33035- "without flushed TLS.", current->pid);
33036+ "without flushed TLS.", task_pid_nr(current));
33037 }
33038
33039 return 0;
33040diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
33041index fd14be1..e3c79c0 100644
33042--- a/arch/x86/vdso/Makefile
33043+++ b/arch/x86/vdso/Makefile
33044@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
33045 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
33046 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
33047
33048-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
33049+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
33050 GCOV_PROFILE := n
33051
33052 #
33053diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
33054index 0faad64..39ef157 100644
33055--- a/arch/x86/vdso/vdso32-setup.c
33056+++ b/arch/x86/vdso/vdso32-setup.c
33057@@ -25,6 +25,7 @@
33058 #include <asm/tlbflush.h>
33059 #include <asm/vdso.h>
33060 #include <asm/proto.h>
33061+#include <asm/mman.h>
33062
33063 enum {
33064 VDSO_DISABLED = 0,
33065@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
33066 void enable_sep_cpu(void)
33067 {
33068 int cpu = get_cpu();
33069- struct tss_struct *tss = &per_cpu(init_tss, cpu);
33070+ struct tss_struct *tss = init_tss + cpu;
33071
33072 if (!boot_cpu_has(X86_FEATURE_SEP)) {
33073 put_cpu();
33074@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
33075 gate_vma.vm_start = FIXADDR_USER_START;
33076 gate_vma.vm_end = FIXADDR_USER_END;
33077 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
33078- gate_vma.vm_page_prot = __P101;
33079+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
33080
33081 return 0;
33082 }
33083@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
33084 if (compat)
33085 addr = VDSO_HIGH_BASE;
33086 else {
33087- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
33088+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
33089 if (IS_ERR_VALUE(addr)) {
33090 ret = addr;
33091 goto up_fail;
33092 }
33093 }
33094
33095- current->mm->context.vdso = (void *)addr;
33096+ current->mm->context.vdso = addr;
33097
33098 if (compat_uses_vma || !compat) {
33099 /*
33100@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
33101 }
33102
33103 current_thread_info()->sysenter_return =
33104- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
33105+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
33106
33107 up_fail:
33108 if (ret)
33109- current->mm->context.vdso = NULL;
33110+ current->mm->context.vdso = 0;
33111
33112 up_write(&mm->mmap_sem);
33113
33114@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
33115
33116 const char *arch_vma_name(struct vm_area_struct *vma)
33117 {
33118- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
33119+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
33120 return "[vdso]";
33121+
33122+#ifdef CONFIG_PAX_SEGMEXEC
33123+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
33124+ return "[vdso]";
33125+#endif
33126+
33127 return NULL;
33128 }
33129
33130@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
33131 * Check to see if the corresponding task was created in compat vdso
33132 * mode.
33133 */
33134- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
33135+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
33136 return &gate_vma;
33137 return NULL;
33138 }
33139diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
33140index 431e875..cbb23f3 100644
33141--- a/arch/x86/vdso/vma.c
33142+++ b/arch/x86/vdso/vma.c
33143@@ -16,8 +16,6 @@
33144 #include <asm/vdso.h>
33145 #include <asm/page.h>
33146
33147-unsigned int __read_mostly vdso_enabled = 1;
33148-
33149 extern char vdso_start[], vdso_end[];
33150 extern unsigned short vdso_sync_cpuid;
33151
33152@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
33153 * unaligned here as a result of stack start randomization.
33154 */
33155 addr = PAGE_ALIGN(addr);
33156- addr = align_vdso_addr(addr);
33157
33158 return addr;
33159 }
33160@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
33161 unsigned size)
33162 {
33163 struct mm_struct *mm = current->mm;
33164- unsigned long addr;
33165+ unsigned long addr = 0;
33166 int ret;
33167
33168- if (!vdso_enabled)
33169- return 0;
33170-
33171 down_write(&mm->mmap_sem);
33172+
33173+#ifdef CONFIG_PAX_RANDMMAP
33174+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
33175+#endif
33176+
33177 addr = vdso_addr(mm->start_stack, size);
33178+ addr = align_vdso_addr(addr);
33179 addr = get_unmapped_area(NULL, addr, size, 0, 0);
33180 if (IS_ERR_VALUE(addr)) {
33181 ret = addr;
33182 goto up_fail;
33183 }
33184
33185- current->mm->context.vdso = (void *)addr;
33186+ mm->context.vdso = addr;
33187
33188 ret = install_special_mapping(mm, addr, size,
33189 VM_READ|VM_EXEC|
33190 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
33191 pages);
33192- if (ret) {
33193- current->mm->context.vdso = NULL;
33194- goto up_fail;
33195- }
33196+ if (ret)
33197+ mm->context.vdso = 0;
33198
33199 up_fail:
33200 up_write(&mm->mmap_sem);
33201@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
33202 vdsox32_size);
33203 }
33204 #endif
33205-
33206-static __init int vdso_setup(char *s)
33207-{
33208- vdso_enabled = simple_strtoul(s, NULL, 0);
33209- return 0;
33210-}
33211-__setup("vdso=", vdso_setup);
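
Note: with this change the vdso is always mapped (the vdso= boot switch and vdso_enabled knob are removed) and, under PAX_RANDMMAP, the stack-relative placement hint is skipped so get_unmapped_area() picks a randomized slot. A userspace analogue of the hint logic, illustrative only:

    #include <stddef.h>
    #include <sys/mman.h>

    /* hint = NULL lets the kernel choose a (randomized) address;
     * a non-NULL hint asks for a specific neighbourhood. */
    static void *map_vdso_like(void *hint, size_t size, int randomize)
    {
        return mmap(randomize ? NULL : hint, size, PROT_READ | PROT_EXEC,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }
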
33212diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
33213index a492be2..08678da 100644
33214--- a/arch/x86/xen/enlighten.c
33215+++ b/arch/x86/xen/enlighten.c
33216@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
33217
33218 struct shared_info xen_dummy_shared_info;
33219
33220-void *xen_initial_gdt;
33221-
33222 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
33223 __read_mostly int xen_have_vector_callback;
33224 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
33225@@ -542,8 +540,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
33226 {
33227 unsigned long va = dtr->address;
33228 unsigned int size = dtr->size + 1;
33229- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
33230- unsigned long frames[pages];
33231+ unsigned long frames[65536 / PAGE_SIZE];
33232 int f;
33233
33234 /*
33235@@ -591,8 +588,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
33236 {
33237 unsigned long va = dtr->address;
33238 unsigned int size = dtr->size + 1;
33239- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
33240- unsigned long frames[pages];
33241+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
33242 int f;
33243
33244 /*
33245@@ -600,7 +596,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
33246 * 8-byte entries, or 16 4k pages..
33247 */
33248
33249- BUG_ON(size > 65536);
33250+ BUG_ON(size > GDT_SIZE);
33251 BUG_ON(va & ~PAGE_MASK);
33252
33253 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
33254@@ -985,7 +981,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
33255 return 0;
33256 }
33257
33258-static void set_xen_basic_apic_ops(void)
33259+static void __init set_xen_basic_apic_ops(void)
33260 {
33261 apic->read = xen_apic_read;
33262 apic->write = xen_apic_write;
33263@@ -1290,30 +1286,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
33264 #endif
33265 };
33266
33267-static void xen_reboot(int reason)
33268+static __noreturn void xen_reboot(int reason)
33269 {
33270 struct sched_shutdown r = { .reason = reason };
33271
33272- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
33273- BUG();
33274+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
33275+ BUG();
33276 }
33277
33278-static void xen_restart(char *msg)
33279+static __noreturn void xen_restart(char *msg)
33280 {
33281 xen_reboot(SHUTDOWN_reboot);
33282 }
33283
33284-static void xen_emergency_restart(void)
33285+static __noreturn void xen_emergency_restart(void)
33286 {
33287 xen_reboot(SHUTDOWN_reboot);
33288 }
33289
33290-static void xen_machine_halt(void)
33291+static __noreturn void xen_machine_halt(void)
33292 {
33293 xen_reboot(SHUTDOWN_poweroff);
33294 }
33295
33296-static void xen_machine_power_off(void)
33297+static __noreturn void xen_machine_power_off(void)
33298 {
33299 if (pm_power_off)
33300 pm_power_off();
33301@@ -1464,7 +1460,17 @@ asmlinkage void __init xen_start_kernel(void)
33302 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
33303
33304 /* Work out if we support NX */
33305- x86_configure_nx();
33306+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
33307+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
33308+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
33309+ unsigned l, h;
33310+
33311+ __supported_pte_mask |= _PAGE_NX;
33312+ rdmsr(MSR_EFER, l, h);
33313+ l |= EFER_NX;
33314+ wrmsr(MSR_EFER, l, h);
33315+ }
33316+#endif
33317
33318 xen_setup_features();
33319
33320@@ -1495,13 +1501,6 @@ asmlinkage void __init xen_start_kernel(void)
33321
33322 machine_ops = xen_machine_ops;
33323
33324- /*
33325- * The only reliable way to retain the initial address of the
33326- * percpu gdt_page is to remember it here, so we can go and
33327- * mark it RW later, when the initial percpu area is freed.
33328- */
33329- xen_initial_gdt = &per_cpu(gdt_page, 0);
33330-
33331 xen_smp_init();
33332
33333 #ifdef CONFIG_ACPI_NUMA
33334@@ -1700,7 +1699,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
33335 return NOTIFY_OK;
33336 }
33337
33338-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
33339+static struct notifier_block xen_hvm_cpu_notifier = {
33340 .notifier_call = xen_hvm_cpu_notify,
33341 };
33342
33343diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
33344index fdc3ba2..3daee39 100644
33345--- a/arch/x86/xen/mmu.c
33346+++ b/arch/x86/xen/mmu.c
33347@@ -1894,6 +1894,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
33348 /* L3_k[510] -> level2_kernel_pgt
33349 * L3_i[511] -> level2_fixmap_pgt */
33350 convert_pfn_mfn(level3_kernel_pgt);
33351+ convert_pfn_mfn(level3_vmalloc_start_pgt);
33352+ convert_pfn_mfn(level3_vmalloc_end_pgt);
33353+ convert_pfn_mfn(level3_vmemmap_pgt);
33354
33355 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
33356 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
33357@@ -1923,8 +1926,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
33358 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
33359 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
33360 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
33361+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
33362+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
33363+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
33364 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
33365 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
33366+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
33367 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
33368 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
33369
33370@@ -2108,6 +2115,7 @@ static void __init xen_post_allocator_init(void)
33371 pv_mmu_ops.set_pud = xen_set_pud;
33372 #if PAGETABLE_LEVELS == 4
33373 pv_mmu_ops.set_pgd = xen_set_pgd;
33374+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
33375 #endif
33376
33377 /* This will work as long as patching hasn't happened yet
33378@@ -2186,6 +2194,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
33379 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
33380 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
33381 .set_pgd = xen_set_pgd_hyper,
33382+ .set_pgd_batched = xen_set_pgd_hyper,
33383
33384 .alloc_pud = xen_alloc_pmd_init,
33385 .release_pud = xen_release_pmd_init,
33386diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
33387index d99cae8..18401e1 100644
33388--- a/arch/x86/xen/smp.c
33389+++ b/arch/x86/xen/smp.c
33390@@ -240,11 +240,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
33391 {
33392 BUG_ON(smp_processor_id() != 0);
33393 native_smp_prepare_boot_cpu();
33394-
33395- /* We've switched to the "real" per-cpu gdt, so make sure the
33396- old memory can be recycled */
33397- make_lowmem_page_readwrite(xen_initial_gdt);
33398-
33399 xen_filter_cpu_maps();
33400 xen_setup_vcpu_info_placement();
33401 }
33402@@ -314,7 +309,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
33403 ctxt->user_regs.ss = __KERNEL_DS;
33404 #ifdef CONFIG_X86_32
33405 ctxt->user_regs.fs = __KERNEL_PERCPU;
33406- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
33407+ savesegment(gs, ctxt->user_regs.gs);
33408 #else
33409 ctxt->gs_base_kernel = per_cpu_offset(cpu);
33410 #endif
33411@@ -324,8 +319,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
33412
33413 {
33414 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
33415- ctxt->user_regs.ds = __USER_DS;
33416- ctxt->user_regs.es = __USER_DS;
33417+ ctxt->user_regs.ds = __KERNEL_DS;
33418+ ctxt->user_regs.es = __KERNEL_DS;
33419
33420 xen_copy_trap_info(ctxt->trap_ctxt);
33421
33422@@ -370,13 +365,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
33423 int rc;
33424
33425 per_cpu(current_task, cpu) = idle;
33426+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
33427 #ifdef CONFIG_X86_32
33428 irq_ctx_init(cpu);
33429 #else
33430 clear_tsk_thread_flag(idle, TIF_FORK);
33431- per_cpu(kernel_stack, cpu) =
33432- (unsigned long)task_stack_page(idle) -
33433- KERNEL_STACK_OFFSET + THREAD_SIZE;
33434+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
33435 #endif
33436 xen_setup_runstate_info(cpu);
33437 xen_setup_timer(cpu);
33438@@ -651,7 +645,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
33439
33440 void __init xen_smp_init(void)
33441 {
33442- smp_ops = xen_smp_ops;
33443+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
33444 xen_fill_possible_map();
33445 xen_init_spinlocks();
33446 }
33447diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
33448index 33ca6e4..0ded929 100644
33449--- a/arch/x86/xen/xen-asm_32.S
33450+++ b/arch/x86/xen/xen-asm_32.S
33451@@ -84,14 +84,14 @@ ENTRY(xen_iret)
33452 ESP_OFFSET=4 # bytes pushed onto stack
33453
33454 /*
33455- * Store vcpu_info pointer for easy access. Do it this way to
33456- * avoid having to reload %fs
33457+ * Store vcpu_info pointer for easy access.
33458 */
33459 #ifdef CONFIG_SMP
33460- GET_THREAD_INFO(%eax)
33461- movl %ss:TI_cpu(%eax), %eax
33462- movl %ss:__per_cpu_offset(,%eax,4), %eax
33463- mov %ss:xen_vcpu(%eax), %eax
33464+ push %fs
33465+ mov $(__KERNEL_PERCPU), %eax
33466+ mov %eax, %fs
33467+ mov PER_CPU_VAR(xen_vcpu), %eax
33468+ pop %fs
33469 #else
33470 movl %ss:xen_vcpu, %eax
33471 #endif
33472diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
33473index 7faed58..ba4427c 100644
33474--- a/arch/x86/xen/xen-head.S
33475+++ b/arch/x86/xen/xen-head.S
33476@@ -19,6 +19,17 @@ ENTRY(startup_xen)
33477 #ifdef CONFIG_X86_32
33478 mov %esi,xen_start_info
33479 mov $init_thread_union+THREAD_SIZE,%esp
33480+#ifdef CONFIG_SMP
33481+ movl $cpu_gdt_table,%edi
33482+ movl $__per_cpu_load,%eax
33483+ movw %ax,__KERNEL_PERCPU + 2(%edi)
33484+ rorl $16,%eax
33485+ movb %al,__KERNEL_PERCPU + 4(%edi)
33486+ movb %ah,__KERNEL_PERCPU + 7(%edi)
33487+ movl $__per_cpu_end - 1,%eax
33488+ subl $__per_cpu_start,%eax
33489+ movw %ax,__KERNEL_PERCPU + 0(%edi)
33490+#endif
33491 #else
33492 mov %rsi,xen_start_info
33493 mov $init_thread_union+THREAD_SIZE,%rsp
33494diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
33495index a95b417..b6dbd0b 100644
33496--- a/arch/x86/xen/xen-ops.h
33497+++ b/arch/x86/xen/xen-ops.h
33498@@ -10,8 +10,6 @@
33499 extern const char xen_hypervisor_callback[];
33500 extern const char xen_failsafe_callback[];
33501
33502-extern void *xen_initial_gdt;
33503-
33504 struct trap_info;
33505 void xen_copy_trap_info(struct trap_info *traps);
33506
33507diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
33508index 525bd3d..ef888b1 100644
33509--- a/arch/xtensa/variants/dc232b/include/variant/core.h
33510+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
33511@@ -119,9 +119,9 @@
33512 ----------------------------------------------------------------------*/
33513
33514 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
33515-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
33516 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
33517 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
33518+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
33519
33520 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
33521 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
33522diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
33523index 2f33760..835e50a 100644
33524--- a/arch/xtensa/variants/fsf/include/variant/core.h
33525+++ b/arch/xtensa/variants/fsf/include/variant/core.h
33526@@ -11,6 +11,7 @@
33527 #ifndef _XTENSA_CORE_H
33528 #define _XTENSA_CORE_H
33529
33530+#include <linux/const.h>
33531
33532 /****************************************************************************
33533 Parameters Useful for Any Code, USER or PRIVILEGED
33534@@ -112,9 +113,9 @@
33535 ----------------------------------------------------------------------*/
33536
33537 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
33538-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
33539 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
33540 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
33541+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
33542
33543 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
33544 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
33545diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
33546index af00795..2bb8105 100644
33547--- a/arch/xtensa/variants/s6000/include/variant/core.h
33548+++ b/arch/xtensa/variants/s6000/include/variant/core.h
33549@@ -11,6 +11,7 @@
33550 #ifndef _XTENSA_CORE_CONFIGURATION_H
33551 #define _XTENSA_CORE_CONFIGURATION_H
33552
33553+#include <linux/const.h>
33554
33555 /****************************************************************************
33556 Parameters Useful for Any Code, USER or PRIVILEGED
33557@@ -118,9 +119,9 @@
33558 ----------------------------------------------------------------------*/
33559
33560 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
33561-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
33562 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
33563 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
33564+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
33565
33566 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
33567 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
33568diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
33569index 58916af..eb9dbcf6 100644
33570--- a/block/blk-iopoll.c
33571+++ b/block/blk-iopoll.c
33572@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
33573 }
33574 EXPORT_SYMBOL(blk_iopoll_complete);
33575
33576-static void blk_iopoll_softirq(struct softirq_action *h)
33577+static void blk_iopoll_softirq(void)
33578 {
33579 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
33580 int rearm = 0, budget = blk_iopoll_budget;
33581@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
33582 return NOTIFY_OK;
33583 }
33584
33585-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
33586+static struct notifier_block blk_iopoll_cpu_notifier = {
33587 .notifier_call = blk_iopoll_cpu_notify,
33588 };
33589
33590diff --git a/block/blk-map.c b/block/blk-map.c
33591index 623e1cd..ca1e109 100644
33592--- a/block/blk-map.c
33593+++ b/block/blk-map.c
33594@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
33595 if (!len || !kbuf)
33596 return -EINVAL;
33597
33598- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
33599+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
33600 if (do_copy)
33601 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
33602 else
33603diff --git a/block/blk-softirq.c b/block/blk-softirq.c
33604index 467c8de..f3628c5 100644
33605--- a/block/blk-softirq.c
33606+++ b/block/blk-softirq.c
33607@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
33608 * Softirq action handler - move entries to local list and loop over them
33609 * while passing them to the queue registered handler.
33610 */
33611-static void blk_done_softirq(struct softirq_action *h)
33612+static void blk_done_softirq(void)
33613 {
33614 struct list_head *cpu_list, local_list;
33615
33616@@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
33617 return NOTIFY_OK;
33618 }
33619
33620-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
33621+static struct notifier_block blk_cpu_notifier = {
33622 .notifier_call = blk_cpu_notify,
33623 };
33624
33625diff --git a/block/bsg.c b/block/bsg.c
33626index 420a5a9..23834aa 100644
33627--- a/block/bsg.c
33628+++ b/block/bsg.c
33629@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
33630 struct sg_io_v4 *hdr, struct bsg_device *bd,
33631 fmode_t has_write_perm)
33632 {
33633+ unsigned char tmpcmd[sizeof(rq->__cmd)];
33634+ unsigned char *cmdptr;
33635+
33636 if (hdr->request_len > BLK_MAX_CDB) {
33637 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
33638 if (!rq->cmd)
33639 return -ENOMEM;
33640- }
33641+ cmdptr = rq->cmd;
33642+ } else
33643+ cmdptr = tmpcmd;
33644
33645- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
33646+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
33647 hdr->request_len))
33648 return -EFAULT;
33649
33650+ if (cmdptr != rq->cmd)
33651+ memcpy(rq->cmd, cmdptr, hdr->request_len);
33652+
33653 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
33654 if (blk_verify_command(rq->cmd, has_write_perm))
33655 return -EPERM;
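
Note: bsg (and scsi_ioctl.c further down, which gets the same treatment) now copies the user-supplied CDB into a fixed-size stack buffer before committing it to rq->cmd, presumably so the copy_from_user() destination is a compile-time-sized object that PAX_USERCOPY-style checking can verify. Userspace analogue of the bounce-buffer pattern, hedged:

    #include <string.h>

    /* Copy untrusted bytes through a bounded local buffer, then commit. */
    static int fill_cmd(unsigned char *dst, size_t dst_len,
                        const unsigned char *src, size_t len)
    {
        unsigned char tmp[16];   /* stands in for tmpcmd[sizeof(rq->__cmd)] */

        if (len > sizeof(tmp) || len > dst_len)
            return -1;
        memcpy(tmp, src, len);   /* stands in for copy_from_user() */
        memcpy(dst, tmp, len);
        return 0;
    }
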
33656diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
33657index 7c668c8..db3521c 100644
33658--- a/block/compat_ioctl.c
33659+++ b/block/compat_ioctl.c
33660@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
33661 err |= __get_user(f->spec1, &uf->spec1);
33662 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
33663 err |= __get_user(name, &uf->name);
33664- f->name = compat_ptr(name);
33665+ f->name = (void __force_kernel *)compat_ptr(name);
33666 if (err) {
33667 err = -EFAULT;
33668 goto out;
33669diff --git a/block/genhd.c b/block/genhd.c
33670index cdeb527..10aa34db 100644
33671--- a/block/genhd.c
33672+++ b/block/genhd.c
33673@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
33674
33675 /*
33676 * Register device numbers dev..(dev+range-1)
33677- * range must be nonzero
33678+ * Noop if @range is zero.
33679 * The hash chain is sorted on range, so that subranges can override.
33680 */
33681 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
33682 struct kobject *(*probe)(dev_t, int *, void *),
33683 int (*lock)(dev_t, void *), void *data)
33684 {
33685- kobj_map(bdev_map, devt, range, module, probe, lock, data);
33686+ if (range)
33687+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
33688 }
33689
33690 EXPORT_SYMBOL(blk_register_region);
33691
33692+/* undo blk_register_region(), noop if @range is zero */
33693 void blk_unregister_region(dev_t devt, unsigned long range)
33694 {
33695- kobj_unmap(bdev_map, devt, range);
33696+ if (range)
33697+ kobj_unmap(bdev_map, devt, range);
33698 }
33699
33700 EXPORT_SYMBOL(blk_unregister_region);
33701diff --git a/block/partitions/efi.c b/block/partitions/efi.c
33702index c85fc89..51e690b 100644
33703--- a/block/partitions/efi.c
33704+++ b/block/partitions/efi.c
33705@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
33706 if (!gpt)
33707 return NULL;
33708
33709+ if (!le32_to_cpu(gpt->num_partition_entries))
33710+ return NULL;
33711+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
33712+ if (!pte)
33713+ return NULL;
33714+
33715 count = le32_to_cpu(gpt->num_partition_entries) *
33716 le32_to_cpu(gpt->sizeof_partition_entry);
33717- if (!count)
33718- return NULL;
33719- pte = kmalloc(count, GFP_KERNEL);
33720- if (!pte)
33721- return NULL;
33722-
33723 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
33724 (u8 *) pte,
33725 count) < count) {
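
Note: the GPT fix above replaces kmalloc(count), where count is the product of two attacker-influenced on-disk 32-bit fields, with kcalloc(), whose multiplication is overflow-checked, and rejects a zero entry count up front. Userspace analogue:

    #include <stdlib.h>

    static void *alloc_entries(size_t nmemb, size_t entry_size)
    {
        /* malloc(nmemb * entry_size) can silently wrap;
         * calloc() fails instead when the product overflows. */
        return calloc(nmemb, entry_size);
    }
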
33726diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
33727index a5ffcc9..3cedc9c 100644
33728--- a/block/scsi_ioctl.c
33729+++ b/block/scsi_ioctl.c
33730@@ -224,8 +224,20 @@ EXPORT_SYMBOL(blk_verify_command);
33731 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
33732 struct sg_io_hdr *hdr, fmode_t mode)
33733 {
33734- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
33735+ unsigned char tmpcmd[sizeof(rq->__cmd)];
33736+ unsigned char *cmdptr;
33737+
33738+ if (rq->cmd != rq->__cmd)
33739+ cmdptr = rq->cmd;
33740+ else
33741+ cmdptr = tmpcmd;
33742+
33743+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
33744 return -EFAULT;
33745+
33746+ if (cmdptr != rq->cmd)
33747+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
33748+
33749 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
33750 return -EPERM;
33751
33752@@ -434,6 +446,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
33753 int err;
33754 unsigned int in_len, out_len, bytes, opcode, cmdlen;
33755 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
33756+ unsigned char tmpcmd[sizeof(rq->__cmd)];
33757+ unsigned char *cmdptr;
33758
33759 if (!sic)
33760 return -EINVAL;
33761@@ -467,9 +481,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
33762 */
33763 err = -EFAULT;
33764 rq->cmd_len = cmdlen;
33765- if (copy_from_user(rq->cmd, sic->data, cmdlen))
33766+
33767+ if (rq->cmd != rq->__cmd)
33768+ cmdptr = rq->cmd;
33769+ else
33770+ cmdptr = tmpcmd;
33771+
33772+ if (copy_from_user(cmdptr, sic->data, cmdlen))
33773 goto error;
33774
33775+ if (rq->cmd != cmdptr)
33776+ memcpy(rq->cmd, cmdptr, cmdlen);
33777+
33778 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
33779 goto error;
33780
33781diff --git a/crypto/cryptd.c b/crypto/cryptd.c
33782index 7bdd61b..afec999 100644
33783--- a/crypto/cryptd.c
33784+++ b/crypto/cryptd.c
33785@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
33786
33787 struct cryptd_blkcipher_request_ctx {
33788 crypto_completion_t complete;
33789-};
33790+} __no_const;
33791
33792 struct cryptd_hash_ctx {
33793 struct crypto_shash *child;
33794@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
33795
33796 struct cryptd_aead_request_ctx {
33797 crypto_completion_t complete;
33798-};
33799+} __no_const;
33800
33801 static void cryptd_queue_worker(struct work_struct *work);
33802
33803diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
33804index b2c99dc..476c9fb 100644
33805--- a/crypto/pcrypt.c
33806+++ b/crypto/pcrypt.c
33807@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
33808 int ret;
33809
33810 pinst->kobj.kset = pcrypt_kset;
33811- ret = kobject_add(&pinst->kobj, NULL, name);
33812+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
33813 if (!ret)
33814 kobject_uevent(&pinst->kobj, KOBJ_ADD);
33815
33816@@ -455,8 +455,8 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
33817
33818 get_online_cpus();
33819
33820- pcrypt->wq = alloc_workqueue(name,
33821- WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
33822+ pcrypt->wq = alloc_workqueue("%s",
33823+ WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1, name);
33824 if (!pcrypt->wq)
33825 goto err;
33826
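
Note: both pcrypt hunks above are format-string hardening: kobject_add() and alloc_workqueue() take printf-style formats, so a caller-supplied name must be passed as an argument to a constant "%s" format, never as the format itself. Minimal userspace illustration:

    #include <stdio.h>

    static void log_name(const char *name)
    {
        /* printf(name) would interpret any % specifiers inside name */
        printf("%s\n", name);
    }
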
33827diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
33828index f220d64..d359ad6 100644
33829--- a/drivers/acpi/apei/apei-internal.h
33830+++ b/drivers/acpi/apei/apei-internal.h
33831@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
33832 struct apei_exec_ins_type {
33833 u32 flags;
33834 apei_exec_ins_func_t run;
33835-};
33836+} __do_const;
33837
33838 struct apei_exec_context {
33839 u32 ip;
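
Note: __do_const is consumed by grsecurity's constify gcc plugin, which moves instruction tables like apei_exec_ins_type, structs consisting of function pointers, into read-only memory. A userspace analogue of the effect, hedged:

    #include <stdio.h>

    struct ops { void (*run)(void); };

    static void hello(void) { puts("run"); }

    /* const objects with static storage land in .rodata: a later write
     * primitive cannot redirect the function pointer. */
    static const struct ops my_ops = { .run = hello };

    int main(void) { my_ops.run(); return 0; }
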
33840diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
33841index 33dc6a0..4b24b47 100644
33842--- a/drivers/acpi/apei/cper.c
33843+++ b/drivers/acpi/apei/cper.c
33844@@ -39,12 +39,12 @@
33845 */
33846 u64 cper_next_record_id(void)
33847 {
33848- static atomic64_t seq;
33849+ static atomic64_unchecked_t seq;
33850
33851- if (!atomic64_read(&seq))
33852- atomic64_set(&seq, ((u64)get_seconds()) << 32);
33853+ if (!atomic64_read_unchecked(&seq))
33854+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
33855
33856- return atomic64_inc_return(&seq);
33857+ return atomic64_inc_return_unchecked(&seq);
33858 }
33859 EXPORT_SYMBOL_GPL(cper_next_record_id);
33860
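
Note: under PAX_REFCOUNT plain atomic64_t arithmetic traps on overflow to stop reference-count exploits; the CPER record-id sequence above is allowed to wrap, so it is converted to the *_unchecked variants. A hedged userspace analogue of the checked behaviour using GCC atomics:

    #include <stdint.h>

    static int64_t seq;

    /* "checked" increment: trap if the counter ever wraps negative;
     * an _unchecked counter would simply keep counting. */
    static int64_t next_id(void)
    {
        int64_t v = __atomic_add_fetch(&seq, 1, __ATOMIC_RELAXED);
        if (v < 0)
            __builtin_trap();
        return v;
    }
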
33861diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
33862index be60399..778b33e8 100644
33863--- a/drivers/acpi/bgrt.c
33864+++ b/drivers/acpi/bgrt.c
33865@@ -87,8 +87,10 @@ static int __init bgrt_init(void)
33866 return -ENODEV;
33867
33868 sysfs_bin_attr_init(&image_attr);
33869- image_attr.private = bgrt_image;
33870- image_attr.size = bgrt_image_size;
33871+ pax_open_kernel();
33872+ *(void **)&image_attr.private = bgrt_image;
33873+ *(size_t *)&image_attr.size = bgrt_image_size;
33874+ pax_close_kernel();
33875
33876 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
33877 if (!bgrt_kobj)
33878diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
33879index cb96296..b81293b 100644
33880--- a/drivers/acpi/blacklist.c
33881+++ b/drivers/acpi/blacklist.c
33882@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
33883 u32 is_critical_error;
33884 };
33885
33886-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
33887+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
33888
33889 /*
33890 * POLICY: If *anything* doesn't work, put it on the blacklist.
33891@@ -193,7 +193,7 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
33892 return 0;
33893 }
33894
33895-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
33896+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
33897 {
33898 .callback = dmi_disable_osi_vista,
33899 .ident = "Fujitsu Siemens",
33900diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
33901index 7586544..636a2f0 100644
33902--- a/drivers/acpi/ec_sys.c
33903+++ b/drivers/acpi/ec_sys.c
33904@@ -12,6 +12,7 @@
33905 #include <linux/acpi.h>
33906 #include <linux/debugfs.h>
33907 #include <linux/module.h>
33908+#include <linux/uaccess.h>
33909 #include "internal.h"
33910
33911 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
33912@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
33913 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
33914 */
33915 unsigned int size = EC_SPACE_SIZE;
33916- u8 *data = (u8 *) buf;
33917+ u8 data;
33918 loff_t init_off = *off;
33919 int err = 0;
33920
33921@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
33922 size = count;
33923
33924 while (size) {
33925- err = ec_read(*off, &data[*off - init_off]);
33926+ err = ec_read(*off, &data);
33927 if (err)
33928 return err;
33929+ if (put_user(data, &buf[*off - init_off]))
33930+ return -EFAULT;
33931 *off += 1;
33932 size--;
33933 }
33934@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
33935
33936 unsigned int size = count;
33937 loff_t init_off = *off;
33938- u8 *data = (u8 *) buf;
33939 int err = 0;
33940
33941 if (*off >= EC_SPACE_SIZE)
33942@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
33943 }
33944
33945 while (size) {
33946- u8 byte_write = data[*off - init_off];
33947+ u8 byte_write;
33948+ if (get_user(byte_write, &buf[*off - init_off]))
33949+ return -EFAULT;
33950 err = ec_write(*off, byte_write);
33951 if (err)
33952 return err;
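
Note: the ec_sys hunks fix a missing-uaccess bug: the debugfs handlers cast the char __user * buffer to u8 * and dereferenced it directly; the fix goes byte-by-byte through get_user()/put_user(). The rule, as a kernel-context sketch:

    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* User pointers must go through put_user()/get_user() (or
     * copy_{to,from}_user()), never a plain dereference. */
    static ssize_t read_one_byte(char __user *buf, u8 value)
    {
        if (put_user(value, buf))   /* a faulting access becomes -EFAULT */
            return -EFAULT;
        return 1;
    }
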
33953diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
33954index eb133c7..f571552 100644
33955--- a/drivers/acpi/processor_idle.c
33956+++ b/drivers/acpi/processor_idle.c
33957@@ -994,7 +994,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
33958 {
33959 int i, count = CPUIDLE_DRIVER_STATE_START;
33960 struct acpi_processor_cx *cx;
33961- struct cpuidle_state *state;
33962+ cpuidle_state_no_const *state;
33963 struct cpuidle_driver *drv = &acpi_idle_driver;
33964
33965 if (!pr->flags.power_setup_done)
33966diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
33967index fcae5fa..e9f71ea 100644
33968--- a/drivers/acpi/sysfs.c
33969+++ b/drivers/acpi/sysfs.c
33970@@ -423,11 +423,11 @@ static u32 num_counters;
33971 static struct attribute **all_attrs;
33972 static u32 acpi_gpe_count;
33973
33974-static struct attribute_group interrupt_stats_attr_group = {
33975+static attribute_group_no_const interrupt_stats_attr_group = {
33976 .name = "interrupts",
33977 };
33978
33979-static struct kobj_attribute *counter_attrs;
33980+static kobj_attribute_no_const *counter_attrs;
33981
33982 static void delete_gpe_attr_array(void)
33983 {
33984diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
33985index 7b9bdd8..37638ca 100644
33986--- a/drivers/ata/libahci.c
33987+++ b/drivers/ata/libahci.c
33988@@ -1230,7 +1230,7 @@ int ahci_kick_engine(struct ata_port *ap)
33989 }
33990 EXPORT_SYMBOL_GPL(ahci_kick_engine);
33991
33992-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
33993+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
33994 struct ata_taskfile *tf, int is_cmd, u16 flags,
33995 unsigned long timeout_msec)
33996 {
33997diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
33998index adf002a..39bb8f9 100644
33999--- a/drivers/ata/libata-core.c
34000+++ b/drivers/ata/libata-core.c
34001@@ -4792,7 +4792,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
34002 struct ata_port *ap;
34003 unsigned int tag;
34004
34005- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
34006+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
34007 ap = qc->ap;
34008
34009 qc->flags = 0;
34010@@ -4808,7 +4808,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
34011 struct ata_port *ap;
34012 struct ata_link *link;
34013
34014- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
34015+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
34016 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
34017 ap = qc->ap;
34018 link = qc->dev->link;
34019@@ -5926,6 +5926,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
34020 return;
34021
34022 spin_lock(&lock);
34023+ pax_open_kernel();
34024
34025 for (cur = ops->inherits; cur; cur = cur->inherits) {
34026 void **inherit = (void **)cur;
34027@@ -5939,8 +5940,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
34028 if (IS_ERR(*pp))
34029 *pp = NULL;
34030
34031- ops->inherits = NULL;
34032+ *(struct ata_port_operations **)&ops->inherits = NULL;
34033
34034+ pax_close_kernel();
34035 spin_unlock(&lock);
34036 }
34037
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 7638121..357a965 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
 /* Handle platform specific quirks */
 if (quirk) {
 if (quirk & CF_BROKEN_PIO) {
- ap->ops->set_piomode = NULL;
+ pax_open_kernel();
+ *(void **)&ap->ops->set_piomode = NULL;
+ pax_close_kernel();
 ap->pio_mask = 0;
 }
 if (quirk & CF_BROKEN_MWDMA)
diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
index f9b983a..887b9d8 100644
--- a/drivers/atm/adummy.c
+++ b/drivers/atm/adummy.c
@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
 vcc->pop(vcc, skb);
 else
 dev_kfree_skb_any(skb);
- atomic_inc(&vcc->stats->tx);
+ atomic_inc_unchecked(&vcc->stats->tx);

 return 0;
 }
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 77a7480d..05cde58 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);

 // VC layer stats
- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);

 // free the descriptor
 kfree (tx_descr);
@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
 dump_skb ("<<<", vc, skb);

 // VC layer stats
- atomic_inc(&atm_vcc->stats->rx);
+ atomic_inc_unchecked(&atm_vcc->stats->rx);
 __net_timestamp(skb);
 // end of our responsibility
 atm_vcc->push (atm_vcc, skb);
@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
 } else {
 PRINTK (KERN_INFO, "dropped over-size frame");
 // should we count this?
- atomic_inc(&atm_vcc->stats->rx_drop);
+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
 }

 } else {
@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
 }

 if (check_area (skb->data, skb->len)) {
- atomic_inc(&atm_vcc->stats->tx_err);
+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
 return -ENOMEM; // ?
 }

diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index 0e3f8f9..765a7a5 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
 if (vcc->pop) vcc->pop(vcc,skb);
 else dev_kfree_skb(skb);
 if (dev_data) return 0;
- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);
 return -ENOLINK;
 }
 size = skb->len+sizeof(struct atmtcp_hdr);
@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
 if (!new_skb) {
 if (vcc->pop) vcc->pop(vcc,skb);
 else dev_kfree_skb(skb);
- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);
 return -ENOBUFS;
 }
 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
 if (vcc->pop) vcc->pop(vcc,skb);
 else dev_kfree_skb(skb);
 out_vcc->push(out_vcc,new_skb);
- atomic_inc(&vcc->stats->tx);
- atomic_inc(&out_vcc->stats->rx);
+ atomic_inc_unchecked(&vcc->stats->tx);
+ atomic_inc_unchecked(&out_vcc->stats->rx);
 return 0;
 }

@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
 read_unlock(&vcc_sklist_lock);
 if (!out_vcc) {
- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);
 goto done;
 }
 skb_pull(skb,sizeof(struct atmtcp_hdr));
@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
 __net_timestamp(new_skb);
 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
 out_vcc->push(out_vcc,new_skb);
- atomic_inc(&vcc->stats->tx);
- atomic_inc(&out_vcc->stats->rx);
+ atomic_inc_unchecked(&vcc->stats->tx);
+ atomic_inc_unchecked(&out_vcc->stats->rx);
 done:
 if (vcc->pop) vcc->pop(vcc,skb);
 else dev_kfree_skb(skb);
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index b1955ba..b179940 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
 vcc->dev->number);
 length = 0;
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 }
 else {
 length = ATM_CELL_SIZE-1; /* no HEC */
@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
 size);
 }
 eff = length = 0;
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 }
 else {
 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
 vcc->dev->number,vcc->vci,length,size << 2,descr);
 length = eff = 0;
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 }
 }
 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
@@ -767,7 +767,7 @@ rx_dequeued++;
 vcc->push(vcc,skb);
 pushed++;
 }
- atomic_inc(&vcc->stats->rx);
+ atomic_inc_unchecked(&vcc->stats->rx);
 }
 wake_up(&eni_dev->rx_wait);
 }
@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
 PCI_DMA_TODEVICE);
 if (vcc->pop) vcc->pop(vcc,skb);
 else dev_kfree_skb_irq(skb);
- atomic_inc(&vcc->stats->tx);
+ atomic_inc_unchecked(&vcc->stats->tx);
 wake_up(&eni_dev->tx_wait);
 dma_complete++;
 }
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index b41c948..a002b17 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
 }
 }

- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);

 fs_dprintk (FS_DEBUG_TXMEM, "i");
 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
 #endif
 skb_put (skb, qe->p1 & 0xffff);
 ATM_SKB(skb)->vcc = atm_vcc;
- atomic_inc(&atm_vcc->stats->rx);
+ atomic_inc_unchecked(&atm_vcc->stats->rx);
 __net_timestamp(skb);
 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
 atm_vcc->push (atm_vcc, skb);
@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
 kfree (pe);
 }
 if (atm_vcc)
- atomic_inc(&atm_vcc->stats->rx_drop);
+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
 break;
 case 0x1f: /* Reassembly abort: no buffers. */
 /* Silently increment error counter. */
 if (atm_vcc)
- atomic_inc(&atm_vcc->stats->rx_drop);
+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
 break;
 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 204814e..cede831 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
 #endif
 /* check error condition */
 if (*entry->status & STATUS_ERROR)
- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);
 else
- atomic_inc(&vcc->stats->tx);
+ atomic_inc_unchecked(&vcc->stats->tx);
 }
 }

@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
 if (skb == NULL) {
 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);

- atomic_inc(&vcc->stats->rx_drop);
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
 return -ENOMEM;
 }

@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp

 dev_kfree_skb_any(skb);

- atomic_inc(&vcc->stats->rx_drop);
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
 return -ENOMEM;
 }

 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);

 vcc->push(vcc, skb);
- atomic_inc(&vcc->stats->rx);
+ atomic_inc_unchecked(&vcc->stats->rx);

 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);

@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
 fore200e->atm_dev->number,
 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 }
 }

@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
 goto retry_here;
 }

- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);

 fore200e->tx_sat++;
 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 507362a..a845e57 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1698,7 +1698,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)

 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
- atomic_inc(&vcc->stats->rx_drop);
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
 goto return_host_buffers;
 }

@@ -1725,7 +1725,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
 RBRQ_LEN_ERR(he_dev->rbrq_head)
 ? "LEN_ERR" : "",
 vcc->vpi, vcc->vci);
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 goto return_host_buffers;
 }

@@ -1777,7 +1777,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
 vcc->push(vcc, skb);
 spin_lock(&he_dev->global_lock);

- atomic_inc(&vcc->stats->rx);
+ atomic_inc_unchecked(&vcc->stats->rx);

 return_host_buffers:
 ++pdus_assembled;
@@ -2103,7 +2103,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
 tpd->vcc->pop(tpd->vcc, tpd->skb);
 else
 dev_kfree_skb_any(tpd->skb);
- atomic_inc(&tpd->vcc->stats->tx_err);
+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
 }
 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
 return;
@@ -2515,7 +2515,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
 vcc->pop(vcc, skb);
 else
 dev_kfree_skb_any(skb);
- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);
 return -EINVAL;
 }

@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
 vcc->pop(vcc, skb);
 else
 dev_kfree_skb_any(skb);
- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);
 return -EINVAL;
 }
 #endif
@@ -2538,7 +2538,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
 vcc->pop(vcc, skb);
 else
 dev_kfree_skb_any(skb);
- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);
 spin_unlock_irqrestore(&he_dev->global_lock, flags);
 return -ENOMEM;
 }
@@ -2580,7 +2580,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
 vcc->pop(vcc, skb);
 else
 dev_kfree_skb_any(skb);
- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);
 spin_unlock_irqrestore(&he_dev->global_lock, flags);
 return -ENOMEM;
 }
@@ -2611,7 +2611,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
 __enqueue_tpd(he_dev, tpd, cid);
 spin_unlock_irqrestore(&he_dev->global_lock, flags);

- atomic_inc(&vcc->stats->tx);
+ atomic_inc_unchecked(&vcc->stats->tx);

 return 0;
 }
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 1dc0519..1aadaf7 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
 {
 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
 // VC layer stats
- atomic_inc(&vcc->stats->rx);
+ atomic_inc_unchecked(&vcc->stats->rx);
 __net_timestamp(skb);
 // end of our responsibility
 vcc->push (vcc, skb);
@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
 dev->tx_iovec = NULL;

 // VC layer stats
- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);

 // free the skb
 hrz_kfree_skb (skb);
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 272f009..a18ba55 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
 else
 dev_kfree_skb(skb);

- atomic_inc(&vcc->stats->tx);
+ atomic_inc_unchecked(&vcc->stats->tx);
 }

 atomic_dec(&scq->used);
@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 if ((sb = dev_alloc_skb(64)) == NULL) {
 printk("%s: Can't allocate buffers for aal0.\n",
 card->name);
- atomic_add(i, &vcc->stats->rx_drop);
+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
 break;
 }
 if (!atm_charge(vcc, sb->truesize)) {
 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
 card->name);
- atomic_add(i - 1, &vcc->stats->rx_drop);
+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
 dev_kfree_skb(sb);
 break;
 }
@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 ATM_SKB(sb)->vcc = vcc;
 __net_timestamp(sb);
 vcc->push(vcc, sb);
- atomic_inc(&vcc->stats->rx);
+ atomic_inc_unchecked(&vcc->stats->rx);

 cell += ATM_CELL_PAYLOAD;
 }
@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 "(CDC: %08x)\n",
 card->name, len, rpp->len, readl(SAR_REG_CDC));
 recycle_rx_pool_skb(card, rpp);
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 return;
 }
 if (stat & SAR_RSQE_CRC) {
 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
 recycle_rx_pool_skb(card, rpp);
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 return;
 }
 if (skb_queue_len(&rpp->queue) > 1) {
@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 RXPRINTK("%s: Can't alloc RX skb.\n",
 card->name);
 recycle_rx_pool_skb(card, rpp);
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 return;
 }
 if (!atm_charge(vcc, skb->truesize)) {
@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 __net_timestamp(skb);

 vcc->push(vcc, skb);
- atomic_inc(&vcc->stats->rx);
+ atomic_inc_unchecked(&vcc->stats->rx);

 return;
 }
@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 __net_timestamp(skb);

 vcc->push(vcc, skb);
- atomic_inc(&vcc->stats->rx);
+ atomic_inc_unchecked(&vcc->stats->rx);

 if (skb->truesize > SAR_FB_SIZE_3)
 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
 if (vcc->qos.aal != ATM_AAL0) {
 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
 card->name, vpi, vci);
- atomic_inc(&vcc->stats->rx_drop);
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
 goto drop;
 }

 if ((sb = dev_alloc_skb(64)) == NULL) {
 printk("%s: Can't allocate buffers for AAL0.\n",
 card->name);
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 goto drop;
 }

@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
 ATM_SKB(sb)->vcc = vcc;
 __net_timestamp(sb);
 vcc->push(vcc, sb);
- atomic_inc(&vcc->stats->rx);
+ atomic_inc_unchecked(&vcc->stats->rx);

 drop:
 skb_pull(queue, 64);
@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)

 if (vc == NULL) {
 printk("%s: NULL connection in send().\n", card->name);
- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);
 dev_kfree_skb(skb);
 return -EINVAL;
 }
 if (!test_bit(VCF_TX, &vc->flags)) {
 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);
 dev_kfree_skb(skb);
 return -EINVAL;
 }
@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
 break;
 default:
 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);
 dev_kfree_skb(skb);
 return -EINVAL;
 }

 if (skb_shinfo(skb)->nr_frags != 0) {
 printk("%s: No scatter-gather yet.\n", card->name);
- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);
 dev_kfree_skb(skb);
 return -EINVAL;
 }
@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)

 err = queue_skb(card, vc, skb, oam);
 if (err) {
- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);
 dev_kfree_skb(skb);
 return err;
 }
@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
 skb = dev_alloc_skb(64);
 if (!skb) {
 printk("%s: Out of memory in send_oam().\n", card->name);
- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);
 return -ENOMEM;
 }
 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 4217f29..88f547a 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
 status = (u_short) (buf_desc_ptr->desc_mode);
 if (status & (RX_CER | RX_PTE | RX_OFL))
 {
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 IF_ERR(printk("IA: bad packet, dropping it");)
 if (status & RX_CER) {
 IF_ERR(printk(" cause: packet CRC error\n");)
@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
 len = dma_addr - buf_addr;
 if (len > iadev->rx_buf_sz) {
 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 goto out_free_desc;
 }

@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
 ia_vcc = INPH_IA_VCC(vcc);
 if (ia_vcc == NULL)
 {
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 atm_return(vcc, skb->truesize);
 dev_kfree_skb_any(skb);
 goto INCR_DLE;
@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
 if ((length > iadev->rx_buf_sz) || (length >
 (skb->len - sizeof(struct cpcs_trailer))))
 {
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
 length, skb->len);)
 atm_return(vcc, skb->truesize);
@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)

 IF_RX(printk("rx_dle_intr: skb push");)
 vcc->push(vcc,skb);
- atomic_inc(&vcc->stats->rx);
+ atomic_inc_unchecked(&vcc->stats->rx);
 iadev->rx_pkt_cnt++;
 }
 INCR_DLE:
@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
 {
 struct k_sonet_stats *stats;
 stats = &PRIV(_ia_dev[board])->sonet_stats;
- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
 }
 ia_cmds.status = 0;
 break;
@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
 if ((desc == 0) || (desc > iadev->num_tx_desc))
 {
 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
- atomic_inc(&vcc->stats->tx);
+ atomic_inc_unchecked(&vcc->stats->tx);
 if (vcc->pop)
 vcc->pop(vcc, skb);
 else
@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
 ATM_DESC(skb) = vcc->vci;
 skb_queue_tail(&iadev->tx_dma_q, skb);

- atomic_inc(&vcc->stats->tx);
+ atomic_inc_unchecked(&vcc->stats->tx);
 iadev->tx_pkt_cnt++;
 /* Increment transaction counter */
 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);

 #if 0
 /* add flow control logic */
- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
 if (iavcc->vc_desc_cnt > 10) {
 vcc->tx_quota = vcc->tx_quota * 3 / 4;
 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index fa7d701..1e404c7 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
 lanai_endtx(lanai, lvcc);
 lanai_free_skb(lvcc->tx.atmvcc, skb);
- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
 }

 /* Try to fill the buffer - don't call unless there is backlog */
@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
 __net_timestamp(skb);
 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
 out:
 lvcc->rx.buf.ptr = end;
 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
 "vcc %d\n", lanai->number, (unsigned int) s, vci);
 lanai->stats.service_rxnotaal5++;
- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
 return 0;
 }
 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
 int bytes;
 read_unlock(&vcc_sklist_lock);
 DPRINTK("got trashed rx pdu on vci %d\n", vci);
- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
 lvcc->stats.x.aal5.service_trash++;
 bytes = (SERVICE_GET_END(s) * 16) -
 (((unsigned long) lvcc->rx.buf.ptr) -
@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
 }
 if (s & SERVICE_STREAM) {
 read_unlock(&vcc_sklist_lock);
- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
 lvcc->stats.x.aal5.service_stream++;
 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
 "PDU on VCI %d!\n", lanai->number, vci);
@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
 return 0;
 }
 DPRINTK("got rx crc error on vci %d\n", vci);
- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
 lvcc->stats.x.aal5.service_rxcrc++;
 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 6587dc2..149833d 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -1641,7 +1641,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
 card->index);
- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);
 dev_kfree_skb_any(skb);
 return -EINVAL;
 }
@@ -1649,7 +1649,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
 if (!vc->tx) {
 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
 card->index);
- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);
 dev_kfree_skb_any(skb);
 return -EINVAL;
 }
@@ -1657,14 +1657,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
 card->index);
- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);
 dev_kfree_skb_any(skb);
 return -EINVAL;
 }

 if (skb_shinfo(skb)->nr_frags != 0) {
 printk("nicstar%d: No scatter-gather yet.\n", card->index);
- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);
 dev_kfree_skb_any(skb);
 return -EINVAL;
 }
@@ -1712,11 +1712,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
 }

 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
- atomic_inc(&vcc->stats->tx_err);
+ atomic_inc_unchecked(&vcc->stats->tx_err);
 dev_kfree_skb_any(skb);
 return -EIO;
 }
- atomic_inc(&vcc->stats->tx);
+ atomic_inc_unchecked(&vcc->stats->tx);

 return 0;
 }
@@ -2033,14 +2033,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 printk
 ("nicstar%d: Can't allocate buffers for aal0.\n",
 card->index);
- atomic_add(i, &vcc->stats->rx_drop);
+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
 break;
 }
 if (!atm_charge(vcc, sb->truesize)) {
 RXPRINTK
 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
 card->index);
- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
 dev_kfree_skb_any(sb);
 break;
 }
@@ -2055,7 +2055,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 ATM_SKB(sb)->vcc = vcc;
 __net_timestamp(sb);
 vcc->push(vcc, sb);
- atomic_inc(&vcc->stats->rx);
+ atomic_inc_unchecked(&vcc->stats->rx);
 cell += ATM_CELL_PAYLOAD;
 }

@@ -2072,7 +2072,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 if (iovb == NULL) {
 printk("nicstar%d: Out of iovec buffers.\n",
 card->index);
- atomic_inc(&vcc->stats->rx_drop);
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
 recycle_rx_buf(card, skb);
 return;
 }
@@ -2096,7 +2096,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 small or large buffer itself. */
 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
 NS_MAX_IOVECS);
 NS_PRV_IOVCNT(iovb) = 0;
@@ -2116,7 +2116,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 ("nicstar%d: Expected a small buffer, and this is not one.\n",
 card->index);
 which_list(card, skb);
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 recycle_rx_buf(card, skb);
 vc->rx_iov = NULL;
 recycle_iov_buf(card, iovb);
@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 ("nicstar%d: Expected a large buffer, and this is not one.\n",
 card->index);
 which_list(card, skb);
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
 NS_PRV_IOVCNT(iovb));
 vc->rx_iov = NULL;
@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 printk(" - PDU size mismatch.\n");
 else
 printk(".\n");
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
 NS_PRV_IOVCNT(iovb));
 vc->rx_iov = NULL;
@@ -2166,7 +2166,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 /* skb points to a small buffer */
 if (!atm_charge(vcc, skb->truesize)) {
 push_rxbufs(card, skb);
- atomic_inc(&vcc->stats->rx_drop);
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
 } else {
 skb_put(skb, len);
 dequeue_sm_buf(card, skb);
@@ -2176,7 +2176,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 ATM_SKB(skb)->vcc = vcc;
 __net_timestamp(skb);
 vcc->push(vcc, skb);
- atomic_inc(&vcc->stats->rx);
+ atomic_inc_unchecked(&vcc->stats->rx);
 }
 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
 struct sk_buff *sb;
@@ -2187,7 +2187,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 if (len <= NS_SMBUFSIZE) {
 if (!atm_charge(vcc, sb->truesize)) {
 push_rxbufs(card, sb);
- atomic_inc(&vcc->stats->rx_drop);
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
 } else {
 skb_put(sb, len);
 dequeue_sm_buf(card, sb);
@@ -2197,7 +2197,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 ATM_SKB(sb)->vcc = vcc;
 __net_timestamp(sb);
 vcc->push(vcc, sb);
- atomic_inc(&vcc->stats->rx);
+ atomic_inc_unchecked(&vcc->stats->rx);
 }

 push_rxbufs(card, skb);
@@ -2206,7 +2206,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)

 if (!atm_charge(vcc, skb->truesize)) {
 push_rxbufs(card, skb);
- atomic_inc(&vcc->stats->rx_drop);
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
 } else {
 dequeue_lg_buf(card, skb);
 #ifdef NS_USE_DESTRUCTORS
@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 ATM_SKB(skb)->vcc = vcc;
 __net_timestamp(skb);
 vcc->push(vcc, skb);
- atomic_inc(&vcc->stats->rx);
+ atomic_inc_unchecked(&vcc->stats->rx);
 }

 push_rxbufs(card, sb);
@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 printk
 ("nicstar%d: Out of huge buffers.\n",
 card->index);
- atomic_inc(&vcc->stats->rx_drop);
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
 recycle_iovec_rx_bufs(card,
 (struct iovec *)
 iovb->data,
@@ -2291,7 +2291,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 card->hbpool.count++;
 } else
 dev_kfree_skb_any(hb);
- atomic_inc(&vcc->stats->rx_drop);
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
 } else {
 /* Copy the small buffer to the huge buffer */
 sb = (struct sk_buff *)iov->iov_base;
@@ -2328,7 +2328,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 #endif /* NS_USE_DESTRUCTORS */
 __net_timestamp(hb);
 vcc->push(vcc, hb);
- atomic_inc(&vcc->stats->rx);
+ atomic_inc_unchecked(&vcc->stats->rx);
 }
 }

diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 32784d1..4a8434a 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
 }
 atm_charge(vcc, skb->truesize);
 vcc->push(vcc, skb);
- atomic_inc(&vcc->stats->rx);
+ atomic_inc_unchecked(&vcc->stats->rx);
 break;

 case PKT_STATUS:
@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
 vcc = SKB_CB(oldskb)->vcc;

 if (vcc) {
- atomic_inc(&vcc->stats->tx);
+ atomic_inc_unchecked(&vcc->stats->tx);
 solos_pop(vcc, oldskb);
 } else {
 dev_kfree_skb_irq(oldskb);
diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
index 0215934..ce9f5b1 100644
--- a/drivers/atm/suni.c
+++ b/drivers/atm/suni.c
@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);


 #define ADD_LIMITED(s,v) \
- atomic_add((v),&stats->s); \
- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
+ atomic_add_unchecked((v),&stats->s); \
+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);


 static void suni_hz(unsigned long from_timer)
diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
index 5120a96..e2572bd 100644
--- a/drivers/atm/uPD98402.c
+++ b/drivers/atm/uPD98402.c
@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
 struct sonet_stats tmp;
 int error = 0;

- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
 if (zero && !error) {
@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)


 #define ADD_LIMITED(s,v) \
- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }


 static void stat_event(struct atm_dev *dev)
@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
 if (reason & uPD98402_INT_PFM) stat_event(dev);
 if (reason & uPD98402_INT_PCO) {
 (void) GET(PCOCR); /* clear interrupt cause */
- atomic_add(GET(HECCT),
+ atomic_add_unchecked(GET(HECCT),
 &PRIV(dev)->sonet_stats.uncorr_hcs);
 }
 if ((reason & uPD98402_INT_RFO) &&
@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
 uPD98402_INT_LOS),PIMR); /* enable them */
 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
 return 0;
 }

diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 969c3c2..9b72956 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
 }
 if (!size) {
 dev_kfree_skb_irq(skb);
- if (vcc) atomic_inc(&vcc->stats->rx_err);
+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
 continue;
 }
 if (!atm_charge(vcc,skb->truesize)) {
@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
 skb->len = size;
 ATM_SKB(skb)->vcc = vcc;
 vcc->push(vcc,skb);
- atomic_inc(&vcc->stats->rx);
+ atomic_inc_unchecked(&vcc->stats->rx);
 }
 zout(pos & 0xffff,MTA(mbx));
 #if 0 /* probably a stupid idea */
@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
 skb_queue_head(&zatm_vcc->backlog,skb);
 break;
 }
- atomic_inc(&vcc->stats->tx);
+ atomic_inc_unchecked(&vcc->stats->tx);
 wake_up(&zatm_vcc->tx_wait);
 }

diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index d78b204..ecc1929 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -167,7 +167,7 @@ attribute_container_add_device(struct device *dev,
 ic->classdev.parent = get_device(dev);
 ic->classdev.class = cont->class;
 cont->class->dev_release = attribute_container_release;
- dev_set_name(&ic->classdev, dev_name(dev));
+ dev_set_name(&ic->classdev, "%s", dev_name(dev));
 if (fn)
 fn(cont, dev, &ic->classdev);
 else
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index d414331..b4dd4ba 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -1163,7 +1163,7 @@ int subsys_interface_register(struct subsys_interface *sif)
 return -EINVAL;

 mutex_lock(&subsys->p->mutex);
- list_add_tail(&sif->node, &subsys->p->interfaces);
+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
 if (sif->add_dev) {
 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
 while ((dev = subsys_dev_iter_next(&iter)))
@@ -1188,7 +1188,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
 subsys = sif->subsys;

 mutex_lock(&subsys->p->mutex);
- list_del_init(&sif->node);
+ pax_list_del_init((struct list_head *)&sif->node);
 if (sif->remove_dev) {
 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
 while ((dev = subsys_dev_iter_next(&iter)))
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 7413d06..79155fa 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
 if (!thread)
 return 0;

- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
 if (err)
 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
 else
@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
 *err = sys_unshare(CLONE_NEWNS);
 if (*err)
 goto out;
- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
 if (*err)
 goto out;
- sys_chdir("/.."); /* will traverse into overmounted root */
- sys_chroot(".");
+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
+ sys_chroot((char __force_user *)".");
 complete(&setup_done);
 while (1) {
 spin_lock(&req_lock);
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 7616a77c..8f57f51 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -626,7 +626,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
 struct node_attr {
 struct device_attribute attr;
 enum node_states state;
-};
+} __do_const;

 static ssize_t show_node_state(struct device *dev,
 struct device_attribute *attr, char *buf)
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 7072404..76dcebd 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1850,7 +1850,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
 {
 struct cpuidle_driver *cpuidle_drv;
 struct gpd_cpu_data *cpu_data;
- struct cpuidle_state *idle_state;
+ cpuidle_state_no_const *idle_state;
 int ret = 0;

 if (IS_ERR_OR_NULL(genpd) || state < 0)
@@ -1918,7 +1918,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
 {
 struct gpd_cpu_data *cpu_data;
- struct cpuidle_state *idle_state;
+ cpuidle_state_no_const *idle_state;
 int ret = 0;

 if (IS_ERR_OR_NULL(genpd))
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a53ebd2..8f73eeb 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
 return -EIO;
 }
 }
- return sprintf(buf, p);
+ return sprintf(buf, "%s", p);
 }

 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 79715e7..df06b3b 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
 * They need to be modified together atomically, so it's better to use one
 * atomic variable to hold them both.
 */
-static atomic_t combined_event_count = ATOMIC_INIT(0);
+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);

 #define IN_PROGRESS_BITS (sizeof(int) * 4)
 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)

 static void split_counters(unsigned int *cnt, unsigned int *inpr)
 {
- unsigned int comb = atomic_read(&combined_event_count);
+ unsigned int comb = atomic_read_unchecked(&combined_event_count);

 *cnt = (comb >> IN_PROGRESS_BITS);
 *inpr = comb & MAX_IN_PROGRESS;
@@ -395,7 +395,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
 ws->start_prevent_time = ws->last_time;

 /* Increment the counter of events in progress. */
- cec = atomic_inc_return(&combined_event_count);
+ cec = atomic_inc_return_unchecked(&combined_event_count);

 trace_wakeup_source_activate(ws->name, cec);
 }
@@ -521,7 +521,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
 * Increment the counter of registered wakeup events and decrement the
 * couter of wakeup events in progress simultaneously.
 */
- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
 trace_wakeup_source_deactivate(ws->name, cec);

 split_counters(&cnt, &inpr);
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index e8d11b6..7b1b36f 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
 void register_syscore_ops(struct syscore_ops *ops)
 {
 mutex_lock(&syscore_ops_lock);
- list_add_tail(&ops->node, &syscore_ops_list);
+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
 mutex_unlock(&syscore_ops_lock);
 }
 EXPORT_SYMBOL_GPL(register_syscore_ops);
@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
 void unregister_syscore_ops(struct syscore_ops *ops)
 {
 mutex_lock(&syscore_ops_lock);
- list_del(&ops->node);
+ pax_list_del((struct list_head *)&ops->node);
 mutex_unlock(&syscore_ops_lock);
 }
 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 62b6c2c..4a11354 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1189,6 +1189,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
 int err;
 u32 cp;

+ memset(&arg64, 0, sizeof(arg64));
+
 err = 0;
 err |=
 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
@@ -3010,7 +3012,7 @@ static void start_io(ctlr_info_t *h)
 while (!list_empty(&h->reqQ)) {
 c = list_entry(h->reqQ.next, CommandList_struct, list);
 /* can't do anything if fifo is full */
- if ((h->access.fifo_full(h))) {
+ if ((h->access->fifo_full(h))) {
 dev_warn(&h->pdev->dev, "fifo full\n");
 break;
 }
@@ -3020,7 +3022,7 @@ static void start_io(ctlr_info_t *h)
 h->Qdepth--;

 /* Tell the controller execute command */
- h->access.submit_command(h, c);
+ h->access->submit_command(h, c);

 /* Put job onto the completed Q */
 addQ(&h->cmpQ, c);
@@ -3446,17 +3448,17 @@ startio:

 static inline unsigned long get_next_completion(ctlr_info_t *h)
 {
- return h->access.command_completed(h);
+ return h->access->command_completed(h);
 }

 static inline int interrupt_pending(ctlr_info_t *h)
 {
- return h->access.intr_pending(h);
+ return h->access->intr_pending(h);
 }

 static inline long interrupt_not_for_us(ctlr_info_t *h)
 {
- return ((h->access.intr_pending(h) == 0) ||
+ return ((h->access->intr_pending(h) == 0) ||
 (h->interrupts_enabled == 0));
 }

@@ -3489,7 +3491,7 @@ static inline u32 next_command(ctlr_info_t *h)
 u32 a;

 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
- return h->access.command_completed(h);
+ return h->access->command_completed(h);

 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
@@ -4046,7 +4048,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
 trans_support & CFGTBL_Trans_use_short_tags);

 /* Change the access methods to the performant access methods */
- h->access = SA5_performant_access;
+ h->access = &SA5_performant_access;
 h->transMethod = CFGTBL_Trans_Performant;

 return;
@@ -4319,7 +4321,7 @@ static int cciss_pci_init(ctlr_info_t *h)
 if (prod_index < 0)
 return -ENODEV;
 h->product_name = products[prod_index].product_name;
- h->access = *(products[prod_index].access);
+ h->access = products[prod_index].access;

 if (cciss_board_disabled(h)) {
 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
@@ -5051,7 +5053,7 @@ reinit_after_soft_reset:
 }

 /* make sure the board interrupts are off */
- h->access.set_intr_mask(h, CCISS_INTR_OFF);
+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
 if (rc)
 goto clean2;
@@ -5101,7 +5103,7 @@ reinit_after_soft_reset:
 * fake ones to scoop up any residual completions.
 */
 spin_lock_irqsave(&h->lock, flags);
- h->access.set_intr_mask(h, CCISS_INTR_OFF);
+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
 spin_unlock_irqrestore(&h->lock, flags);
 free_irq(h->intr[h->intr_mode], h);
 rc = cciss_request_irq(h, cciss_msix_discard_completions,
@@ -5121,9 +5123,9 @@ reinit_after_soft_reset:
 dev_info(&h->pdev->dev, "Board READY.\n");
 dev_info(&h->pdev->dev,
 "Waiting for stale completions to drain.\n");
- h->access.set_intr_mask(h, CCISS_INTR_ON);
+ h->access->set_intr_mask(h, CCISS_INTR_ON);
 msleep(10000);
- h->access.set_intr_mask(h, CCISS_INTR_OFF);
+ h->access->set_intr_mask(h, CCISS_INTR_OFF);

 rc = controller_reset_failed(h->cfgtable);
 if (rc)
@@ -5146,7 +5148,7 @@ reinit_after_soft_reset:
 cciss_scsi_setup(h);

 /* Turn the interrupts on so we can service requests */
- h->access.set_intr_mask(h, CCISS_INTR_ON);
+ h->access->set_intr_mask(h, CCISS_INTR_ON);

 /* Get the firmware version */
 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
@@ -5218,7 +5220,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
 kfree(flush_buf);
 if (return_code != IO_OK)
 dev_warn(&h->pdev->dev, "Error flushing cache\n");
- h->access.set_intr_mask(h, CCISS_INTR_OFF);
+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
 free_irq(h->intr[h->intr_mode], h);
 }

diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 7fda30e..eb5dfe0 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -101,7 +101,7 @@ struct ctlr_info
 /* information about each logical volume */
 drive_info_struct *drv[CISS_MAX_LUN];

- struct access_method access;
+ struct access_method *access;

 /* queue and queue Info */
 struct list_head reqQ;
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 639d26b..fd6ad1f 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
 goto Enomem4;
 }
- hba[i]->access.set_intr_mask(hba[i], 0);
+ hba[i]->access->set_intr_mask(hba[i], 0);
 if (request_irq(hba[i]->intr, do_ida_intr,
 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
 {
@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
 add_timer(&hba[i]->timer);

 /* Enable IRQ now that spinlock and rate limit timer are set up */
- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);

 for(j=0; j<NWD; j++) {
 struct gendisk *disk = ida_gendisk[i][j];
@@ -694,7 +694,7 @@ DBGINFO(
 for(i=0; i<NR_PRODUCTS; i++) {
 if (board_id == products[i].board_id) {
 c->product_name = products[i].product_name;
- c->access = *(products[i].access);
+ c->access = products[i].access;
 break;
 }
 }
@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
 hba[ctlr]->intr = intr;
 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
 hba[ctlr]->product_name = products[j].product_name;
- hba[ctlr]->access = *(products[j].access);
+ hba[ctlr]->access = products[j].access;
 hba[ctlr]->ctlr = ctlr;
 hba[ctlr]->board_id = board_id;
 hba[ctlr]->pci_dev = NULL; /* not PCI */
@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)

 while((c = h->reqQ) != NULL) {
 /* Can't do anything if we're busy */
- if (h->access.fifo_full(h) == 0)
+ if (h->access->fifo_full(h) == 0)
 return;

 /* Get the first entry from the request Q */
@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
 h->Qdepth--;

 /* Tell the controller to do our bidding */
- h->access.submit_command(h, c);
+ h->access->submit_command(h, c);

 /* Get onto the completion Q */
 addQ(&h->cmpQ, c);
@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
 unsigned long flags;
 __u32 a,a1;

- istat = h->access.intr_pending(h);
+ istat = h->access->intr_pending(h);
 /* Is this interrupt for us? */
 if (istat == 0)
 return IRQ_NONE;
@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
 */
 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
 if (istat & FIFO_NOT_EMPTY) {
- while((a = h->access.command_completed(h))) {
+ while((a = h->access->command_completed(h))) {
 a1 = a; a &= ~3;
 if ((c = h->cmpQ) == NULL)
 {
@@ -1193,6 +1193,7 @@ out_passthru:
 ida_pci_info_struct pciinfo;

 if (!arg) return -EINVAL;
+ memset(&pciinfo, 0, sizeof(pciinfo));
 pciinfo.bus = host->pci_dev->bus->number;
 pciinfo.dev_fn = host->pci_dev->devfn;
 pciinfo.board_id = host->board_id;
@@ -1447,11 +1448,11 @@ static int sendcmd(
 /*
 * Disable interrupt
 */
- info_p->access.set_intr_mask(info_p, 0);
+ info_p->access->set_intr_mask(info_p, 0);
 /* Make sure there is room in the command FIFO */
 /* Actually it should be completely empty at this time. */
 for (i = 200000; i > 0; i--) {
- temp = info_p->access.fifo_full(info_p);
+ temp = info_p->access->fifo_full(info_p);
 if (temp != 0) {
 break;
 }
@@ -1464,7 +1465,7 @@ DBG(
 /*
 * Send the cmd
 */
- info_p->access.submit_command(info_p, c);
+ info_p->access->submit_command(info_p, c);
 complete = pollcomplete(ctlr);

 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
@@ -1547,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
 * we check the new geometry. Then turn interrupts back on when
 * we're done.
 */
- host->access.set_intr_mask(host, 0);
+ host->access->set_intr_mask(host, 0);
 getgeometry(ctlr);
- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);

 for(i=0; i<NWD; i++) {
 struct gendisk *disk = ida_gendisk[ctlr][i];
@@ -1589,7 +1590,7 @@ static int pollcomplete(int ctlr)
 /* Wait (up to 2 seconds) for a command to complete */

 for (i = 200000; i > 0; i--) {
- done = hba[ctlr]->access.command_completed(hba[ctlr]);
+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
 if (done == 0) {
 udelay(10); /* a short fixed delay */
 } else
diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
index be73e9d..7fbf140 100644
--- a/drivers/block/cpqarray.h
+++ b/drivers/block/cpqarray.h
@@ -99,7 +99,7 @@ struct ctlr_info {
 drv_info_t drv[NWD];
 struct proc_dir_entry *proc;

- struct access_method access;
+ struct access_method *access;

 cmdlist_t *reqQ;
 cmdlist_t *cmpQ;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index f943aac..99bfd19 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -582,7 +582,7 @@ struct drbd_epoch {
 struct drbd_tconn *tconn;
 struct list_head list;
 unsigned int barrier_nr;
- atomic_t epoch_size; /* increased on every request added. */
+ atomic_unchecked_t epoch_size; /* increased on every request added. */
 atomic_t active; /* increased on every req. added, and dec on every finished. */
 unsigned long flags;
 };
@@ -1021,7 +1021,7 @@ struct drbd_conf {
 unsigned int al_tr_number;
 int al_tr_cycle;
 wait_queue_head_t seq_wait;
- atomic_t packet_seq;
+ atomic_unchecked_t packet_seq;
 unsigned int peer_seq;
 spinlock_t peer_seq_lock;
 unsigned int minor;
@@ -1562,7 +1562,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
 char __user *uoptval;
 int err;

- uoptval = (char __user __force *)optval;
+ uoptval = (char __force_user *)optval;

 set_fs(KERNEL_DS);
 if (level == SOL_SOCKET)
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index a5dca6a..bb27967 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
 p->sector = sector;
 p->block_id = block_id;
 p->blksize = blksize;
- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
 }

@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
 return -EIO;
 p->sector = cpu_to_be64(req->i.sector);
 p->block_id = (unsigned long)req;
- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
 if (mdev->state.conn >= C_SYNC_SOURCE &&
 mdev->state.conn <= C_PAUSED_SYNC_T)
@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
 {
 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);

- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
 kfree(tconn->current_epoch);

 idr_destroy(&tconn->volumes);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 4222aff..1f79506 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_conf *mdev)
 {
 int err;

- atomic_set(&mdev->packet_seq, 0);
+ atomic_set_unchecked(&mdev->packet_seq, 0);
 mdev->peer_seq = 0;

 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
@@ -1193,7 +1193,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
 do {
 next_epoch = NULL;

- epoch_size = atomic_read(&epoch->epoch_size);
+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);

 switch (ev & ~EV_CLEANUP) {
 case EV_PUT:
@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
 rv = FE_DESTROYED;
 } else {
 epoch->flags = 0;
- atomic_set(&epoch->epoch_size, 0);
+ atomic_set_unchecked(&epoch->epoch_size, 0);
 /* atomic_set(&epoch->active, 0); is already zero */
 if (rv == FE_STILL_LIVE)
 rv = FE_RECYCLED;
@@ -1451,7 +1451,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
 conn_wait_active_ee_empty(tconn);
 drbd_flush(tconn);

- if (atomic_read(&tconn->current_epoch->epoch_size)) {
+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
35603 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
35604 if (epoch)
35605 break;
35606@@ -1464,11 +1464,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
35607 }
35608
35609 epoch->flags = 0;
35610- atomic_set(&epoch->epoch_size, 0);
35611+ atomic_set_unchecked(&epoch->epoch_size, 0);
35612 atomic_set(&epoch->active, 0);
35613
35614 spin_lock(&tconn->epoch_lock);
35615- if (atomic_read(&tconn->current_epoch->epoch_size)) {
35616+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
35617 list_add(&epoch->list, &tconn->current_epoch->list);
35618 tconn->current_epoch = epoch;
35619 tconn->epochs++;
35620@@ -2172,7 +2172,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
35621
35622 err = wait_for_and_update_peer_seq(mdev, peer_seq);
35623 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
35624- atomic_inc(&tconn->current_epoch->epoch_size);
35625+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
35626 err2 = drbd_drain_block(mdev, pi->size);
35627 if (!err)
35628 err = err2;
35629@@ -2206,7 +2206,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
35630
35631 spin_lock(&tconn->epoch_lock);
35632 peer_req->epoch = tconn->current_epoch;
35633- atomic_inc(&peer_req->epoch->epoch_size);
35634+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
35635 atomic_inc(&peer_req->epoch->active);
35636 spin_unlock(&tconn->epoch_lock);
35637
35638@@ -4347,7 +4347,7 @@ struct data_cmd {
35639 int expect_payload;
35640 size_t pkt_size;
35641 int (*fn)(struct drbd_tconn *, struct packet_info *);
35642-};
35643+} __do_const;
35644
35645 static struct data_cmd drbd_cmd_handler[] = {
35646 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
35647@@ -4467,7 +4467,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
35648 if (!list_empty(&tconn->current_epoch->list))
35649 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
35650 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
35651- atomic_set(&tconn->current_epoch->epoch_size, 0);
35652+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
35653 tconn->send.seen_any_write_yet = false;
35654
35655 conn_info(tconn, "Connection closed\n");
35656@@ -5223,7 +5223,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
35657 struct asender_cmd {
35658 size_t pkt_size;
35659 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
35660-};
35661+} __do_const;
35662
35663 static struct asender_cmd asender_tbl[] = {
35664 [P_PING] = { 0, got_Ping },
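
The drbd changes above switch packet_seq and epoch_size from atomic_t to atomic_unchecked_t. Under the PaX REFCOUNT hardening, atomic_t increments are instrumented to trap on overflow, so counters that are allowed to wrap (sequence numbers, per-epoch sizes) must use the unchecked variants. A userspace sketch of the distinction, with made-up helper names standing in for the kernel primitives:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* stand-in for an instrumented atomic_inc(): overflow is treated as a bug */
static int checked_inc(int *v)
{
        if (*v == INT_MAX)
                abort();
        return ++*v;
}

/* stand-in for atomic_inc_return_unchecked(): wraparound is expected */
static unsigned int unchecked_inc(unsigned int *v)
{
        return ++*v;            /* unsigned wraparound is well defined in C */
}

int main(void)
{
        unsigned int seq = UINT_MAX;    /* e.g. a protocol sequence number */
        unsigned int before = seq;
        unsigned int after = unchecked_inc(&seq);
        printf("seq wraps from %u to %u\n", before, after);

        int ref = 1;                    /* a genuine reference count */
        printf("ref becomes %d\n", checked_inc(&ref));
        return 0;
}
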
35665diff --git a/drivers/block/loop.c b/drivers/block/loop.c
35666index d92d50f..a7e9d97 100644
35667--- a/drivers/block/loop.c
35668+++ b/drivers/block/loop.c
35669@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
35670
35671 file_start_write(file);
35672 set_fs(get_ds());
35673- bw = file->f_op->write(file, buf, len, &pos);
35674+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
35675 set_fs(old_fs);
35676 file_end_write(file);
35677 if (likely(bw == len))
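
The loop.c hunk above adds a __force_user cast (this patch set's shorthand for the standard __force __user pair): the surrounding code runs under set_fs(get_ds()), so a kernel buffer is deliberately handed to file->f_op->write(), which is declared to take a __user pointer. These annotations only mean something to the sparse static checker; a rough sketch of how they are defined and used (approximate, and only checked when the file is run through sparse):

#ifdef __CHECKER__
# define __user         __attribute__((noderef, address_space(1)))
# define __force        __attribute__((force))
#else
# define __user
# define __force
#endif

#include <stddef.h>

/* an API that, like f_op->write, is declared to take a user pointer */
static size_t fake_write(const char __user *buf, size_t len)
{
        (void)buf;
        return len;
}

int main(void)
{
        char kbuf[16] = "hello";
        /* analogous in spirit to the (const char __force_user *) cast above:
         * tell sparse the address-space crossing is deliberate */
        size_t n = fake_write((const char __force __user *)kbuf, sizeof(kbuf));
        return n == sizeof(kbuf) ? 0 : 1;
}
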
35678diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
35679index f5d0ea1..c62380a 100644
35680--- a/drivers/block/pktcdvd.c
35681+++ b/drivers/block/pktcdvd.c
35682@@ -84,7 +84,7 @@
35683 #define MAX_SPEED 0xffff
35684
35685 #define ZONE(sector, pd) (((sector) + (pd)->offset) & \
35686- ~(sector_t)((pd)->settings.size - 1))
35687+ ~(sector_t)((pd)->settings.size - 1UL))
35688
35689 static DEFINE_MUTEX(pktcdvd_mutex);
35690 static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
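
The ZONE() change above replaces "settings.size - 1" with "settings.size - 1UL", which forces the subtraction to happen at unsigned long width before the complement; this appears aimed at the patch set's size_overflow instrumentation, though it also changes what the mask covers in the degenerate case where size is zero. A small demo of the width difference on an LP64 system (made-up values, not driver code):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

int main(void)
{
        uint32_t size = 0;      /* pathological, but shows the difference */

        sector_t mask32 = ~(sector_t)(size - 1);        /* wraps at 32 bits */
        sector_t maskul = ~(sector_t)(size - 1UL);      /* wraps at long width */

        printf("32-bit arithmetic: %#llx\n", (unsigned long long)mask32);
        printf("long arithmetic:   %#llx\n", (unsigned long long)maskul);
        return 0;
}
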
35691diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
35692index 8a3aff7..d7538c2 100644
35693--- a/drivers/cdrom/cdrom.c
35694+++ b/drivers/cdrom/cdrom.c
35695@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
35696 ENSURE(reset, CDC_RESET);
35697 ENSURE(generic_packet, CDC_GENERIC_PACKET);
35698 cdi->mc_flags = 0;
35699- cdo->n_minors = 0;
35700 cdi->options = CDO_USE_FFLAGS;
35701
35702 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
35703@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
35704 else
35705 cdi->cdda_method = CDDA_OLD;
35706
35707- if (!cdo->generic_packet)
35708- cdo->generic_packet = cdrom_dummy_generic_packet;
35709+ if (!cdo->generic_packet) {
35710+ pax_open_kernel();
35711+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
35712+ pax_close_kernel();
35713+ }
35714
35715 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
35716 mutex_lock(&cdrom_mutex);
35717@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
35718 if (cdi->exit)
35719 cdi->exit(cdi);
35720
35721- cdi->ops->n_minors--;
35722 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
35723 }
35724
35725@@ -2107,7 +2108,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
35726 */
35727 nr = nframes;
35728 do {
35729- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
35730+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
35731 if (cgc.buffer)
35732 break;
35733
35734@@ -3429,7 +3430,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
35735 struct cdrom_device_info *cdi;
35736 int ret;
35737
35738- ret = scnprintf(info + *pos, max_size - *pos, header);
35739+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
35740 if (!ret)
35741 return 1;
35742
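
Two independent fixes sit in the cdrom.c hunks above: cdrom_read_cdda_old() now uses kzalloc() so a partially filled buffer cannot leak stale heap contents to user space, and cdrom_print_info() passes header through a "%s" format instead of using it as the format string itself (the same fix appears in intel-rng.c and chr_dev_init() below). A userspace illustration of both patterns, demo only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        /* kmalloc -> kzalloc: if only part of the buffer is filled before
         * it is copied out, calloc guarantees the tail is zeros */
        size_t len = 32;
        char *buf = calloc(1, len);
        if (!buf)
                return 1;
        memcpy(buf, "short", 5);        /* partial fill; rest stays zeroed */

        /* printf(s) -> printf("%s", s): bytes that are not a trusted
         * constant must never be the format argument */
        const char *header = "untrusted %x %s text";
        printf("%s\n", header);         /* printf(header) would misparse it */

        free(buf);
        return 0;
}
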
35743diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
35744index 4afcb65..a68a32d 100644
35745--- a/drivers/cdrom/gdrom.c
35746+++ b/drivers/cdrom/gdrom.c
35747@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
35748 .audio_ioctl = gdrom_audio_ioctl,
35749 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
35750 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
35751- .n_minors = 1,
35752 };
35753
35754 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
35755diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
35756index 3bb6fa3..34013fb 100644
35757--- a/drivers/char/Kconfig
35758+++ b/drivers/char/Kconfig
35759@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
35760
35761 config DEVKMEM
35762 bool "/dev/kmem virtual device support"
35763- default y
35764+ default n
35765+ depends on !GRKERNSEC_KMEM
35766 help
35767 Say Y here if you want to support the /dev/kmem device. The
35768 /dev/kmem device is rarely used, but can be used for certain
35769@@ -582,6 +583,7 @@ config DEVPORT
35770 bool
35771 depends on !M68K
35772 depends on ISA || PCI
35773+ depends on !GRKERNSEC_KMEM
35774 default y
35775
35776 source "drivers/s390/char/Kconfig"
35777diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
35778index a48e05b..6bac831 100644
35779--- a/drivers/char/agp/compat_ioctl.c
35780+++ b/drivers/char/agp/compat_ioctl.c
35781@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
35782 return -ENOMEM;
35783 }
35784
35785- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
35786+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
35787 sizeof(*usegment) * ureserve.seg_count)) {
35788 kfree(usegment);
35789 kfree(ksegment);
35790diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
35791index 2e04433..771f2cc 100644
35792--- a/drivers/char/agp/frontend.c
35793+++ b/drivers/char/agp/frontend.c
35794@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
35795 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
35796 return -EFAULT;
35797
35798- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
35799+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
35800 return -EFAULT;
35801
35802 client = agp_find_client_by_pid(reserve.pid);
35803@@ -847,7 +847,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
35804 if (segment == NULL)
35805 return -ENOMEM;
35806
35807- if (copy_from_user(segment, (void __user *) reserve.seg_list,
35808+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
35809 sizeof(struct agp_segment) * reserve.seg_count)) {
35810 kfree(segment);
35811 return -EFAULT;
35812diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
35813index 4f94375..413694e 100644
35814--- a/drivers/char/genrtc.c
35815+++ b/drivers/char/genrtc.c
35816@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
35817 switch (cmd) {
35818
35819 case RTC_PLL_GET:
35820+ memset(&pll, 0, sizeof(pll));
35821 if (get_rtc_pll(&pll))
35822 return -EINVAL;
35823 else
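
The memset() added above (like the ones for pciinfo in cpqarray and pAbilities in tp3780i) zeroes a structure before it is filled and copied to user space; without it, compiler-inserted padding and any fields the fill path skips would carry stale stack bytes out through copy_to_user(). A userspace sketch of the padding leak, using a hypothetical struct:

#include <stdio.h>
#include <string.h>

struct pll_demo {
        char name;      /* 1 byte, followed by compiler padding ... */
        long value;     /* ... before this naturally aligned member */
};

static void fill(struct pll_demo *p)
{
        memset(p, 0, sizeof(*p));       /* the fix: zero the padding too */
        p->name = 'A';
        p->value = 42;
}

int main(void)
{
        struct pll_demo p;
        fill(&p);
        /* in the kernel this would now go through copy_to_user(); without
         * the memset the padding bytes held whatever was on the stack */
        for (size_t i = 0; i < sizeof(p); i++)
                printf("%02x ", ((unsigned char *)&p)[i]);
        printf("\n");
        return 0;
}
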
35824diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
35825index d784650..e8bfd69 100644
35826--- a/drivers/char/hpet.c
35827+++ b/drivers/char/hpet.c
35828@@ -559,7 +559,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
35829 }
35830
35831 static int
35832-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
35833+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
35834 struct hpet_info *info)
35835 {
35836 struct hpet_timer __iomem *timer;
35837diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
35838index 86fe45c..c0ea948 100644
35839--- a/drivers/char/hw_random/intel-rng.c
35840+++ b/drivers/char/hw_random/intel-rng.c
35841@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
35842
35843 if (no_fwh_detect)
35844 return -ENODEV;
35845- printk(warning);
35846+ printk("%s", warning);
35847 return -EBUSY;
35848 }
35849
35850diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
35851index 4445fa1..7c6de37 100644
35852--- a/drivers/char/ipmi/ipmi_msghandler.c
35853+++ b/drivers/char/ipmi/ipmi_msghandler.c
35854@@ -420,7 +420,7 @@ struct ipmi_smi {
35855 struct proc_dir_entry *proc_dir;
35856 char proc_dir_name[10];
35857
35858- atomic_t stats[IPMI_NUM_STATS];
35859+ atomic_unchecked_t stats[IPMI_NUM_STATS];
35860
35861 /*
35862 * run_to_completion duplicate of smb_info, smi_info
35863@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
35864
35865
35866 #define ipmi_inc_stat(intf, stat) \
35867- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
35868+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
35869 #define ipmi_get_stat(intf, stat) \
35870- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
35871+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
35872
35873 static int is_lan_addr(struct ipmi_addr *addr)
35874 {
35875@@ -2883,7 +2883,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
35876 INIT_LIST_HEAD(&intf->cmd_rcvrs);
35877 init_waitqueue_head(&intf->waitq);
35878 for (i = 0; i < IPMI_NUM_STATS; i++)
35879- atomic_set(&intf->stats[i], 0);
35880+ atomic_set_unchecked(&intf->stats[i], 0);
35881
35882 intf->proc_dir = NULL;
35883
35884diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
35885index af4b23f..79806fc 100644
35886--- a/drivers/char/ipmi/ipmi_si_intf.c
35887+++ b/drivers/char/ipmi/ipmi_si_intf.c
35888@@ -275,7 +275,7 @@ struct smi_info {
35889 unsigned char slave_addr;
35890
35891 /* Counters and things for the proc filesystem. */
35892- atomic_t stats[SI_NUM_STATS];
35893+ atomic_unchecked_t stats[SI_NUM_STATS];
35894
35895 struct task_struct *thread;
35896
35897@@ -284,9 +284,9 @@ struct smi_info {
35898 };
35899
35900 #define smi_inc_stat(smi, stat) \
35901- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
35902+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
35903 #define smi_get_stat(smi, stat) \
35904- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
35905+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
35906
35907 #define SI_MAX_PARMS 4
35908
35909@@ -3258,7 +3258,7 @@ static int try_smi_init(struct smi_info *new_smi)
35910 atomic_set(&new_smi->req_events, 0);
35911 new_smi->run_to_completion = 0;
35912 for (i = 0; i < SI_NUM_STATS; i++)
35913- atomic_set(&new_smi->stats[i], 0);
35914+ atomic_set_unchecked(&new_smi->stats[i], 0);
35915
35916 new_smi->interrupt_disabled = 1;
35917 atomic_set(&new_smi->stop_operation, 0);
35918diff --git a/drivers/char/mem.c b/drivers/char/mem.c
35919index 1ccbe94..6ad651a 100644
35920--- a/drivers/char/mem.c
35921+++ b/drivers/char/mem.c
35922@@ -18,6 +18,7 @@
35923 #include <linux/raw.h>
35924 #include <linux/tty.h>
35925 #include <linux/capability.h>
35926+#include <linux/security.h>
35927 #include <linux/ptrace.h>
35928 #include <linux/device.h>
35929 #include <linux/highmem.h>
35930@@ -38,6 +39,10 @@
35931
35932 #define DEVPORT_MINOR 4
35933
35934+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
35935+extern const struct file_operations grsec_fops;
35936+#endif
35937+
35938 static inline unsigned long size_inside_page(unsigned long start,
35939 unsigned long size)
35940 {
35941@@ -69,9 +74,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
35942
35943 while (cursor < to) {
35944 if (!devmem_is_allowed(pfn)) {
35945+#ifdef CONFIG_GRKERNSEC_KMEM
35946+ gr_handle_mem_readwrite(from, to);
35947+#else
35948 printk(KERN_INFO
35949 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
35950 current->comm, from, to);
35951+#endif
35952 return 0;
35953 }
35954 cursor += PAGE_SIZE;
35955@@ -79,6 +88,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
35956 }
35957 return 1;
35958 }
35959+#elif defined(CONFIG_GRKERNSEC_KMEM)
35960+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
35961+{
35962+ return 0;
35963+}
35964 #else
35965 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
35966 {
35967@@ -121,6 +135,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
35968
35969 while (count > 0) {
35970 unsigned long remaining;
35971+ char *temp;
35972
35973 sz = size_inside_page(p, count);
35974
35975@@ -136,7 +151,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
35976 if (!ptr)
35977 return -EFAULT;
35978
35979- remaining = copy_to_user(buf, ptr, sz);
35980+#ifdef CONFIG_PAX_USERCOPY
35981+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
35982+ if (!temp) {
35983+ unxlate_dev_mem_ptr(p, ptr);
35984+ return -ENOMEM;
35985+ }
35986+ memcpy(temp, ptr, sz);
35987+#else
35988+ temp = ptr;
35989+#endif
35990+
35991+ remaining = copy_to_user(buf, temp, sz);
35992+
35993+#ifdef CONFIG_PAX_USERCOPY
35994+ kfree(temp);
35995+#endif
35996+
35997 unxlate_dev_mem_ptr(p, ptr);
35998 if (remaining)
35999 return -EFAULT;
36000@@ -379,7 +410,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf,
36001 else
36002 csize = count;
36003
36004- rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
36005+ rc = copy_oldmem_page(pfn, (char __force_kernel *)buf, csize, offset, 1);
36006 if (rc < 0)
36007 return rc;
36008 buf += csize;
36009@@ -399,9 +430,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
36010 size_t count, loff_t *ppos)
36011 {
36012 unsigned long p = *ppos;
36013- ssize_t low_count, read, sz;
36014+ ssize_t low_count, read, sz, err = 0;
36015 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
36016- int err = 0;
36017
36018 read = 0;
36019 if (p < (unsigned long) high_memory) {
36020@@ -423,6 +453,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
36021 }
36022 #endif
36023 while (low_count > 0) {
36024+ char *temp;
36025+
36026 sz = size_inside_page(p, low_count);
36027
36028 /*
36029@@ -432,7 +464,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
36030 */
36031 kbuf = xlate_dev_kmem_ptr((char *)p);
36032
36033- if (copy_to_user(buf, kbuf, sz))
36034+#ifdef CONFIG_PAX_USERCOPY
36035+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
36036+ if (!temp)
36037+ return -ENOMEM;
36038+ memcpy(temp, kbuf, sz);
36039+#else
36040+ temp = kbuf;
36041+#endif
36042+
36043+ err = copy_to_user(buf, temp, sz);
36044+
36045+#ifdef CONFIG_PAX_USERCOPY
36046+ kfree(temp);
36047+#endif
36048+
36049+ if (err)
36050 return -EFAULT;
36051 buf += sz;
36052 p += sz;
36053@@ -869,6 +916,9 @@ static const struct memdev {
36054 #ifdef CONFIG_CRASH_DUMP
36055 [12] = { "oldmem", 0, &oldmem_fops, NULL },
36056 #endif
36057+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
36058+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
36059+#endif
36060 };
36061
36062 static int memory_open(struct inode *inode, struct file *filp)
36063@@ -940,7 +990,7 @@ static int __init chr_dev_init(void)
36064 continue;
36065
36066 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
36067- NULL, devlist[minor].name);
36068+ NULL, "%s", devlist[minor].name);
36069 }
36070
36071 return tty_init();
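
Under CONFIG_PAX_USERCOPY the read_mem()/read_kmem() hunks above stage the data through a kmalloc'ed buffer of exactly the requested size before copy_to_user(); the point, as far as this patch set goes, is that the usercopy checks can then validate the copy against a heap object with known bounds, which they cannot do for an arbitrary /dev/mem mapping. A userspace sketch of the bounce-buffer shape (stand-in functions only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int copy_out(char *dst, const char *src, size_t sz)
{
        char *temp = malloc(sz);        /* stand-in for kmalloc(sz, ...) */
        if (!temp)
                return -1;              /* -ENOMEM in the driver */
        memcpy(temp, src, sz);          /* stage into a bounded object */
        memcpy(dst, temp, sz);          /* stand-in for copy_to_user() */
        free(temp);
        return 0;
}

int main(void)
{
        char src[64] = "pretend this is a /dev/mem mapping";
        char dst[64];
        if (copy_out(dst, src, sizeof(src)) == 0)
                printf("copied: %s\n", dst);
        return 0;
}
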
36072diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
36073index c689697..04e6d6a2 100644
36074--- a/drivers/char/mwave/tp3780i.c
36075+++ b/drivers/char/mwave/tp3780i.c
36076@@ -479,6 +479,7 @@ int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities
36077 PRINTK_2(TRACE_TP3780I,
36078 "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData);
36079
36080+ memset(pAbilities, 0, sizeof(*pAbilities));
36081 /* fill out standard constant fields */
36082 pAbilities->instr_per_sec = pBDData->rDspSettings.uIps;
36083 pAbilities->data_size = pBDData->rDspSettings.uDStoreSize;
36084diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
36085index 9df78e2..01ba9ae 100644
36086--- a/drivers/char/nvram.c
36087+++ b/drivers/char/nvram.c
36088@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
36089
36090 spin_unlock_irq(&rtc_lock);
36091
36092- if (copy_to_user(buf, contents, tmp - contents))
36093+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
36094 return -EFAULT;
36095
36096 *ppos = i;
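
The nvram_read() hunk above (like the extract_entropy_user() hunk further down in random.c) adds a length check in front of copy_to_user(), so a miscomputed byte count can never drive a copy past the source buffer. A sketch of the shape of the check, with demo values:

#include <stdio.h>
#include <string.h>

#define NVRAM_BYTES 128

static long nvram_read_demo(char *ubuf, size_t count)
{
        char contents[NVRAM_BYTES];
        char *tmp = contents;
        size_t i;

        for (i = 0; i < count && i < sizeof(contents); i++)
                *tmp++ = (char)i;               /* pretend to read NVRAM */

        /* the added guard: refuse if the computed length is impossible */
        if ((size_t)(tmp - contents) > sizeof(contents))
                return -1;                      /* -EFAULT in the driver */

        memcpy(ubuf, contents, tmp - contents); /* stand-in for copy_to_user */
        return tmp - contents;
}

int main(void)
{
        char buf[NVRAM_BYTES];
        printf("read %ld bytes\n", nvram_read_demo(buf, 16));
        return 0;
}
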
36097diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
36098index 5c5cc00..ac9edb7 100644
36099--- a/drivers/char/pcmcia/synclink_cs.c
36100+++ b/drivers/char/pcmcia/synclink_cs.c
36101@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
36102
36103 if (debug_level >= DEBUG_LEVEL_INFO)
36104 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
36105- __FILE__, __LINE__, info->device_name, port->count);
36106+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
36107
36108- WARN_ON(!port->count);
36109+ WARN_ON(!atomic_read(&port->count));
36110
36111 if (tty_port_close_start(port, tty, filp) == 0)
36112 goto cleanup;
36113@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
36114 cleanup:
36115 if (debug_level >= DEBUG_LEVEL_INFO)
36116 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
36117- tty->driver->name, port->count);
36118+ tty->driver->name, atomic_read(&port->count));
36119 }
36120
36121 /* Wait until the transmitter is empty.
36122@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
36123
36124 if (debug_level >= DEBUG_LEVEL_INFO)
36125 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
36126- __FILE__, __LINE__, tty->driver->name, port->count);
36127+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
36128
36129 /* If port is closing, signal caller to try again */
36130 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
36131@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
36132 goto cleanup;
36133 }
36134 spin_lock(&port->lock);
36135- port->count++;
36136+ atomic_inc(&port->count);
36137 spin_unlock(&port->lock);
36138 spin_unlock_irqrestore(&info->netlock, flags);
36139
36140- if (port->count == 1) {
36141+ if (atomic_read(&port->count) == 1) {
36142 /* 1st open on this device, init hardware */
36143 retval = startup(info, tty);
36144 if (retval < 0)
36145@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
36146 unsigned short new_crctype;
36147
36148 /* return error if TTY interface open */
36149- if (info->port.count)
36150+ if (atomic_read(&info->port.count))
36151 return -EBUSY;
36152
36153 switch (encoding)
36154@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
36155
36156 /* arbitrate between network and tty opens */
36157 spin_lock_irqsave(&info->netlock, flags);
36158- if (info->port.count != 0 || info->netcount != 0) {
36159+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
36160 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
36161 spin_unlock_irqrestore(&info->netlock, flags);
36162 return -EBUSY;
36163@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36164 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
36165
36166 /* return error if TTY interface open */
36167- if (info->port.count)
36168+ if (atomic_read(&info->port.count))
36169 return -EBUSY;
36170
36171 if (cmd != SIOCWANDEV)
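
The synclink_cs hunks above move the tty_port open count from a plain int to atomic_t, so the increments and tests in the open/close and hdlcdev paths cannot tear or race against each other; the same conversion recurs across the tree in this patch. A C11 equivalent of the idea:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count;   /* was: int count */

static void device_open(void)
{
        /* fetch_add returns the old value, so "== 0" means first opener;
         * mirrors atomic_inc(&port->count) plus the count == 1 test */
        if (atomic_fetch_add(&open_count, 1) == 0)
                printf("first open: init hardware\n");  /* startup() path */
}

int main(void)
{
        device_open();
        device_open();
        printf("open count = %d\n", atomic_load(&open_count));
        return 0;
}
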
36172diff --git a/drivers/char/random.c b/drivers/char/random.c
36173index 35487e8..dac8bd1 100644
36174--- a/drivers/char/random.c
36175+++ b/drivers/char/random.c
36176@@ -272,8 +272,13 @@
36177 /*
36178 * Configuration information
36179 */
36180+#ifdef CONFIG_GRKERNSEC_RANDNET
36181+#define INPUT_POOL_WORDS 512
36182+#define OUTPUT_POOL_WORDS 128
36183+#else
36184 #define INPUT_POOL_WORDS 128
36185 #define OUTPUT_POOL_WORDS 32
36186+#endif
36187 #define SEC_XFER_SIZE 512
36188 #define EXTRACT_SIZE 10
36189
36190@@ -313,10 +318,17 @@ static struct poolinfo {
36191 int poolwords;
36192 int tap1, tap2, tap3, tap4, tap5;
36193 } poolinfo_table[] = {
36194+#ifdef CONFIG_GRKERNSEC_RANDNET
36195+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
36196+ { 512, 411, 308, 208, 104, 1 },
36197+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
36198+ { 128, 103, 76, 51, 25, 1 },
36199+#else
36200 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
36201 { 128, 103, 76, 51, 25, 1 },
36202 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
36203 { 32, 26, 20, 14, 7, 1 },
36204+#endif
36205 #if 0
36206 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
36207 { 2048, 1638, 1231, 819, 411, 1 },
36208@@ -524,8 +536,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
36209 input_rotate += i ? 7 : 14;
36210 }
36211
36212- ACCESS_ONCE(r->input_rotate) = input_rotate;
36213- ACCESS_ONCE(r->add_ptr) = i;
36214+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
36215+ ACCESS_ONCE_RW(r->add_ptr) = i;
36216 smp_wmb();
36217
36218 if (out)
36219@@ -1032,7 +1044,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
36220
36221 extract_buf(r, tmp);
36222 i = min_t(int, nbytes, EXTRACT_SIZE);
36223- if (copy_to_user(buf, tmp, i)) {
36224+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
36225 ret = -EFAULT;
36226 break;
36227 }
36228@@ -1368,7 +1380,7 @@ EXPORT_SYMBOL(generate_random_uuid);
36229 #include <linux/sysctl.h>
36230
36231 static int min_read_thresh = 8, min_write_thresh;
36232-static int max_read_thresh = INPUT_POOL_WORDS * 32;
36233+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
36234 static int max_write_thresh = INPUT_POOL_WORDS * 32;
36235 static char sysctl_bootid[16];
36236
36237@@ -1384,7 +1396,7 @@ static char sysctl_bootid[16];
36238 static int proc_do_uuid(ctl_table *table, int write,
36239 void __user *buffer, size_t *lenp, loff_t *ppos)
36240 {
36241- ctl_table fake_table;
36242+ ctl_table_no_const fake_table;
36243 unsigned char buf[64], tmp_uuid[16], *uuid;
36244
36245 uuid = table->data;
36246diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
36247index bf2349db..5456d53 100644
36248--- a/drivers/char/sonypi.c
36249+++ b/drivers/char/sonypi.c
36250@@ -54,6 +54,7 @@
36251
36252 #include <asm/uaccess.h>
36253 #include <asm/io.h>
36254+#include <asm/local.h>
36255
36256 #include <linux/sonypi.h>
36257
36258@@ -490,7 +491,7 @@ static struct sonypi_device {
36259 spinlock_t fifo_lock;
36260 wait_queue_head_t fifo_proc_list;
36261 struct fasync_struct *fifo_async;
36262- int open_count;
36263+ local_t open_count;
36264 int model;
36265 struct input_dev *input_jog_dev;
36266 struct input_dev *input_key_dev;
36267@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
36268 static int sonypi_misc_release(struct inode *inode, struct file *file)
36269 {
36270 mutex_lock(&sonypi_device.lock);
36271- sonypi_device.open_count--;
36272+ local_dec(&sonypi_device.open_count);
36273 mutex_unlock(&sonypi_device.lock);
36274 return 0;
36275 }
36276@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
36277 {
36278 mutex_lock(&sonypi_device.lock);
36279 /* Flush input queue on first open */
36280- if (!sonypi_device.open_count)
36281+ if (!local_read(&sonypi_device.open_count))
36282 kfifo_reset(&sonypi_device.fifo);
36283- sonypi_device.open_count++;
36284+ local_inc(&sonypi_device.open_count);
36285 mutex_unlock(&sonypi_device.lock);
36286
36287 return 0;
36288diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
36289index 64420b3..5c40b56 100644
36290--- a/drivers/char/tpm/tpm_acpi.c
36291+++ b/drivers/char/tpm/tpm_acpi.c
36292@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
36293 virt = acpi_os_map_memory(start, len);
36294 if (!virt) {
36295 kfree(log->bios_event_log);
36296+ log->bios_event_log = NULL;
36297 printk("%s: ERROR - Unable to map memory\n", __func__);
36298 return -EIO;
36299 }
36300
36301- memcpy_fromio(log->bios_event_log, virt, len);
36302+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
36303
36304 acpi_os_unmap_memory(virt, len);
36305 return 0;
36306diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
36307index 84ddc55..1d32f1e 100644
36308--- a/drivers/char/tpm/tpm_eventlog.c
36309+++ b/drivers/char/tpm/tpm_eventlog.c
36310@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
36311 event = addr;
36312
36313 if ((event->event_type == 0 && event->event_size == 0) ||
36314- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
36315+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
36316 return NULL;
36317
36318 return addr;
36319@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
36320 return NULL;
36321
36322 if ((event->event_type == 0 && event->event_size == 0) ||
36323- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
36324+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
36325 return NULL;
36326
36327 (*pos)++;
36328@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
36329 int i;
36330
36331 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
36332- seq_putc(m, data[i]);
36333+ if (!seq_putc(m, data[i]))
36334+ return -EFAULT;
36335
36336 return 0;
36337 }
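
The tpm_eventlog.c hunks above rewrite the bounds checks so the untrusted event_size is never added to a pointer before the comparison: "addr + size >= limit" can wrap around and pass, while "size >= limit - addr" cannot (given addr is already known to lie below limit). A demo of the wraparound with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uintptr_t base  = UINTPTR_MAX - 64;     /* record near top of range */
        uintptr_t limit = UINTPTR_MAX - 16;     /* end of the mapped log */
        uintptr_t len   = 1024;                 /* bogus length from the log */

        /* old form: base + len wraps and compares as "in bounds" */
        int old_ok = (base + len) < limit;

        /* new form: no addition on the untrusted side, so no wraparound */
        int new_ok = len < (limit - base);

        printf("old check: %s\n", old_ok ? "in bounds (wrong)" : "rejected");
        printf("new check: %s\n", new_ok ? "in bounds (wrong)" : "rejected");
        return 0;
}
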
36338diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
36339index fc45567..fa2a590 100644
36340--- a/drivers/char/virtio_console.c
36341+++ b/drivers/char/virtio_console.c
36342@@ -682,7 +682,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
36343 if (to_user) {
36344 ssize_t ret;
36345
36346- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
36347+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
36348 if (ret)
36349 return -EFAULT;
36350 } else {
36351@@ -785,7 +785,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
36352 if (!port_has_data(port) && !port->host_connected)
36353 return 0;
36354
36355- return fill_readbuf(port, ubuf, count, true);
36356+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
36357 }
36358
36359 static int wait_port_writable(struct port *port, bool nonblock)
36360diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
36361index a33f46f..a720eed 100644
36362--- a/drivers/clk/clk-composite.c
36363+++ b/drivers/clk/clk-composite.c
36364@@ -122,7 +122,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
36365 struct clk *clk;
36366 struct clk_init_data init;
36367 struct clk_composite *composite;
36368- struct clk_ops *clk_composite_ops;
36369+ clk_ops_no_const *clk_composite_ops;
36370
36371 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
36372 if (!composite) {
36373diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
36374index bd11315..7f87098 100644
36375--- a/drivers/clk/socfpga/clk.c
36376+++ b/drivers/clk/socfpga/clk.c
36377@@ -22,6 +22,7 @@
36378 #include <linux/clk-provider.h>
36379 #include <linux/io.h>
36380 #include <linux/of.h>
36381+#include <asm/pgtable.h>
36382
36383 /* Clock Manager offsets */
36384 #define CLKMGR_CTRL 0x0
36385@@ -135,8 +136,10 @@ static __init struct clk *socfpga_clk_init(struct device_node *node,
36386 if (strcmp(clk_name, "main_pll") || strcmp(clk_name, "periph_pll") ||
36387 strcmp(clk_name, "sdram_pll")) {
36388 socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
36389- clk_pll_ops.enable = clk_gate_ops.enable;
36390- clk_pll_ops.disable = clk_gate_ops.disable;
36391+ pax_open_kernel();
36392+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
36393+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
36394+ pax_close_kernel();
36395 }
36396
36397 clk = clk_register(NULL, &socfpga_clk->hw.hw);
36398diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
36399index a2b2541..bc1e7ff 100644
36400--- a/drivers/clocksource/arm_arch_timer.c
36401+++ b/drivers/clocksource/arm_arch_timer.c
36402@@ -264,7 +264,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
36403 return NOTIFY_OK;
36404 }
36405
36406-static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
36407+static struct notifier_block arch_timer_cpu_nb = {
36408 .notifier_call = arch_timer_cpu_notify,
36409 };
36410
36411diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
36412index 350f493..489479e 100644
36413--- a/drivers/clocksource/bcm_kona_timer.c
36414+++ b/drivers/clocksource/bcm_kona_timer.c
36415@@ -199,7 +199,7 @@ static struct irqaction kona_timer_irq = {
36416 .handler = kona_timer_interrupt,
36417 };
36418
36419-static void __init kona_timer_init(void)
36420+static void __init kona_timer_init(struct device_node *np)
36421 {
36422 kona_timers_init();
36423 kona_timer_clockevents_init();
36424diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
36425index ade7513..069445f 100644
36426--- a/drivers/clocksource/metag_generic.c
36427+++ b/drivers/clocksource/metag_generic.c
36428@@ -169,7 +169,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
36429 return NOTIFY_OK;
36430 }
36431
36432-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
36433+static struct notifier_block arch_timer_cpu_nb = {
36434 .notifier_call = arch_timer_cpu_notify,
36435 };
36436
36437diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
36438index edc089e..bc7c0bc 100644
36439--- a/drivers/cpufreq/acpi-cpufreq.c
36440+++ b/drivers/cpufreq/acpi-cpufreq.c
36441@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
36442 return sprintf(buf, "%u\n", boost_enabled);
36443 }
36444
36445-static struct global_attr global_boost = __ATTR(boost, 0644,
36446+static global_attr_no_const global_boost = __ATTR(boost, 0644,
36447 show_global_boost,
36448 store_global_boost);
36449
36450@@ -705,8 +705,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
36451 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
36452 per_cpu(acfreq_data, cpu) = data;
36453
36454- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
36455- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
36456+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
36457+ pax_open_kernel();
36458+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
36459+ pax_close_kernel();
36460+ }
36461
36462 result = acpi_processor_register_performance(data->acpi_data, cpu);
36463 if (result)
36464@@ -832,7 +835,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
36465 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
36466 break;
36467 case ACPI_ADR_SPACE_FIXED_HARDWARE:
36468- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
36469+ pax_open_kernel();
36470+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
36471+ pax_close_kernel();
36472 policy->cur = get_cur_freq_on_cpu(cpu);
36473 break;
36474 default:
36475@@ -843,8 +848,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
36476 acpi_processor_notify_smm(THIS_MODULE);
36477
36478 /* Check for APERF/MPERF support in hardware */
36479- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
36480- acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
36481+ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
36482+ pax_open_kernel();
36483+ *(void **)&acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
36484+ pax_close_kernel();
36485+ }
36486
36487 pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
36488 for (i = 0; i < perf->state_count; i++)
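
The acpi-cpufreq hunks above (and the same pattern in cpufreq.c, p4-clockmod.c and speedstep-centrino.c below) wrap one-time writes to driver ops and flags in pax_open_kernel()/pax_close_kernel(). Under this patch set such structures are made read-only at runtime, so deliberate setup-time writes need an explicit, tightly bracketed window. A rough userspace analogue using mprotect() in place of the kernel mechanism:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        /* a page standing in for a read-only driver struct */
        unsigned char *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
                return 1;
        mprotect(ops, pagesz, PROT_READ);       /* sealed, like __read_only */

        mprotect(ops, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
        ops[0] |= 0x04;                 /* flags |= CPUFREQ_CONST_LOOPS */
        mprotect(ops, pagesz, PROT_READ);               /* pax_close_kernel() */

        printf("flags byte is now %#x\n", ops[0]);
        munmap(ops, pagesz);
        return 0;
}
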
36489diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
36490index 6485547..477033e 100644
36491--- a/drivers/cpufreq/cpufreq.c
36492+++ b/drivers/cpufreq/cpufreq.c
36493@@ -1854,7 +1854,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
36494 return NOTIFY_OK;
36495 }
36496
36497-static struct notifier_block __refdata cpufreq_cpu_notifier = {
36498+static struct notifier_block cpufreq_cpu_notifier = {
36499 .notifier_call = cpufreq_cpu_callback,
36500 };
36501
36502@@ -1886,8 +1886,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
36503
36504 pr_debug("trying to register driver %s\n", driver_data->name);
36505
36506- if (driver_data->setpolicy)
36507- driver_data->flags |= CPUFREQ_CONST_LOOPS;
36508+ if (driver_data->setpolicy) {
36509+ pax_open_kernel();
36510+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
36511+ pax_close_kernel();
36512+ }
36513
36514 write_lock_irqsave(&cpufreq_driver_lock, flags);
36515 if (cpufreq_driver) {
36516diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
36517index a86ff72..aad2b03 100644
36518--- a/drivers/cpufreq/cpufreq_governor.c
36519+++ b/drivers/cpufreq/cpufreq_governor.c
36520@@ -235,7 +235,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
36521 struct dbs_data *dbs_data;
36522 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
36523 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
36524- struct od_ops *od_ops = NULL;
36525+ const struct od_ops *od_ops = NULL;
36526 struct od_dbs_tuners *od_tuners = NULL;
36527 struct cs_dbs_tuners *cs_tuners = NULL;
36528 struct cpu_dbs_common_info *cpu_cdbs;
36529@@ -298,7 +298,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
36530
36531 if ((cdata->governor == GOV_CONSERVATIVE) &&
36532 (!policy->governor->initialized)) {
36533- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
36534+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
36535
36536 cpufreq_register_notifier(cs_ops->notifier_block,
36537 CPUFREQ_TRANSITION_NOTIFIER);
36538@@ -315,7 +315,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
36539
36540 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
36541 (policy->governor->initialized == 1)) {
36542- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
36543+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
36544
36545 cpufreq_unregister_notifier(cs_ops->notifier_block,
36546 CPUFREQ_TRANSITION_NOTIFIER);
36547diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
36548index 0d9e6be..461fd3b 100644
36549--- a/drivers/cpufreq/cpufreq_governor.h
36550+++ b/drivers/cpufreq/cpufreq_governor.h
36551@@ -204,7 +204,7 @@ struct common_dbs_data {
36552 void (*exit)(struct dbs_data *dbs_data);
36553
36554 /* Governor specific ops, see below */
36555- void *gov_ops;
36556+ const void *gov_ops;
36557 };
36558
36559 /* Governer Per policy data */
36560diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
36561index c087347..dad6268 100644
36562--- a/drivers/cpufreq/cpufreq_ondemand.c
36563+++ b/drivers/cpufreq/cpufreq_ondemand.c
36564@@ -615,14 +615,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
36565 (struct cpufreq_policy *, unsigned int, unsigned int),
36566 unsigned int powersave_bias)
36567 {
36568- od_ops.powersave_bias_target = f;
36569+ pax_open_kernel();
36570+ *(void **)&od_ops.powersave_bias_target = f;
36571+ pax_close_kernel();
36572 od_set_powersave_bias(powersave_bias);
36573 }
36574 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
36575
36576 void od_unregister_powersave_bias_handler(void)
36577 {
36578- od_ops.powersave_bias_target = generic_powersave_bias_target;
36579+ pax_open_kernel();
36580+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
36581+ pax_close_kernel();
36582 od_set_powersave_bias(0);
36583 }
36584 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
36585diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
36586index bfd6273..e39dd63 100644
36587--- a/drivers/cpufreq/cpufreq_stats.c
36588+++ b/drivers/cpufreq/cpufreq_stats.c
36589@@ -365,7 +365,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
36590 }
36591
36592 /* priority=1 so this will get called before cpufreq_remove_dev */
36593-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
36594+static struct notifier_block cpufreq_stat_cpu_notifier = {
36595 .notifier_call = cpufreq_stat_cpu_callback,
36596 .priority = 1,
36597 };
36598diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
36599index 421ef37..e708530c 100644
36600--- a/drivers/cpufreq/p4-clockmod.c
36601+++ b/drivers/cpufreq/p4-clockmod.c
36602@@ -160,10 +160,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
36603 case 0x0F: /* Core Duo */
36604 case 0x16: /* Celeron Core */
36605 case 0x1C: /* Atom */
36606- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
36607+ pax_open_kernel();
36608+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
36609+ pax_close_kernel();
36610 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
36611 case 0x0D: /* Pentium M (Dothan) */
36612- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
36613+ pax_open_kernel();
36614+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
36615+ pax_close_kernel();
36616 /* fall through */
36617 case 0x09: /* Pentium M (Banias) */
36618 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
36619@@ -175,7 +179,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
36620
36621 /* on P-4s, the TSC runs with constant frequency independent whether
36622 * throttling is active or not. */
36623- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
36624+ pax_open_kernel();
36625+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
36626+ pax_close_kernel();
36627
36628 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
36629 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
36630diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
36631index c71ee14..7c2e183 100644
36632--- a/drivers/cpufreq/sparc-us3-cpufreq.c
36633+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
36634@@ -18,14 +18,12 @@
36635 #include <asm/head.h>
36636 #include <asm/timer.h>
36637
36638-static struct cpufreq_driver *cpufreq_us3_driver;
36639-
36640 struct us3_freq_percpu_info {
36641 struct cpufreq_frequency_table table[4];
36642 };
36643
36644 /* Indexed by cpu number. */
36645-static struct us3_freq_percpu_info *us3_freq_table;
36646+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
36647
36648 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
36649 * in the Safari config register.
36650@@ -186,12 +184,25 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
36651
36652 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
36653 {
36654- if (cpufreq_us3_driver)
36655- us3_set_cpu_divider_index(policy, 0);
36656+ us3_set_cpu_divider_index(policy->cpu, 0);
36657
36658 return 0;
36659 }
36660
36661+static int __init us3_freq_init(void);
36662+static void __exit us3_freq_exit(void);
36663+
36664+static struct cpufreq_driver cpufreq_us3_driver = {
36665+ .init = us3_freq_cpu_init,
36666+ .verify = us3_freq_verify,
36667+ .target = us3_freq_target,
36668+ .get = us3_freq_get,
36669+ .exit = us3_freq_cpu_exit,
36670+ .owner = THIS_MODULE,
36671+ .name = "UltraSPARC-III",
36672+
36673+};
36674+
36675 static int __init us3_freq_init(void)
36676 {
36677 unsigned long manuf, impl, ver;
36678@@ -208,57 +219,15 @@ static int __init us3_freq_init(void)
36679 (impl == CHEETAH_IMPL ||
36680 impl == CHEETAH_PLUS_IMPL ||
36681 impl == JAGUAR_IMPL ||
36682- impl == PANTHER_IMPL)) {
36683- struct cpufreq_driver *driver;
36684-
36685- ret = -ENOMEM;
36686- driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
36687- if (!driver)
36688- goto err_out;
36689-
36690- us3_freq_table = kzalloc(
36691- (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
36692- GFP_KERNEL);
36693- if (!us3_freq_table)
36694- goto err_out;
36695-
36696- driver->init = us3_freq_cpu_init;
36697- driver->verify = us3_freq_verify;
36698- driver->target = us3_freq_target;
36699- driver->get = us3_freq_get;
36700- driver->exit = us3_freq_cpu_exit;
36701- driver->owner = THIS_MODULE,
36702- strcpy(driver->name, "UltraSPARC-III");
36703-
36704- cpufreq_us3_driver = driver;
36705- ret = cpufreq_register_driver(driver);
36706- if (ret)
36707- goto err_out;
36708-
36709- return 0;
36710-
36711-err_out:
36712- if (driver) {
36713- kfree(driver);
36714- cpufreq_us3_driver = NULL;
36715- }
36716- kfree(us3_freq_table);
36717- us3_freq_table = NULL;
36718- return ret;
36719- }
36720+ impl == PANTHER_IMPL))
36721+ return cpufreq_register_driver(&cpufreq_us3_driver);
36722
36723 return -ENODEV;
36724 }
36725
36726 static void __exit us3_freq_exit(void)
36727 {
36728- if (cpufreq_us3_driver) {
36729- cpufreq_unregister_driver(cpufreq_us3_driver);
36730- kfree(cpufreq_us3_driver);
36731- cpufreq_us3_driver = NULL;
36732- kfree(us3_freq_table);
36733- us3_freq_table = NULL;
36734- }
36735+ cpufreq_unregister_driver(&cpufreq_us3_driver);
36736 }
36737
36738 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
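
The sparc-us3-cpufreq rework above replaces a kzalloc()ed, runtime-filled cpufreq_driver with a statically defined one: the allocation error handling disappears, there is no window where a half-initialized driver is visible, and the structure can be made read-only by the rest of the patch set. A sketch of the resulting shape, with simplified stand-in types:

#include <stdio.h>

struct driver_demo {
        const char *name;
        int (*init)(void);
};

static int us3_init_demo(void)
{
        printf("init\n");
        return 0;
}

/* one static instance: no allocation, no partially-built state */
static const struct driver_demo us3_driver_demo = {
        .name = "UltraSPARC-III",
        .init = us3_init_demo,
};

static int register_driver(const struct driver_demo *d)
{
        printf("registering %s\n", d->name);
        return d->init();
}

int main(void)
{
        return register_driver(&us3_driver_demo);
}
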
36739diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
36740index 618e6f4..e89d915 100644
36741--- a/drivers/cpufreq/speedstep-centrino.c
36742+++ b/drivers/cpufreq/speedstep-centrino.c
36743@@ -353,8 +353,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
36744 !cpu_has(cpu, X86_FEATURE_EST))
36745 return -ENODEV;
36746
36747- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
36748- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
36749+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
36750+ pax_open_kernel();
36751+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
36752+ pax_close_kernel();
36753+ }
36754
36755 if (policy->cpu != 0)
36756 return -ENODEV;
36757diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
36758index c3a93fe..e808f24 100644
36759--- a/drivers/cpuidle/cpuidle.c
36760+++ b/drivers/cpuidle/cpuidle.c
36761@@ -254,7 +254,7 @@ static int poll_idle(struct cpuidle_device *dev,
36762
36763 static void poll_idle_init(struct cpuidle_driver *drv)
36764 {
36765- struct cpuidle_state *state = &drv->states[0];
36766+ cpuidle_state_no_const *state = &drv->states[0];
36767
36768 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
36769 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
36770diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
36771index ea2f8e7..70ac501 100644
36772--- a/drivers/cpuidle/governor.c
36773+++ b/drivers/cpuidle/governor.c
36774@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
36775 mutex_lock(&cpuidle_lock);
36776 if (__cpuidle_find_governor(gov->name) == NULL) {
36777 ret = 0;
36778- list_add_tail(&gov->governor_list, &cpuidle_governors);
36779+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
36780 if (!cpuidle_curr_governor ||
36781 cpuidle_curr_governor->rating < gov->rating)
36782 cpuidle_switch_governor(gov);
36783@@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
36784 new_gov = cpuidle_replace_governor(gov->rating);
36785 cpuidle_switch_governor(new_gov);
36786 }
36787- list_del(&gov->governor_list);
36788+ pax_list_del((struct list_head *)&gov->governor_list);
36789 mutex_unlock(&cpuidle_lock);
36790 }
36791
36792diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
36793index 428754a..8bdf9cc 100644
36794--- a/drivers/cpuidle/sysfs.c
36795+++ b/drivers/cpuidle/sysfs.c
36796@@ -131,7 +131,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
36797 NULL
36798 };
36799
36800-static struct attribute_group cpuidle_attr_group = {
36801+static attribute_group_no_const cpuidle_attr_group = {
36802 .attrs = cpuidle_default_attrs,
36803 .name = "cpuidle",
36804 };
36805diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
36806index 3b36797..db0b0c0 100644
36807--- a/drivers/devfreq/devfreq.c
36808+++ b/drivers/devfreq/devfreq.c
36809@@ -477,7 +477,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
36810 GFP_KERNEL);
36811 devfreq->last_stat_updated = jiffies;
36812
36813- dev_set_name(&devfreq->dev, dev_name(dev));
36814+ dev_set_name(&devfreq->dev, "%s", dev_name(dev));
36815 err = device_register(&devfreq->dev);
36816 if (err) {
36817 put_device(&devfreq->dev);
36818@@ -588,7 +588,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
36819 goto err_out;
36820 }
36821
36822- list_add(&governor->node, &devfreq_governor_list);
36823+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
36824
36825 list_for_each_entry(devfreq, &devfreq_list, node) {
36826 int ret = 0;
36827@@ -676,7 +676,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
36828 }
36829 }
36830
36831- list_del(&governor->node);
36832+ pax_list_del((struct list_head *)&governor->node);
36833 err_out:
36834 mutex_unlock(&devfreq_list_lock);
36835
36836diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
36837index b70709b..1d8d02a 100644
36838--- a/drivers/dma/sh/shdma.c
36839+++ b/drivers/dma/sh/shdma.c
36840@@ -476,7 +476,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
36841 return ret;
36842 }
36843
36844-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
36845+static struct notifier_block sh_dmae_nmi_notifier = {
36846 .notifier_call = sh_dmae_nmi_handler,
36847
36848 /* Run before NMI debug handler and KGDB */
36849diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
36850index c4d700a..0b57abd 100644
36851--- a/drivers/edac/edac_mc_sysfs.c
36852+++ b/drivers/edac/edac_mc_sysfs.c
36853@@ -148,7 +148,7 @@ static const char * const edac_caps[] = {
36854 struct dev_ch_attribute {
36855 struct device_attribute attr;
36856 int channel;
36857-};
36858+} __do_const;
36859
36860 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
36861 struct dev_ch_attribute dev_attr_legacy_##_name = \
36862@@ -1005,14 +1005,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
36863 }
36864
36865 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
36866+ pax_open_kernel();
36867 if (mci->get_sdram_scrub_rate) {
36868- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
36869- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
36870+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
36871+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
36872 }
36873 if (mci->set_sdram_scrub_rate) {
36874- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
36875- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
36876+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
36877+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
36878 }
36879+ pax_close_kernel();
36880 err = device_create_file(&mci->dev,
36881 &dev_attr_sdram_scrub_rate);
36882 if (err) {
36883diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
36884index e8658e4..22746d6 100644
36885--- a/drivers/edac/edac_pci_sysfs.c
36886+++ b/drivers/edac/edac_pci_sysfs.c
36887@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
36888 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
36889 static int edac_pci_poll_msec = 1000; /* one second workq period */
36890
36891-static atomic_t pci_parity_count = ATOMIC_INIT(0);
36892-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
36893+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
36894+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
36895
36896 static struct kobject *edac_pci_top_main_kobj;
36897 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
36898@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
36899 void *value;
36900 ssize_t(*show) (void *, char *);
36901 ssize_t(*store) (void *, const char *, size_t);
36902-};
36903+} __do_const;
36904
36905 /* Set of show/store abstract level functions for PCI Parity object */
36906 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
36907@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
36908 edac_printk(KERN_CRIT, EDAC_PCI,
36909 "Signaled System Error on %s\n",
36910 pci_name(dev));
36911- atomic_inc(&pci_nonparity_count);
36912+ atomic_inc_unchecked(&pci_nonparity_count);
36913 }
36914
36915 if (status & (PCI_STATUS_PARITY)) {
36916@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
36917 "Master Data Parity Error on %s\n",
36918 pci_name(dev));
36919
36920- atomic_inc(&pci_parity_count);
36921+ atomic_inc_unchecked(&pci_parity_count);
36922 }
36923
36924 if (status & (PCI_STATUS_DETECTED_PARITY)) {
36925@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
36926 "Detected Parity Error on %s\n",
36927 pci_name(dev));
36928
36929- atomic_inc(&pci_parity_count);
36930+ atomic_inc_unchecked(&pci_parity_count);
36931 }
36932 }
36933
36934@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
36935 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
36936 "Signaled System Error on %s\n",
36937 pci_name(dev));
36938- atomic_inc(&pci_nonparity_count);
36939+ atomic_inc_unchecked(&pci_nonparity_count);
36940 }
36941
36942 if (status & (PCI_STATUS_PARITY)) {
36943@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
36944 "Master Data Parity Error on "
36945 "%s\n", pci_name(dev));
36946
36947- atomic_inc(&pci_parity_count);
36948+ atomic_inc_unchecked(&pci_parity_count);
36949 }
36950
36951 if (status & (PCI_STATUS_DETECTED_PARITY)) {
36952@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
36953 "Detected Parity Error on %s\n",
36954 pci_name(dev));
36955
36956- atomic_inc(&pci_parity_count);
36957+ atomic_inc_unchecked(&pci_parity_count);
36958 }
36959 }
36960 }
36961@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
36962 if (!check_pci_errors)
36963 return;
36964
36965- before_count = atomic_read(&pci_parity_count);
36966+ before_count = atomic_read_unchecked(&pci_parity_count);
36967
36968 /* scan all PCI devices looking for a Parity Error on devices and
36969 * bridges.
36970@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
36971 /* Only if operator has selected panic on PCI Error */
36972 if (edac_pci_get_panic_on_pe()) {
36973 /* If the count is different 'after' from 'before' */
36974- if (before_count != atomic_read(&pci_parity_count))
36975+ if (before_count != atomic_read_unchecked(&pci_parity_count))
36976 panic("EDAC: PCI Parity Error");
36977 }
36978 }
36979diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
36980index 51b7e3a..aa8a3e8 100644
36981--- a/drivers/edac/mce_amd.h
36982+++ b/drivers/edac/mce_amd.h
36983@@ -77,7 +77,7 @@ struct amd_decoder_ops {
36984 bool (*mc0_mce)(u16, u8);
36985 bool (*mc1_mce)(u16, u8);
36986 bool (*mc2_mce)(u16, u8);
36987-};
36988+} __no_const;
36989
36990 void amd_report_gart_errors(bool);
36991 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
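
The __no_const added to amd_decoder_ops above is the counterpart of the __do_const seen earlier (dev_ch_attribute, data_cmd, asender_cmd and others): the patch set's constify plugin makes structures consisting of function pointers read-only by default, __do_const forces that treatment for mixed structs, and __no_const opts a type out when its pointers really are assigned at runtime, as mce_amd does per CPU family. A plain-C approximation of the constified end state:

#include <stdio.h>

struct decoder_ops_demo {
        int (*mc0_mce)(unsigned short ec, unsigned char xec);
};

static int fam10_mc0(unsigned short ec, unsigned char xec)
{
        (void)ec;
        (void)xec;
        return 1;
}

/* the __do_const effect: the table is immutable after build time */
static const struct decoder_ops_demo ops = { .mc0_mce = fam10_mc0 };

int main(void)
{
        /* ops.mc0_mce = other_fn;  <- now a compile-time error */
        printf("decoded: %d\n", ops.mc0_mce(0x10, 0x2));
        return 0;
}
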
36992diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
36993index 57ea7f4..789e3c3 100644
36994--- a/drivers/firewire/core-card.c
36995+++ b/drivers/firewire/core-card.c
36996@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
36997
36998 void fw_core_remove_card(struct fw_card *card)
36999 {
37000- struct fw_card_driver dummy_driver = dummy_driver_template;
37001+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
37002
37003 card->driver->update_phy_reg(card, 4,
37004 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
37005diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
37006index 664a6ff..af13580 100644
37007--- a/drivers/firewire/core-device.c
37008+++ b/drivers/firewire/core-device.c
37009@@ -232,7 +232,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
37010 struct config_rom_attribute {
37011 struct device_attribute attr;
37012 u32 key;
37013-};
37014+} __do_const;
37015
37016 static ssize_t show_immediate(struct device *dev,
37017 struct device_attribute *dattr, char *buf)
37018diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
37019index 28a94c7..58da63a 100644
37020--- a/drivers/firewire/core-transaction.c
37021+++ b/drivers/firewire/core-transaction.c
37022@@ -38,6 +38,7 @@
37023 #include <linux/timer.h>
37024 #include <linux/types.h>
37025 #include <linux/workqueue.h>
37026+#include <linux/sched.h>
37027
37028 #include <asm/byteorder.h>
37029
37030diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
37031index 515a42c..5ecf3ba 100644
37032--- a/drivers/firewire/core.h
37033+++ b/drivers/firewire/core.h
37034@@ -111,6 +111,7 @@ struct fw_card_driver {
37035
37036 int (*stop_iso)(struct fw_iso_context *ctx);
37037 };
37038+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
37039
37040 void fw_card_initialize(struct fw_card *card,
37041 const struct fw_card_driver *driver, struct device *device);
37042diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
37043index 94a58a0..f5eba42 100644
37044--- a/drivers/firmware/dmi-id.c
37045+++ b/drivers/firmware/dmi-id.c
37046@@ -16,7 +16,7 @@
37047 struct dmi_device_attribute{
37048 struct device_attribute dev_attr;
37049 int field;
37050-};
37051+} __do_const;
37052 #define to_dmi_dev_attr(_dev_attr) \
37053 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
37054
37055diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
37056index b95159b..841ae55 100644
37057--- a/drivers/firmware/dmi_scan.c
37058+++ b/drivers/firmware/dmi_scan.c
37059@@ -497,11 +497,6 @@ void __init dmi_scan_machine(void)
37060 }
37061 }
37062 else {
37063- /*
37064- * no iounmap() for that ioremap(); it would be a no-op, but
37065- * it's so early in setup that sucker gets confused into doing
37066- * what it shouldn't if we actually call it.
37067- */
37068 p = dmi_ioremap(0xF0000, 0x10000);
37069 if (p == NULL)
37070 goto error;
37071@@ -786,7 +781,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
37072 if (buf == NULL)
37073 return -1;
37074
37075- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
37076+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
37077
37078 iounmap(buf);
37079 return 0;
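
[annotation] The (char __force_kernel *) cast in dmi_walk() tells the sparse/checker-plugin address-space machinery that handing an ioremap()ed buffer to dmi_table(), which expects an ordinary kernel pointer, is deliberate rather than an accidental address-space crossing. A hedged sketch of the annotation's likely shape (the authoritative definition is in the patch's compiler.h hunks, not shown here):

/* Sketch only: sparse-style address-space override. */
#ifdef __CHECKER__
# define __force_kernel	__attribute__((force, address_space(0)))
#else
# define __force_kernel
#endif
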
37080diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
37081index 5145fa3..0d3babd 100644
37082--- a/drivers/firmware/efi/efi.c
37083+++ b/drivers/firmware/efi/efi.c
37084@@ -65,14 +65,16 @@ static struct attribute_group efi_subsys_attr_group = {
37085 };
37086
37087 static struct efivars generic_efivars;
37088-static struct efivar_operations generic_ops;
37089+static efivar_operations_no_const generic_ops __read_only;
37090
37091 static int generic_ops_register(void)
37092 {
37093- generic_ops.get_variable = efi.get_variable;
37094- generic_ops.set_variable = efi.set_variable;
37095- generic_ops.get_next_variable = efi.get_next_variable;
37096- generic_ops.query_variable_store = efi_query_variable_store;
37097+ pax_open_kernel();
37098+ *(void **)&generic_ops.get_variable = efi.get_variable;
37099+ *(void **)&generic_ops.set_variable = efi.set_variable;
37100+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
37101+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
37102+ pax_close_kernel();
37103
37104 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
37105 }
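
[annotation] generic_ops is now efivar_operations_no_const in the __read_only section, so its function pointers can only be installed between pax_open_kernel() and pax_close_kernel(), which temporarily lift kernel write protection; the *(void **)& casts strip the const that constification adds to the members. The memconsole and qxl hunks later in this patch use the same window for single-field writes. A conceptual x86 sketch of the pair, with hypothetical names (the real PaX version handles more corner cases):

/* Conceptual sketch: toggle CR0.WP so read-only kernel data can be
 * patched; preemption stays off while the window is open. */
static inline void my_open_kernel(void)
{
	preempt_disable();
	write_cr0(read_cr0() & ~X86_CR0_WP);
}

static inline void my_close_kernel(void)
{
	write_cr0(read_cr0() | X86_CR0_WP);
	preempt_enable();
}
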
37106diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
37107index 8bd1bb6..c48b0c6 100644
37108--- a/drivers/firmware/efi/efivars.c
37109+++ b/drivers/firmware/efi/efivars.c
37110@@ -452,7 +452,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
37111 static int
37112 create_efivars_bin_attributes(void)
37113 {
37114- struct bin_attribute *attr;
37115+ bin_attribute_no_const *attr;
37116 int error;
37117
37118 /* new_var */
37119diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
37120index 2a90ba6..07f3733 100644
37121--- a/drivers/firmware/google/memconsole.c
37122+++ b/drivers/firmware/google/memconsole.c
37123@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
37124 if (!found_memconsole())
37125 return -ENODEV;
37126
37127- memconsole_bin_attr.size = memconsole_length;
37128+ pax_open_kernel();
37129+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
37130+ pax_close_kernel();
37131
37132 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
37133
37134diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
37135index e16d932..f0206ef 100644
37136--- a/drivers/gpio/gpio-ich.c
37137+++ b/drivers/gpio/gpio-ich.c
37138@@ -69,7 +69,7 @@ struct ichx_desc {
37139 /* Some chipsets have quirks, let these use their own request/get */
37140 int (*request)(struct gpio_chip *chip, unsigned offset);
37141 int (*get)(struct gpio_chip *chip, unsigned offset);
37142-};
37143+} __do_const;
37144
37145 static struct {
37146 spinlock_t lock;
37147diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
37148index 9902732..64b62dd 100644
37149--- a/drivers/gpio/gpio-vr41xx.c
37150+++ b/drivers/gpio/gpio-vr41xx.c
37151@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
37152 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
37153 maskl, pendl, maskh, pendh);
37154
37155- atomic_inc(&irq_err_count);
37156+ atomic_inc_unchecked(&irq_err_count);
37157
37158 return -EINVAL;
37159 }
37160diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
37161index ed1334e..ee0dd42 100644
37162--- a/drivers/gpu/drm/drm_crtc_helper.c
37163+++ b/drivers/gpu/drm/drm_crtc_helper.c
37164@@ -321,7 +321,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
37165 struct drm_crtc *tmp;
37166 int crtc_mask = 1;
37167
37168- WARN(!crtc, "checking null crtc?\n");
37169+ BUG_ON(!crtc);
37170
37171 dev = crtc->dev;
37172
37173diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
37174index 9cc247f..36aa285 100644
37175--- a/drivers/gpu/drm/drm_drv.c
37176+++ b/drivers/gpu/drm/drm_drv.c
37177@@ -306,7 +306,7 @@ module_exit(drm_core_exit);
37178 /**
37179 * Copy and IOCTL return string to user space
37180 */
37181-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
37182+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
37183 {
37184 int len;
37185
37186@@ -376,7 +376,7 @@ long drm_ioctl(struct file *filp,
37187 struct drm_file *file_priv = filp->private_data;
37188 struct drm_device *dev;
37189 const struct drm_ioctl_desc *ioctl = NULL;
37190- drm_ioctl_t *func;
37191+ drm_ioctl_no_const_t func;
37192 unsigned int nr = DRM_IOCTL_NR(cmd);
37193 int retcode = -EINVAL;
37194 char stack_kdata[128];
37195@@ -389,7 +389,7 @@ long drm_ioctl(struct file *filp,
37196 return -ENODEV;
37197
37198 atomic_inc(&dev->ioctl_count);
37199- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
37200+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
37201 ++file_priv->ioctl_count;
37202
37203 if ((nr >= DRM_CORE_IOCTL_COUNT) &&
37204diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
37205index 429e07d..e681a2c 100644
37206--- a/drivers/gpu/drm/drm_fops.c
37207+++ b/drivers/gpu/drm/drm_fops.c
37208@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
37209 }
37210
37211 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
37212- atomic_set(&dev->counts[i], 0);
37213+ atomic_set_unchecked(&dev->counts[i], 0);
37214
37215 dev->sigdata.lock = NULL;
37216
37217@@ -135,7 +135,7 @@ int drm_open(struct inode *inode, struct file *filp)
37218 if (drm_device_is_unplugged(dev))
37219 return -ENODEV;
37220
37221- if (!dev->open_count++)
37222+ if (local_inc_return(&dev->open_count) == 1)
37223 need_setup = 1;
37224 mutex_lock(&dev->struct_mutex);
37225 old_imapping = inode->i_mapping;
37226@@ -151,7 +151,7 @@ int drm_open(struct inode *inode, struct file *filp)
37227 retcode = drm_open_helper(inode, filp, dev);
37228 if (retcode)
37229 goto err_undo;
37230- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
37231+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
37232 if (need_setup) {
37233 retcode = drm_setup(dev);
37234 if (retcode)
37235@@ -166,7 +166,7 @@ err_undo:
37236 iput(container_of(dev->dev_mapping, struct inode, i_data));
37237 dev->dev_mapping = old_mapping;
37238 mutex_unlock(&dev->struct_mutex);
37239- dev->open_count--;
37240+ local_dec(&dev->open_count);
37241 return retcode;
37242 }
37243 EXPORT_SYMBOL(drm_open);
37244@@ -441,7 +441,7 @@ int drm_release(struct inode *inode, struct file *filp)
37245
37246 mutex_lock(&drm_global_mutex);
37247
37248- DRM_DEBUG("open_count = %d\n", dev->open_count);
37249+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
37250
37251 if (dev->driver->preclose)
37252 dev->driver->preclose(dev, file_priv);
37253@@ -450,10 +450,10 @@ int drm_release(struct inode *inode, struct file *filp)
37254 * Begin inline drm_release
37255 */
37256
37257- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
37258+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
37259 task_pid_nr(current),
37260 (long)old_encode_dev(file_priv->minor->device),
37261- dev->open_count);
37262+ local_read(&dev->open_count));
37263
37264 /* Release any auth tokens that might point to this file_priv,
37265 (do that under the drm_global_mutex) */
37266@@ -550,8 +550,8 @@ int drm_release(struct inode *inode, struct file *filp)
37267 * End inline drm_release
37268 */
37269
37270- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
37271- if (!--dev->open_count) {
37272+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
37273+ if (local_dec_and_test(&dev->open_count)) {
37274 if (atomic_read(&dev->ioctl_count)) {
37275 DRM_ERROR("Device busy: %d\n",
37276 atomic_read(&dev->ioctl_count));
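
[annotation] dev->open_count moves from a plain int to a local_t, so first-open and last-close detection become atomic via local_inc_return()/local_dec_and_test() instead of relying on every path holding drm_global_mutex; since local_read() returns long, the DRM_DEBUG format strings switch to %ld. The same pattern in isolation, as a sketch:

#include <asm/local.h>

/* Sketch of the open-count pattern used in drm_open()/drm_release();
 * struct mydev is illustrative. */
struct mydev {
	local_t open_count;
};

static bool mydev_open(struct mydev *d)
{
	/* true only for the very first opener */
	return local_inc_return(&d->open_count) == 1;
}

static bool mydev_release(struct mydev *d)
{
	/* true only when the last reference drops */
	return local_dec_and_test(&d->open_count);
}
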
37277diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
37278index f731116..629842c 100644
37279--- a/drivers/gpu/drm/drm_global.c
37280+++ b/drivers/gpu/drm/drm_global.c
37281@@ -36,7 +36,7 @@
37282 struct drm_global_item {
37283 struct mutex mutex;
37284 void *object;
37285- int refcount;
37286+ atomic_t refcount;
37287 };
37288
37289 static struct drm_global_item glob[DRM_GLOBAL_NUM];
37290@@ -49,7 +49,7 @@ void drm_global_init(void)
37291 struct drm_global_item *item = &glob[i];
37292 mutex_init(&item->mutex);
37293 item->object = NULL;
37294- item->refcount = 0;
37295+ atomic_set(&item->refcount, 0);
37296 }
37297 }
37298
37299@@ -59,7 +59,7 @@ void drm_global_release(void)
37300 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
37301 struct drm_global_item *item = &glob[i];
37302 BUG_ON(item->object != NULL);
37303- BUG_ON(item->refcount != 0);
37304+ BUG_ON(atomic_read(&item->refcount) != 0);
37305 }
37306 }
37307
37308@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
37309 void *object;
37310
37311 mutex_lock(&item->mutex);
37312- if (item->refcount == 0) {
37313+ if (atomic_read(&item->refcount) == 0) {
37314 item->object = kzalloc(ref->size, GFP_KERNEL);
37315 if (unlikely(item->object == NULL)) {
37316 ret = -ENOMEM;
37317@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
37318 goto out_err;
37319
37320 }
37321- ++item->refcount;
37322+ atomic_inc(&item->refcount);
37323 ref->object = item->object;
37324 object = item->object;
37325 mutex_unlock(&item->mutex);
37326@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
37327 struct drm_global_item *item = &glob[ref->global_type];
37328
37329 mutex_lock(&item->mutex);
37330- BUG_ON(item->refcount == 0);
37331+ BUG_ON(atomic_read(&item->refcount) == 0);
37332 BUG_ON(ref->object != item->object);
37333- if (--item->refcount == 0) {
37334+ if (atomic_dec_and_test(&item->refcount)) {
37335 ref->release(ref);
37336 item->object = NULL;
37337 }
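
[annotation] drm_global_item.refcount likewise becomes an atomic_t: the per-item mutex still serializes allocation of item->object, but the count itself can no longer be torn or corrupted by an unlocked access, and teardown collapses to atomic_dec_and_test(). A condensed sketch mirroring the converted ref/unref pair:

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct item {
	struct mutex mutex;
	void *object;
	atomic_t refcount;
};

static int item_ref(struct item *it, size_t size)
{
	int ret = 0;

	mutex_lock(&it->mutex);
	if (atomic_read(&it->refcount) == 0) {
		it->object = kzalloc(size, GFP_KERNEL);
		if (!it->object)
			ret = -ENOMEM;
	}
	if (!ret)
		atomic_inc(&it->refcount);
	mutex_unlock(&it->mutex);
	return ret;
}

static void item_unref(struct item *it)
{
	mutex_lock(&it->mutex);
	if (atomic_dec_and_test(&it->refcount)) {
		kfree(it->object);
		it->object = NULL;
	}
	mutex_unlock(&it->mutex);
}
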
37338diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
37339index d4b20ce..77a8d41 100644
37340--- a/drivers/gpu/drm/drm_info.c
37341+++ b/drivers/gpu/drm/drm_info.c
37342@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
37343 struct drm_local_map *map;
37344 struct drm_map_list *r_list;
37345
37346- /* Hardcoded from _DRM_FRAME_BUFFER,
37347- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
37348- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
37349- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
37350+ static const char * const types[] = {
37351+ [_DRM_FRAME_BUFFER] = "FB",
37352+ [_DRM_REGISTERS] = "REG",
37353+ [_DRM_SHM] = "SHM",
37354+ [_DRM_AGP] = "AGP",
37355+ [_DRM_SCATTER_GATHER] = "SG",
37356+ [_DRM_CONSISTENT] = "PCI",
37357+ [_DRM_GEM] = "GEM" };
37358 const char *type;
37359 int i;
37360
37361@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
37362 map = r_list->map;
37363 if (!map)
37364 continue;
37365- if (map->type < 0 || map->type > 5)
37366+ if (map->type >= ARRAY_SIZE(types))
37367 type = "??";
37368 else
37369 type = types[map->type];
37370@@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
37371 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
37372 vma->vm_flags & VM_LOCKED ? 'l' : '-',
37373 vma->vm_flags & VM_IO ? 'i' : '-',
37374+#ifdef CONFIG_GRKERNSEC_HIDESYM
37375+ 0);
37376+#else
37377 vma->vm_pgoff);
37378+#endif
37379
37380 #if defined(__i386__)
37381 pgprot = pgprot_val(vma->vm_page_prot);
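
[annotation] Two fixes sit in the drm_info.c hunk: the map-type table gains designated initializers plus a missing _DRM_GEM entry, and the stale "type > 5" bound becomes ARRAY_SIZE(types), so the check can no longer drift out of sync with the table; under GRKERNSEC_HIDESYM the vma dump additionally prints 0 instead of vm_pgoff to avoid leaking layout information. The table idiom on its own:

#include <linux/kernel.h>	/* ARRAY_SIZE */

enum map_type { T_FB, T_REG, T_SHM, T_AGP, T_SG, T_PCI, T_GEM };

static const char * const type_names[] = {
	[T_FB]  = "FB",  [T_REG] = "REG", [T_SHM] = "SHM",
	[T_AGP] = "AGP", [T_SG]  = "SG",  [T_PCI] = "PCI",
	[T_GEM] = "GEM",
};

static const char *type_name(unsigned int t)
{
	/* the bound tracks the table automatically */
	if (t >= ARRAY_SIZE(type_names) || !type_names[t])
		return "??";
	return type_names[t];
}
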
37382diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
37383index 2f4c434..dd12cd2 100644
37384--- a/drivers/gpu/drm/drm_ioc32.c
37385+++ b/drivers/gpu/drm/drm_ioc32.c
37386@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
37387 request = compat_alloc_user_space(nbytes);
37388 if (!access_ok(VERIFY_WRITE, request, nbytes))
37389 return -EFAULT;
37390- list = (struct drm_buf_desc *) (request + 1);
37391+ list = (struct drm_buf_desc __user *) (request + 1);
37392
37393 if (__put_user(count, &request->count)
37394 || __put_user(list, &request->list))
37395@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
37396 request = compat_alloc_user_space(nbytes);
37397 if (!access_ok(VERIFY_WRITE, request, nbytes))
37398 return -EFAULT;
37399- list = (struct drm_buf_pub *) (request + 1);
37400+ list = (struct drm_buf_pub __user *) (request + 1);
37401
37402 if (__put_user(count, &request->count)
37403 || __put_user(list, &request->list))
37404@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
37405 return 0;
37406 }
37407
37408-drm_ioctl_compat_t *drm_compat_ioctls[] = {
37409+drm_ioctl_compat_t drm_compat_ioctls[] = {
37410 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
37411 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
37412 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
37413@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
37414 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
37415 {
37416 unsigned int nr = DRM_IOCTL_NR(cmd);
37417- drm_ioctl_compat_t *fn;
37418 int ret;
37419
37420 /* Assume that ioctls without an explicit compat routine will just
37421@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
37422 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
37423 return drm_ioctl(filp, cmd, arg);
37424
37425- fn = drm_compat_ioctls[nr];
37426-
37427- if (fn != NULL)
37428- ret = (*fn) (filp, cmd, arg);
37429+ if (drm_compat_ioctls[nr] != NULL)
37430+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
37431 else
37432 ret = drm_ioctl(filp, cmd, arg);
37433
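
[annotation] The compat-ioctl tables here (and in the i915/mga/r128/radeon hunks below) change from arrays of pointers to a function type into arrays of drm_ioctl_compat_t itself. That only compiles because this patch, in a drmP.h hunk not shown here, redefines the typedef as a pointer type; the tables then constify cleanly and the temporaries shed one level of indirection. A sketch of the two typedef styles, with hypothetical names:

struct file;

/* old style: typedef names a function type, arrays hold pointers */
typedef long compat_fn_t(struct file *, unsigned int, unsigned long);
static compat_fn_t *table_old[16];	/* '*' spelled at each use site */

/* new style: typedef names the pointer type itself */
typedef long (*compat_ptr_t)(struct file *, unsigned int, unsigned long);
static compat_ptr_t table_new[16];	/* '*' baked into the typedef */
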
37434diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
37435index e77bd8b..1571b85 100644
37436--- a/drivers/gpu/drm/drm_ioctl.c
37437+++ b/drivers/gpu/drm/drm_ioctl.c
37438@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
37439 stats->data[i].value =
37440 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
37441 else
37442- stats->data[i].value = atomic_read(&dev->counts[i]);
37443+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
37444 stats->data[i].type = dev->types[i];
37445 }
37446
37447diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
37448index d752c96..fe08455 100644
37449--- a/drivers/gpu/drm/drm_lock.c
37450+++ b/drivers/gpu/drm/drm_lock.c
37451@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
37452 if (drm_lock_take(&master->lock, lock->context)) {
37453 master->lock.file_priv = file_priv;
37454 master->lock.lock_time = jiffies;
37455- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
37456+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
37457 break; /* Got lock */
37458 }
37459
37460@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
37461 return -EINVAL;
37462 }
37463
37464- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
37465+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
37466
37467 if (drm_lock_free(&master->lock, lock->context)) {
37468 /* FIXME: Should really bail out here. */
37469diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
37470index 16f3ec5..b28f9ca 100644
37471--- a/drivers/gpu/drm/drm_stub.c
37472+++ b/drivers/gpu/drm/drm_stub.c
37473@@ -501,7 +501,7 @@ void drm_unplug_dev(struct drm_device *dev)
37474
37475 drm_device_set_unplugged(dev);
37476
37477- if (dev->open_count == 0) {
37478+ if (local_read(&dev->open_count) == 0) {
37479 drm_put_dev(dev);
37480 }
37481 mutex_unlock(&drm_global_mutex);
37482diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
37483index 0229665..f61329c 100644
37484--- a/drivers/gpu/drm/drm_sysfs.c
37485+++ b/drivers/gpu/drm/drm_sysfs.c
37486@@ -499,7 +499,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
37487 int drm_sysfs_device_add(struct drm_minor *minor)
37488 {
37489 int err;
37490- char *minor_str;
37491+ const char *minor_str;
37492
37493 minor->kdev.parent = minor->dev->dev;
37494
37495diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
37496index 004ecdf..db1f6e0 100644
37497--- a/drivers/gpu/drm/i810/i810_dma.c
37498+++ b/drivers/gpu/drm/i810/i810_dma.c
37499@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
37500 dma->buflist[vertex->idx],
37501 vertex->discard, vertex->used);
37502
37503- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
37504- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
37505+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
37506+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
37507 sarea_priv->last_enqueue = dev_priv->counter - 1;
37508 sarea_priv->last_dispatch = (int)hw_status[5];
37509
37510@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
37511 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
37512 mc->last_render);
37513
37514- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
37515- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
37516+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
37517+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
37518 sarea_priv->last_enqueue = dev_priv->counter - 1;
37519 sarea_priv->last_dispatch = (int)hw_status[5];
37520
37521diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
37522index 6e0acad..93c8289 100644
37523--- a/drivers/gpu/drm/i810/i810_drv.h
37524+++ b/drivers/gpu/drm/i810/i810_drv.h
37525@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
37526 int page_flipping;
37527
37528 wait_queue_head_t irq_queue;
37529- atomic_t irq_received;
37530- atomic_t irq_emitted;
37531+ atomic_unchecked_t irq_received;
37532+ atomic_unchecked_t irq_emitted;
37533
37534 int front_offset;
37535 } drm_i810_private_t;
37536diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
37537index e913d32..4d9b351 100644
37538--- a/drivers/gpu/drm/i915/i915_debugfs.c
37539+++ b/drivers/gpu/drm/i915/i915_debugfs.c
37540@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
37541 I915_READ(GTIMR));
37542 }
37543 seq_printf(m, "Interrupts received: %d\n",
37544- atomic_read(&dev_priv->irq_received));
37545+ atomic_read_unchecked(&dev_priv->irq_received));
37546 for_each_ring(ring, dev_priv, i) {
37547 if (IS_GEN6(dev) || IS_GEN7(dev)) {
37548 seq_printf(m,
37549diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
37550index 17d9b0b..860e6d9 100644
37551--- a/drivers/gpu/drm/i915/i915_dma.c
37552+++ b/drivers/gpu/drm/i915/i915_dma.c
37553@@ -1259,7 +1259,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
37554 bool can_switch;
37555
37556 spin_lock(&dev->count_lock);
37557- can_switch = (dev->open_count == 0);
37558+ can_switch = (local_read(&dev->open_count) == 0);
37559 spin_unlock(&dev->count_lock);
37560 return can_switch;
37561 }
37562diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
37563index 47d8b68..52f5d8d 100644
37564--- a/drivers/gpu/drm/i915/i915_drv.h
37565+++ b/drivers/gpu/drm/i915/i915_drv.h
37566@@ -916,7 +916,7 @@ typedef struct drm_i915_private {
37567 drm_dma_handle_t *status_page_dmah;
37568 struct resource mch_res;
37569
37570- atomic_t irq_received;
37571+ atomic_unchecked_t irq_received;
37572
37573 /* protects the irq masks */
37574 spinlock_t irq_lock;
37575@@ -1813,7 +1813,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
37576 struct drm_i915_private *dev_priv, unsigned port);
37577 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
37578 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
37579-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
37580+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
37581 {
37582 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
37583 }
37584diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
37585index 117ce38..eefd237 100644
37586--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
37587+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
37588@@ -727,9 +727,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
37589
37590 static int
37591 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
37592- int count)
37593+ unsigned int count)
37594 {
37595- int i;
37596+ unsigned int i;
37597 int relocs_total = 0;
37598 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
37599
37600diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
37601index 3c59584..500f2e9 100644
37602--- a/drivers/gpu/drm/i915/i915_ioc32.c
37603+++ b/drivers/gpu/drm/i915/i915_ioc32.c
37604@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
37605 (unsigned long)request);
37606 }
37607
37608-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
37609+static drm_ioctl_compat_t i915_compat_ioctls[] = {
37610 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
37611 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
37612 [DRM_I915_GETPARAM] = compat_i915_getparam,
37613@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
37614 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
37615 {
37616 unsigned int nr = DRM_IOCTL_NR(cmd);
37617- drm_ioctl_compat_t *fn = NULL;
37618 int ret;
37619
37620 if (nr < DRM_COMMAND_BASE)
37621 return drm_compat_ioctl(filp, cmd, arg);
37622
37623- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
37624- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
37625-
37626- if (fn != NULL)
37627+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
37628+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
37629 ret = (*fn) (filp, cmd, arg);
37630- else
37631+ } else
37632 ret = drm_ioctl(filp, cmd, arg);
37633
37634 return ret;
37635diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
37636index e5e32869..1678f36 100644
37637--- a/drivers/gpu/drm/i915/i915_irq.c
37638+++ b/drivers/gpu/drm/i915/i915_irq.c
37639@@ -670,7 +670,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
37640 int pipe;
37641 u32 pipe_stats[I915_MAX_PIPES];
37642
37643- atomic_inc(&dev_priv->irq_received);
37644+ atomic_inc_unchecked(&dev_priv->irq_received);
37645
37646 while (true) {
37647 iir = I915_READ(VLV_IIR);
37648@@ -835,7 +835,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
37649 irqreturn_t ret = IRQ_NONE;
37650 int i;
37651
37652- atomic_inc(&dev_priv->irq_received);
37653+ atomic_inc_unchecked(&dev_priv->irq_received);
37654
37655 /* disable master interrupt before clearing iir */
37656 de_ier = I915_READ(DEIER);
37657@@ -925,7 +925,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
37658 int ret = IRQ_NONE;
37659 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
37660
37661- atomic_inc(&dev_priv->irq_received);
37662+ atomic_inc_unchecked(&dev_priv->irq_received);
37663
37664 /* disable master interrupt before clearing iir */
37665 de_ier = I915_READ(DEIER);
37666@@ -2089,7 +2089,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
37667 {
37668 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
37669
37670- atomic_set(&dev_priv->irq_received, 0);
37671+ atomic_set_unchecked(&dev_priv->irq_received, 0);
37672
37673 I915_WRITE(HWSTAM, 0xeffe);
37674
37675@@ -2124,7 +2124,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
37676 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
37677 int pipe;
37678
37679- atomic_set(&dev_priv->irq_received, 0);
37680+ atomic_set_unchecked(&dev_priv->irq_received, 0);
37681
37682 /* VLV magic */
37683 I915_WRITE(VLV_IMR, 0);
37684@@ -2411,7 +2411,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
37685 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
37686 int pipe;
37687
37688- atomic_set(&dev_priv->irq_received, 0);
37689+ atomic_set_unchecked(&dev_priv->irq_received, 0);
37690
37691 for_each_pipe(pipe)
37692 I915_WRITE(PIPESTAT(pipe), 0);
37693@@ -2490,7 +2490,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
37694 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
37695 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
37696
37697- atomic_inc(&dev_priv->irq_received);
37698+ atomic_inc_unchecked(&dev_priv->irq_received);
37699
37700 iir = I915_READ16(IIR);
37701 if (iir == 0)
37702@@ -2565,7 +2565,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
37703 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
37704 int pipe;
37705
37706- atomic_set(&dev_priv->irq_received, 0);
37707+ atomic_set_unchecked(&dev_priv->irq_received, 0);
37708
37709 if (I915_HAS_HOTPLUG(dev)) {
37710 I915_WRITE(PORT_HOTPLUG_EN, 0);
37711@@ -2664,7 +2664,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
37712 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
37713 int pipe, ret = IRQ_NONE;
37714
37715- atomic_inc(&dev_priv->irq_received);
37716+ atomic_inc_unchecked(&dev_priv->irq_received);
37717
37718 iir = I915_READ(IIR);
37719 do {
37720@@ -2791,7 +2791,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
37721 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
37722 int pipe;
37723
37724- atomic_set(&dev_priv->irq_received, 0);
37725+ atomic_set_unchecked(&dev_priv->irq_received, 0);
37726
37727 I915_WRITE(PORT_HOTPLUG_EN, 0);
37728 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
37729@@ -2898,7 +2898,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
37730 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
37731 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
37732
37733- atomic_inc(&dev_priv->irq_received);
37734+ atomic_inc_unchecked(&dev_priv->irq_received);
37735
37736 iir = I915_READ(IIR);
37737
37738diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
37739index eea5982..eeef407 100644
37740--- a/drivers/gpu/drm/i915/intel_display.c
37741+++ b/drivers/gpu/drm/i915/intel_display.c
37742@@ -8935,13 +8935,13 @@ struct intel_quirk {
37743 int subsystem_vendor;
37744 int subsystem_device;
37745 void (*hook)(struct drm_device *dev);
37746-};
37747+} __do_const;
37748
37749 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
37750 struct intel_dmi_quirk {
37751 void (*hook)(struct drm_device *dev);
37752 const struct dmi_system_id (*dmi_id_list)[];
37753-};
37754+} __do_const;
37755
37756 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
37757 {
37758@@ -8949,18 +8949,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
37759 return 1;
37760 }
37761
37762-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
37763+static const struct dmi_system_id intel_dmi_quirks_table[] = {
37764 {
37765- .dmi_id_list = &(const struct dmi_system_id[]) {
37766- {
37767- .callback = intel_dmi_reverse_brightness,
37768- .ident = "NCR Corporation",
37769- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
37770- DMI_MATCH(DMI_PRODUCT_NAME, ""),
37771- },
37772- },
37773- { } /* terminating entry */
37774+ .callback = intel_dmi_reverse_brightness,
37775+ .ident = "NCR Corporation",
37776+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
37777+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
37778 },
37779+ },
37780+ { } /* terminating entry */
37781+};
37782+
37783+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
37784+ {
37785+ .dmi_id_list = &intel_dmi_quirks_table,
37786 .hook = quirk_invert_brightness,
37787 },
37788 };
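
[annotation] intel_dmi_quirks originally embedded its dmi_system_id list as an anonymous compound literal inside a const initializer, a form the constify plugin cannot place in read-only memory; hoisting the list into the named static intel_dmi_quirks_table gives both arrays ordinary static storage. The hoisting idiom reduced to essentials (struct names here are illustrative):

struct id { const char *name; };
struct quirk { const struct id (*list)[]; };

/* before: anonymous compound literal nested in the initializer
 * static const struct quirk quirks[] = {
 *	{ .list = &(const struct id[]) { { "X" }, { } } },
 * };
 */

/* after: a named table with plain static storage duration */
static const struct id id_table[] = {
	{ .name = "X" },
	{ }			/* terminating entry */
};
static const struct quirk quirks[] = {
	{ .list = &id_table },
};
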
37789diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
37790index 54558a0..2d97005 100644
37791--- a/drivers/gpu/drm/mga/mga_drv.h
37792+++ b/drivers/gpu/drm/mga/mga_drv.h
37793@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
37794 u32 clear_cmd;
37795 u32 maccess;
37796
37797- atomic_t vbl_received; /**< Number of vblanks received. */
37798+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
37799 wait_queue_head_t fence_queue;
37800- atomic_t last_fence_retired;
37801+ atomic_unchecked_t last_fence_retired;
37802 u32 next_fence_to_post;
37803
37804 unsigned int fb_cpp;
37805diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
37806index 709e90d..89a1c0d 100644
37807--- a/drivers/gpu/drm/mga/mga_ioc32.c
37808+++ b/drivers/gpu/drm/mga/mga_ioc32.c
37809@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
37810 return 0;
37811 }
37812
37813-drm_ioctl_compat_t *mga_compat_ioctls[] = {
37814+drm_ioctl_compat_t mga_compat_ioctls[] = {
37815 [DRM_MGA_INIT] = compat_mga_init,
37816 [DRM_MGA_GETPARAM] = compat_mga_getparam,
37817 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
37818@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
37819 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
37820 {
37821 unsigned int nr = DRM_IOCTL_NR(cmd);
37822- drm_ioctl_compat_t *fn = NULL;
37823 int ret;
37824
37825 if (nr < DRM_COMMAND_BASE)
37826 return drm_compat_ioctl(filp, cmd, arg);
37827
37828- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
37829- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
37830-
37831- if (fn != NULL)
37832+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
37833+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
37834 ret = (*fn) (filp, cmd, arg);
37835- else
37836+ } else
37837 ret = drm_ioctl(filp, cmd, arg);
37838
37839 return ret;
37840diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
37841index 598c281..60d590e 100644
37842--- a/drivers/gpu/drm/mga/mga_irq.c
37843+++ b/drivers/gpu/drm/mga/mga_irq.c
37844@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
37845 if (crtc != 0)
37846 return 0;
37847
37848- return atomic_read(&dev_priv->vbl_received);
37849+ return atomic_read_unchecked(&dev_priv->vbl_received);
37850 }
37851
37852
37853@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
37854 /* VBLANK interrupt */
37855 if (status & MGA_VLINEPEN) {
37856 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
37857- atomic_inc(&dev_priv->vbl_received);
37858+ atomic_inc_unchecked(&dev_priv->vbl_received);
37859 drm_handle_vblank(dev, 0);
37860 handled = 1;
37861 }
37862@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
37863 if ((prim_start & ~0x03) != (prim_end & ~0x03))
37864 MGA_WRITE(MGA_PRIMEND, prim_end);
37865
37866- atomic_inc(&dev_priv->last_fence_retired);
37867+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
37868 DRM_WAKEUP(&dev_priv->fence_queue);
37869 handled = 1;
37870 }
37871@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
37872 * using fences.
37873 */
37874 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
37875- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
37876+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
37877 - *sequence) <= (1 << 23)));
37878
37879 *sequence = cur_fence;
37880diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
37881index 6aa2137..fe8dc55 100644
37882--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
37883+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
37884@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
37885 struct bit_table {
37886 const char id;
37887 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
37888-};
37889+} __no_const;
37890
37891 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
37892
37893diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
37894index f2b30f8..d0f9a95 100644
37895--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
37896+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
37897@@ -92,7 +92,7 @@ struct nouveau_drm {
37898 struct drm_global_reference mem_global_ref;
37899 struct ttm_bo_global_ref bo_global_ref;
37900 struct ttm_bo_device bdev;
37901- atomic_t validate_sequence;
37902+ atomic_unchecked_t validate_sequence;
37903 int (*move)(struct nouveau_channel *,
37904 struct ttm_buffer_object *,
37905 struct ttm_mem_reg *, struct ttm_mem_reg *);
37906diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
37907index b4b4d0c..b7edc15 100644
37908--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
37909+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
37910@@ -322,7 +322,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
37911 int ret, i;
37912 struct nouveau_bo *res_bo = NULL;
37913
37914- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
37915+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
37916 retry:
37917 if (++trycnt > 100000) {
37918 NV_ERROR(cli, "%s failed and gave up.\n", __func__);
37919@@ -359,7 +359,7 @@ retry:
37920 if (ret) {
37921 validate_fini(op, NULL);
37922 if (unlikely(ret == -EAGAIN)) {
37923- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
37924+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
37925 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
37926 sequence);
37927 if (!ret)
37928diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
37929index 08214bc..9208577 100644
37930--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
37931+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
37932@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
37933 unsigned long arg)
37934 {
37935 unsigned int nr = DRM_IOCTL_NR(cmd);
37936- drm_ioctl_compat_t *fn = NULL;
37937+ drm_ioctl_compat_t fn = NULL;
37938 int ret;
37939
37940 if (nr < DRM_COMMAND_BASE)
37941diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
37942index 25d3495..d81aaf6 100644
37943--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
37944+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
37945@@ -62,7 +62,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
37946 bool can_switch;
37947
37948 spin_lock(&dev->count_lock);
37949- can_switch = (dev->open_count == 0);
37950+ can_switch = (local_read(&dev->open_count) == 0);
37951 spin_unlock(&dev->count_lock);
37952 return can_switch;
37953 }
37954diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
37955index 489cb8c..0b8d0d3 100644
37956--- a/drivers/gpu/drm/qxl/qxl_ttm.c
37957+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
37958@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
37959 }
37960 }
37961
37962-static struct vm_operations_struct qxl_ttm_vm_ops;
37963+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
37964 static const struct vm_operations_struct *ttm_vm_ops;
37965
37966 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
37967@@ -147,8 +147,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
37968 return r;
37969 if (unlikely(ttm_vm_ops == NULL)) {
37970 ttm_vm_ops = vma->vm_ops;
37971+ pax_open_kernel();
37972 qxl_ttm_vm_ops = *ttm_vm_ops;
37973 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
37974+ pax_close_kernel();
37975 }
37976 vma->vm_ops = &qxl_ttm_vm_ops;
37977 return 0;
37978@@ -556,25 +558,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
37979 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
37980 {
37981 #if defined(CONFIG_DEBUG_FS)
37982- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
37983- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
37984- unsigned i;
37985+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
37986+ {
37987+ .name = "qxl_mem_mm",
37988+ .show = &qxl_mm_dump_table,
37989+ },
37990+ {
37991+ .name = "qxl_surf_mm",
37992+ .show = &qxl_mm_dump_table,
37993+ }
37994+ };
37995
37996- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
37997- if (i == 0)
37998- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
37999- else
38000- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
38001- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
38002- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
38003- qxl_mem_types_list[i].driver_features = 0;
38004- if (i == 0)
38005- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
38006- else
38007- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
38008+ pax_open_kernel();
38009+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
38010+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
38011+ pax_close_kernel();
38012
38013- }
38014- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
38015+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
38016 #else
38017 return 0;
38018 #endif
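
[annotation] qxl_ttm_debugfs_init() previously built its drm_info_list table at runtime, sprintf()ing names into static char buffers; the hunk makes the table fully compile-time initialized, leaving only the two .data pointers to be patched in, and because drm_info_list is constified elsewhere in this patch those writes go through the pax_open_kernel() window with *(void **) casts. The radeon_ttm.c hunk below applies the same conversion. Reduced sketch (struct and helper names are illustrative):

struct info_entry {
	const char *name;
	int (*show)(void);
	void *data;
};
extern int dump_table(void);

static struct info_entry entries[] = {
	{ .name = "qxl_mem_mm",  .show = dump_table },
	{ .name = "qxl_surf_mm", .show = dump_table },
};

static void wire_up(void *vram_priv, void *surf_priv)
{
	/* runtime-only fields written inside the write-enable window */
	pax_open_kernel();
	*(void **)&entries[0].data = vram_priv;
	*(void **)&entries[1].data = surf_priv;
	pax_close_kernel();
}
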
38019diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
38020index d4660cf..70dbe65 100644
38021--- a/drivers/gpu/drm/r128/r128_cce.c
38022+++ b/drivers/gpu/drm/r128/r128_cce.c
38023@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
38024
38025 /* GH: Simple idle check.
38026 */
38027- atomic_set(&dev_priv->idle_count, 0);
38028+ atomic_set_unchecked(&dev_priv->idle_count, 0);
38029
38030 /* We don't support anything other than bus-mastering ring mode,
38031 * but the ring can be in either AGP or PCI space for the ring
38032diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
38033index 930c71b..499aded 100644
38034--- a/drivers/gpu/drm/r128/r128_drv.h
38035+++ b/drivers/gpu/drm/r128/r128_drv.h
38036@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
38037 int is_pci;
38038 unsigned long cce_buffers_offset;
38039
38040- atomic_t idle_count;
38041+ atomic_unchecked_t idle_count;
38042
38043 int page_flipping;
38044 int current_page;
38045 u32 crtc_offset;
38046 u32 crtc_offset_cntl;
38047
38048- atomic_t vbl_received;
38049+ atomic_unchecked_t vbl_received;
38050
38051 u32 color_fmt;
38052 unsigned int front_offset;
38053diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
38054index a954c54..9cc595c 100644
38055--- a/drivers/gpu/drm/r128/r128_ioc32.c
38056+++ b/drivers/gpu/drm/r128/r128_ioc32.c
38057@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
38058 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
38059 }
38060
38061-drm_ioctl_compat_t *r128_compat_ioctls[] = {
38062+drm_ioctl_compat_t r128_compat_ioctls[] = {
38063 [DRM_R128_INIT] = compat_r128_init,
38064 [DRM_R128_DEPTH] = compat_r128_depth,
38065 [DRM_R128_STIPPLE] = compat_r128_stipple,
38066@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
38067 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38068 {
38069 unsigned int nr = DRM_IOCTL_NR(cmd);
38070- drm_ioctl_compat_t *fn = NULL;
38071 int ret;
38072
38073 if (nr < DRM_COMMAND_BASE)
38074 return drm_compat_ioctl(filp, cmd, arg);
38075
38076- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
38077- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
38078-
38079- if (fn != NULL)
38080+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
38081+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
38082 ret = (*fn) (filp, cmd, arg);
38083- else
38084+ } else
38085 ret = drm_ioctl(filp, cmd, arg);
38086
38087 return ret;
38088diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
38089index 2ea4f09..d391371 100644
38090--- a/drivers/gpu/drm/r128/r128_irq.c
38091+++ b/drivers/gpu/drm/r128/r128_irq.c
38092@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
38093 if (crtc != 0)
38094 return 0;
38095
38096- return atomic_read(&dev_priv->vbl_received);
38097+ return atomic_read_unchecked(&dev_priv->vbl_received);
38098 }
38099
38100 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
38101@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
38102 /* VBLANK interrupt */
38103 if (status & R128_CRTC_VBLANK_INT) {
38104 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
38105- atomic_inc(&dev_priv->vbl_received);
38106+ atomic_inc_unchecked(&dev_priv->vbl_received);
38107 drm_handle_vblank(dev, 0);
38108 return IRQ_HANDLED;
38109 }
38110diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
38111index 19bb7e6..de7e2a2 100644
38112--- a/drivers/gpu/drm/r128/r128_state.c
38113+++ b/drivers/gpu/drm/r128/r128_state.c
38114@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
38115
38116 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
38117 {
38118- if (atomic_read(&dev_priv->idle_count) == 0)
38119+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
38120 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
38121 else
38122- atomic_set(&dev_priv->idle_count, 0);
38123+ atomic_set_unchecked(&dev_priv->idle_count, 0);
38124 }
38125
38126 #endif
38127diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
38128index 5a82b6b..9e69c73 100644
38129--- a/drivers/gpu/drm/radeon/mkregtable.c
38130+++ b/drivers/gpu/drm/radeon/mkregtable.c
38131@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
38132 regex_t mask_rex;
38133 regmatch_t match[4];
38134 char buf[1024];
38135- size_t end;
38136+ long end;
38137 int len;
38138 int done = 0;
38139 int r;
38140 unsigned o;
38141 struct offset *offset;
38142 char last_reg_s[10];
38143- int last_reg;
38144+ unsigned long last_reg;
38145
38146 if (regcomp
38147 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
38148diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
38149index b0dc0b6..a9bfe9c 100644
38150--- a/drivers/gpu/drm/radeon/radeon_device.c
38151+++ b/drivers/gpu/drm/radeon/radeon_device.c
38152@@ -1014,7 +1014,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
38153 bool can_switch;
38154
38155 spin_lock(&dev->count_lock);
38156- can_switch = (dev->open_count == 0);
38157+ can_switch = (local_read(&dev->open_count) == 0);
38158 spin_unlock(&dev->count_lock);
38159 return can_switch;
38160 }
38161diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
38162index b369d42..8dd04eb 100644
38163--- a/drivers/gpu/drm/radeon/radeon_drv.h
38164+++ b/drivers/gpu/drm/radeon/radeon_drv.h
38165@@ -258,7 +258,7 @@ typedef struct drm_radeon_private {
38166
38167 /* SW interrupt */
38168 wait_queue_head_t swi_queue;
38169- atomic_t swi_emitted;
38170+ atomic_unchecked_t swi_emitted;
38171 int vblank_crtc;
38172 uint32_t irq_enable_reg;
38173 uint32_t r500_disp_irq_reg;
38174diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
38175index c180df8..5fd8186 100644
38176--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
38177+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
38178@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
38179 request = compat_alloc_user_space(sizeof(*request));
38180 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
38181 || __put_user(req32.param, &request->param)
38182- || __put_user((void __user *)(unsigned long)req32.value,
38183+ || __put_user((unsigned long)req32.value,
38184 &request->value))
38185 return -EFAULT;
38186
38187@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
38188 #define compat_radeon_cp_setparam NULL
38189 #endif /* X86_64 || IA64 */
38190
38191-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
38192+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
38193 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
38194 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
38195 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
38196@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
38197 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38198 {
38199 unsigned int nr = DRM_IOCTL_NR(cmd);
38200- drm_ioctl_compat_t *fn = NULL;
38201 int ret;
38202
38203 if (nr < DRM_COMMAND_BASE)
38204 return drm_compat_ioctl(filp, cmd, arg);
38205
38206- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
38207- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
38208-
38209- if (fn != NULL)
38210+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
38211+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
38212 ret = (*fn) (filp, cmd, arg);
38213- else
38214+ } else
38215 ret = drm_ioctl(filp, cmd, arg);
38216
38217 return ret;
38218diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
38219index 8d68e97..9dcfed8 100644
38220--- a/drivers/gpu/drm/radeon/radeon_irq.c
38221+++ b/drivers/gpu/drm/radeon/radeon_irq.c
38222@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
38223 unsigned int ret;
38224 RING_LOCALS;
38225
38226- atomic_inc(&dev_priv->swi_emitted);
38227- ret = atomic_read(&dev_priv->swi_emitted);
38228+ atomic_inc_unchecked(&dev_priv->swi_emitted);
38229+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
38230
38231 BEGIN_RING(4);
38232 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
38233@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
38234 drm_radeon_private_t *dev_priv =
38235 (drm_radeon_private_t *) dev->dev_private;
38236
38237- atomic_set(&dev_priv->swi_emitted, 0);
38238+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
38239 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
38240
38241 dev->max_vblank_count = 0x001fffff;
38242diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
38243index 4d20910..6726b6d 100644
38244--- a/drivers/gpu/drm/radeon/radeon_state.c
38245+++ b/drivers/gpu/drm/radeon/radeon_state.c
38246@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
38247 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
38248 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
38249
38250- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
38251+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
38252 sarea_priv->nbox * sizeof(depth_boxes[0])))
38253 return -EFAULT;
38254
38255@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
38256 {
38257 drm_radeon_private_t *dev_priv = dev->dev_private;
38258 drm_radeon_getparam_t *param = data;
38259- int value;
38260+ int value = 0;
38261
38262 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
38263
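
[annotation] Both radeon_state.c changes are defensive: sarea_priv points into a SAREA page that userspace maps writably, so nbox can change between the clamp and the size computation handed to DRM_COPY_FROM_USER(); re-testing the bound in the same expression narrows (though does not fully close) that TOCTOU window. The getparam hunk separately zero-initializes value so an unrecognized parameter cannot copy uninitialized stack back to userspace. A more watertight variant, shown as a fragment of the same function body, snapshots the shared field once:

/* Sketch: read a writably-shared value a single time, then validate
 * and use only the snapshot. ACCESS_ONCE() is the 3.10-era idiom. */
unsigned int nbox = ACCESS_ONCE(sarea_priv->nbox);

if (nbox > RADEON_NR_SAREA_CLIPRECTS)
	nbox = RADEON_NR_SAREA_CLIPRECTS;

if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
		       nbox * sizeof(depth_boxes[0])))
	return -EFAULT;
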
38264diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
38265index 6c0ce89..57a2529 100644
38266--- a/drivers/gpu/drm/radeon/radeon_ttm.c
38267+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
38268@@ -782,7 +782,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
38269 man->size = size >> PAGE_SHIFT;
38270 }
38271
38272-static struct vm_operations_struct radeon_ttm_vm_ops;
38273+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
38274 static const struct vm_operations_struct *ttm_vm_ops = NULL;
38275
38276 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38277@@ -823,8 +823,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
38278 }
38279 if (unlikely(ttm_vm_ops == NULL)) {
38280 ttm_vm_ops = vma->vm_ops;
38281+ pax_open_kernel();
38282 radeon_ttm_vm_ops = *ttm_vm_ops;
38283 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
38284+ pax_close_kernel();
38285 }
38286 vma->vm_ops = &radeon_ttm_vm_ops;
38287 return 0;
38288@@ -853,38 +855,33 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
38289 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
38290 {
38291 #if defined(CONFIG_DEBUG_FS)
38292- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
38293- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
38294+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2] = {
38295+ {
38296+ .name = "radeon_vram_mm",
38297+ .show = &radeon_mm_dump_table,
38298+ },
38299+ {
38300+ .name = "radeon_gtt_mm",
38301+ .show = &radeon_mm_dump_table,
38302+ },
38303+ {
38304+ .name = "ttm_page_pool",
38305+ .show = &ttm_page_alloc_debugfs,
38306+ },
38307+ {
38308+ .name = "ttm_dma_page_pool",
38309+ .show = &ttm_dma_page_alloc_debugfs,
38310+ },
38311+ };
38312 unsigned i;
38313
38314- for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
38315- if (i == 0)
38316- sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
38317- else
38318- sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
38319- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
38320- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
38321- radeon_mem_types_list[i].driver_features = 0;
38322- if (i == 0)
38323- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
38324- else
38325- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
38326-
38327- }
38328- /* Add ttm page pool to debugfs */
38329- sprintf(radeon_mem_types_names[i], "ttm_page_pool");
38330- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
38331- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
38332- radeon_mem_types_list[i].driver_features = 0;
38333- radeon_mem_types_list[i++].data = NULL;
38334+ pax_open_kernel();
38335+ *(void **)&radeon_mem_types_list[0].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
38336+ *(void **)&radeon_mem_types_list[1].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
38337+ pax_close_kernel();
38338 #ifdef CONFIG_SWIOTLB
38339- if (swiotlb_nr_tbl()) {
38340- sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
38341- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
38342- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
38343- radeon_mem_types_list[i].driver_features = 0;
38344- radeon_mem_types_list[i++].data = NULL;
38345- }
38346+ if (swiotlb_nr_tbl())
38347+ i++;
38348 #endif
38349 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
38350
38351diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
38352index 55880d5..9e95342 100644
38353--- a/drivers/gpu/drm/radeon/rs690.c
38354+++ b/drivers/gpu/drm/radeon/rs690.c
38355@@ -327,9 +327,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
38356 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
38357 rdev->pm.sideport_bandwidth.full)
38358 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
38359- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
38360+ read_delay_latency.full = dfixed_const(800 * 1000);
38361 read_delay_latency.full = dfixed_div(read_delay_latency,
38362 rdev->pm.igp_sideport_mclk);
38363+ a.full = dfixed_const(370);
38364+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
38365 } else {
38366 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
38367 rdev->pm.k8_bandwidth.full)
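
[annotation] The rs690 hunk is a 32-bit fixed-point overflow fix: dfixed_const(x) encodes x in 20.12 format, i.e. x << 12 in a u32, and 370 * 800 * 1000 = 296,000,000 needs roughly 40 bits once shifted. Splitting the computation keeps every intermediate in range:

/* 20.12 fixed point in a u32 (values approximate):
 *   old: (370*800*1000) << 12 = 296,000,000 << 12 ~ 1.21e12 -> overflows u32
 *   new: (800*1000)     << 12 = 800,000     << 12 ~ 3.28e9  -> fits (< ~4.29e9)
 * then divide by igp_sideport_mclk first, and only afterwards scale
 * the much smaller quotient with dfixed_mul(..., dfixed_const(370)).
 */
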
38368diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
38369index dbc2def..0a9f710 100644
38370--- a/drivers/gpu/drm/ttm/ttm_memory.c
38371+++ b/drivers/gpu/drm/ttm/ttm_memory.c
38372@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
38373 zone->glob = glob;
38374 glob->zone_kernel = zone;
38375 ret = kobject_init_and_add(
38376- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
38377+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
38378 if (unlikely(ret != 0)) {
38379 kobject_put(&zone->kobj);
38380 return ret;
38381@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
38382 zone->glob = glob;
38383 glob->zone_dma32 = zone;
38384 ret = kobject_init_and_add(
38385- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
38386+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
38387 if (unlikely(ret != 0)) {
38388 kobject_put(&zone->kobj);
38389 return ret;
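
[annotation] Both ttm_memory.c hunks fix a format-string bug: the trailing arguments of kobject_init_and_add() are a printf-style format, so passing zone->name directly would let a '%' in the name be parsed as a conversion specifier; routing it through "%s" makes the name pure data. The rule in miniature (add_zone_kobj is an illustrative wrapper):

#include <linux/kobject.h>

static int add_zone_kobj(struct kobject *kobj, struct kobj_type *ktype,
			 struct kobject *parent, const char *name)
{
	/* unsafe: kobject_init_and_add(kobj, ktype, parent, name); */
	return kobject_init_and_add(kobj, ktype, parent, "%s", name);
}
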
38390diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
38391index bd2a3b4..122d9ad 100644
38392--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
38393+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
38394@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
38395 static int ttm_pool_mm_shrink(struct shrinker *shrink,
38396 struct shrink_control *sc)
38397 {
38398- static atomic_t start_pool = ATOMIC_INIT(0);
38399+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
38400 unsigned i;
38401- unsigned pool_offset = atomic_add_return(1, &start_pool);
38402+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
38403 struct ttm_page_pool *pool;
38404 int shrink_pages = sc->nr_to_scan;
38405
38406diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
38407index dc0c065..58a0782 100644
38408--- a/drivers/gpu/drm/udl/udl_fb.c
38409+++ b/drivers/gpu/drm/udl/udl_fb.c
38410@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
38411 fb_deferred_io_cleanup(info);
38412 kfree(info->fbdefio);
38413 info->fbdefio = NULL;
38414- info->fbops->fb_mmap = udl_fb_mmap;
38415 }
38416
38417 pr_warn("released /dev/fb%d user=%d count=%d\n",
38418diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
38419index 893a650..6190d3b 100644
38420--- a/drivers/gpu/drm/via/via_drv.h
38421+++ b/drivers/gpu/drm/via/via_drv.h
38422@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
38423 typedef uint32_t maskarray_t[5];
38424
38425 typedef struct drm_via_irq {
38426- atomic_t irq_received;
38427+ atomic_unchecked_t irq_received;
38428 uint32_t pending_mask;
38429 uint32_t enable_mask;
38430 wait_queue_head_t irq_queue;
38431@@ -75,7 +75,7 @@ typedef struct drm_via_private {
38432 struct timeval last_vblank;
38433 int last_vblank_valid;
38434 unsigned usec_per_vblank;
38435- atomic_t vbl_received;
38436+ atomic_unchecked_t vbl_received;
38437 drm_via_state_t hc_state;
38438 char pci_buf[VIA_PCI_BUF_SIZE];
38439 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
38440diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
38441index ac98964..5dbf512 100644
38442--- a/drivers/gpu/drm/via/via_irq.c
38443+++ b/drivers/gpu/drm/via/via_irq.c
38444@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
38445 if (crtc != 0)
38446 return 0;
38447
38448- return atomic_read(&dev_priv->vbl_received);
38449+ return atomic_read_unchecked(&dev_priv->vbl_received);
38450 }
38451
38452 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
38453@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
38454
38455 status = VIA_READ(VIA_REG_INTERRUPT);
38456 if (status & VIA_IRQ_VBLANK_PENDING) {
38457- atomic_inc(&dev_priv->vbl_received);
38458- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
38459+ atomic_inc_unchecked(&dev_priv->vbl_received);
38460+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
38461 do_gettimeofday(&cur_vblank);
38462 if (dev_priv->last_vblank_valid) {
38463 dev_priv->usec_per_vblank =
38464@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
38465 dev_priv->last_vblank = cur_vblank;
38466 dev_priv->last_vblank_valid = 1;
38467 }
38468- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
38469+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
38470 DRM_DEBUG("US per vblank is: %u\n",
38471 dev_priv->usec_per_vblank);
38472 }
38473@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
38474
38475 for (i = 0; i < dev_priv->num_irqs; ++i) {
38476 if (status & cur_irq->pending_mask) {
38477- atomic_inc(&cur_irq->irq_received);
38478+ atomic_inc_unchecked(&cur_irq->irq_received);
38479 DRM_WAKEUP(&cur_irq->irq_queue);
38480 handled = 1;
38481 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
38482@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
38483 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
38484 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
38485 masks[irq][4]));
38486- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
38487+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
38488 } else {
38489 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
38490 (((cur_irq_sequence =
38491- atomic_read(&cur_irq->irq_received)) -
38492+ atomic_read_unchecked(&cur_irq->irq_received)) -
38493 *sequence) <= (1 << 23)));
38494 }
38495 *sequence = cur_irq_sequence;
38496@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
38497 }
38498
38499 for (i = 0; i < dev_priv->num_irqs; ++i) {
38500- atomic_set(&cur_irq->irq_received, 0);
38501+ atomic_set_unchecked(&cur_irq->irq_received, 0);
38502 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
38503 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
38504 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
38505@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
38506 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
38507 case VIA_IRQ_RELATIVE:
38508 irqwait->request.sequence +=
38509- atomic_read(&cur_irq->irq_received);
38510+ atomic_read_unchecked(&cur_irq->irq_received);
38511 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
38512 case VIA_IRQ_ABSOLUTE:
38513 break;
38514diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
38515index 13aeda7..4a952d1 100644
38516--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
38517+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
38518@@ -290,7 +290,7 @@ struct vmw_private {
38519 * Fencing and IRQs.
38520 */
38521
38522- atomic_t marker_seq;
38523+ atomic_unchecked_t marker_seq;
38524 wait_queue_head_t fence_queue;
38525 wait_queue_head_t fifo_queue;
38526 int fence_queue_waiters; /* Protected by hw_mutex */
38527diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
38528index 3eb1486..0a47ee9 100644
38529--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
38530+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
38531@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
38532 (unsigned int) min,
38533 (unsigned int) fifo->capabilities);
38534
38535- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
38536+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
38537 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
38538 vmw_marker_queue_init(&fifo->marker_queue);
38539 return vmw_fifo_send_fence(dev_priv, &dummy);
38540@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
38541 if (reserveable)
38542 iowrite32(bytes, fifo_mem +
38543 SVGA_FIFO_RESERVED);
38544- return fifo_mem + (next_cmd >> 2);
38545+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
38546 } else {
38547 need_bounce = true;
38548 }
38549@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
38550
38551 fm = vmw_fifo_reserve(dev_priv, bytes);
38552 if (unlikely(fm == NULL)) {
38553- *seqno = atomic_read(&dev_priv->marker_seq);
38554+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
38555 ret = -ENOMEM;
38556 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
38557 false, 3*HZ);
38558@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
38559 }
38560
38561 do {
38562- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
38563+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
38564 } while (*seqno == 0);
38565
38566 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
38567diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
38568index c509d40..3b640c3 100644
38569--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
38570+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
38571@@ -138,7 +138,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
38572 int ret;
38573
38574 num_clips = arg->num_clips;
38575- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
38576+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
38577
38578 if (unlikely(num_clips == 0))
38579 return 0;
38580@@ -222,7 +222,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
38581 int ret;
38582
38583 num_clips = arg->num_clips;
38584- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
38585+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
38586
38587 if (unlikely(num_clips == 0))
38588 return 0;
38589diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
38590index 4640adb..e1384ed 100644
38591--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
38592+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
38593@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
38594 * emitted. Then the fence is stale and signaled.
38595 */
38596
38597- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
38598+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
38599 > VMW_FENCE_WRAP);
38600
38601 return ret;
38602@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
38603
38604 if (fifo_idle)
38605 down_read(&fifo_state->rwsem);
38606- signal_seq = atomic_read(&dev_priv->marker_seq);
38607+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
38608 ret = 0;
38609
38610 for (;;) {
38611diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
38612index 8a8725c2..afed796 100644
38613--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
38614+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
38615@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
38616 while (!vmw_lag_lt(queue, us)) {
38617 spin_lock(&queue->lock);
38618 if (list_empty(&queue->head))
38619- seqno = atomic_read(&dev_priv->marker_seq);
38620+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
38621 else {
38622 marker = list_first_entry(&queue->head,
38623 struct vmw_marker, head);
38624diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/host1x/drm/dc.c
38625index 8c04943..4370ed9 100644
38626--- a/drivers/gpu/host1x/drm/dc.c
38627+++ b/drivers/gpu/host1x/drm/dc.c
38628@@ -999,7 +999,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
38629 }
38630
38631 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
38632- dc->debugfs_files[i].data = dc;
38633+ *(void **)&dc->debugfs_files[i].data = dc;
38634
38635 err = drm_debugfs_create_files(dc->debugfs_files,
38636 ARRAY_SIZE(debugfs_files),
38637diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
38638index 402f486..f862d7e 100644
38639--- a/drivers/hid/hid-core.c
38640+++ b/drivers/hid/hid-core.c
38641@@ -2275,7 +2275,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
38642
38643 int hid_add_device(struct hid_device *hdev)
38644 {
38645- static atomic_t id = ATOMIC_INIT(0);
38646+ static atomic_unchecked_t id = ATOMIC_INIT(0);
38647 int ret;
38648
38649 if (WARN_ON(hdev->status & HID_STAT_ADDED))
38650@@ -2309,7 +2309,7 @@ int hid_add_device(struct hid_device *hdev)
38651 /* XXX hack, any other cleaner solution after the driver core
38652 * is converted to allow more than 20 bytes as the device name? */
38653 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
38654- hdev->vendor, hdev->product, atomic_inc_return(&id));
38655+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
38656
38657 hid_debug_register(hdev, dev_name(&hdev->dev));
38658 ret = device_add(&hdev->dev);
38659diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
38660index 90124ff..3761764 100644
38661--- a/drivers/hid/hid-wiimote-debug.c
38662+++ b/drivers/hid/hid-wiimote-debug.c
38663@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
38664 else if (size == 0)
38665 return -EIO;
38666
38667- if (copy_to_user(u, buf, size))
38668+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
38669 return -EFAULT;
38670
38671 *off += size;
38672diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
38673index 0b122f8..b1d8160 100644
38674--- a/drivers/hv/channel.c
38675+++ b/drivers/hv/channel.c
38676@@ -394,8 +394,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
38677 int ret = 0;
38678 int t;
38679
38680- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
38681- atomic_inc(&vmbus_connection.next_gpadl_handle);
38682+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
38683+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
38684
38685 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
38686 if (ret)
38687diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
38688index ae49237..380d4c9 100644
38689--- a/drivers/hv/hv.c
38690+++ b/drivers/hv/hv.c
38691@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
38692 u64 output_address = (output) ? virt_to_phys(output) : 0;
38693 u32 output_address_hi = output_address >> 32;
38694 u32 output_address_lo = output_address & 0xFFFFFFFF;
38695- void *hypercall_page = hv_context.hypercall_page;
38696+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
38697
38698 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
38699 "=a"(hv_status_lo) : "d" (control_hi),
38700diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
38701index 12f2f9e..679603c 100644
38702--- a/drivers/hv/hyperv_vmbus.h
38703+++ b/drivers/hv/hyperv_vmbus.h
38704@@ -591,7 +591,7 @@ enum vmbus_connect_state {
38705 struct vmbus_connection {
38706 enum vmbus_connect_state conn_state;
38707
38708- atomic_t next_gpadl_handle;
38709+ atomic_unchecked_t next_gpadl_handle;
38710
38711 /*
38712 * Represents channel interrupts. Each bit position represents a
38713diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
38714index 4004e54..c2de226 100644
38715--- a/drivers/hv/vmbus_drv.c
38716+++ b/drivers/hv/vmbus_drv.c
38717@@ -668,10 +668,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
38718 {
38719 int ret = 0;
38720
38721- static atomic_t device_num = ATOMIC_INIT(0);
38722+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
38723
38724 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
38725- atomic_inc_return(&device_num));
38726+ atomic_inc_return_unchecked(&device_num));
38727
38728 child_device_obj->device.bus = &hv_bus;
38729 child_device_obj->device.parent = &hv_acpi_dev->dev;
38730diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
38731index 6351aba..dc4aaf4 100644
38732--- a/drivers/hwmon/acpi_power_meter.c
38733+++ b/drivers/hwmon/acpi_power_meter.c
38734@@ -117,7 +117,7 @@ struct sensor_template {
38735 struct device_attribute *devattr,
38736 const char *buf, size_t count);
38737 int index;
38738-};
38739+} __do_const;
38740
38741 /* Averaging interval */
38742 static int update_avg_interval(struct acpi_power_meter_resource *resource)
38743@@ -629,7 +629,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
38744 struct sensor_template *attrs)
38745 {
38746 struct device *dev = &resource->acpi_dev->dev;
38747- struct sensor_device_attribute *sensors =
38748+ sensor_device_attribute_no_const *sensors =
38749 &resource->sensors[resource->num_sensors];
38750 int res = 0;
38751
38752diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
38753index 62c2e32..8f2859a 100644
38754--- a/drivers/hwmon/applesmc.c
38755+++ b/drivers/hwmon/applesmc.c
38756@@ -1084,7 +1084,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
38757 {
38758 struct applesmc_node_group *grp;
38759 struct applesmc_dev_attr *node;
38760- struct attribute *attr;
38761+ attribute_no_const *attr;
38762 int ret, i;
38763
38764 for (grp = groups; grp->format; grp++) {
38765diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
38766index b25c643..a13460d 100644
38767--- a/drivers/hwmon/asus_atk0110.c
38768+++ b/drivers/hwmon/asus_atk0110.c
38769@@ -152,10 +152,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
38770 struct atk_sensor_data {
38771 struct list_head list;
38772 struct atk_data *data;
38773- struct device_attribute label_attr;
38774- struct device_attribute input_attr;
38775- struct device_attribute limit1_attr;
38776- struct device_attribute limit2_attr;
38777+ device_attribute_no_const label_attr;
38778+ device_attribute_no_const input_attr;
38779+ device_attribute_no_const limit1_attr;
38780+ device_attribute_no_const limit2_attr;
38781 char label_attr_name[ATTR_NAME_SIZE];
38782 char input_attr_name[ATTR_NAME_SIZE];
38783 char limit1_attr_name[ATTR_NAME_SIZE];
38784@@ -275,7 +275,7 @@ static ssize_t atk_name_show(struct device *dev,
38785 static struct device_attribute atk_name_attr =
38786 __ATTR(name, 0444, atk_name_show, NULL);
38787
38788-static void atk_init_attribute(struct device_attribute *attr, char *name,
38789+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
38790 sysfs_show_func show)
38791 {
38792 sysfs_attr_init(&attr->attr);
38793diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
38794index 658ce3a..0d0c2f3 100644
38795--- a/drivers/hwmon/coretemp.c
38796+++ b/drivers/hwmon/coretemp.c
38797@@ -790,7 +790,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
38798 return NOTIFY_OK;
38799 }
38800
38801-static struct notifier_block coretemp_cpu_notifier __refdata = {
38802+static struct notifier_block coretemp_cpu_notifier = {
38803 .notifier_call = coretemp_cpu_callback,
38804 };
38805
38806diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
38807index 1429f6e..ee03d59 100644
38808--- a/drivers/hwmon/ibmaem.c
38809+++ b/drivers/hwmon/ibmaem.c
38810@@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data,
38811 struct aem_rw_sensor_template *rw)
38812 {
38813 struct device *dev = &data->pdev->dev;
38814- struct sensor_device_attribute *sensors = data->sensors;
38815+ sensor_device_attribute_no_const *sensors = data->sensors;
38816 int err;
38817
38818 /* Set up read-only sensors */
38819diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
38820index 52b77af..aed1ddf 100644
38821--- a/drivers/hwmon/iio_hwmon.c
38822+++ b/drivers/hwmon/iio_hwmon.c
38823@@ -73,7 +73,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
38824 {
38825 struct device *dev = &pdev->dev;
38826 struct iio_hwmon_state *st;
38827- struct sensor_device_attribute *a;
38828+ sensor_device_attribute_no_const *a;
38829 int ret, i;
38830 int in_i = 1, temp_i = 1, curr_i = 1;
38831 enum iio_chan_type type;
38832diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
38833index 9add6092..ee7ba3f 100644
38834--- a/drivers/hwmon/pmbus/pmbus_core.c
38835+++ b/drivers/hwmon/pmbus/pmbus_core.c
38836@@ -781,7 +781,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
38837 return 0;
38838 }
38839
38840-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
38841+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
38842 const char *name,
38843 umode_t mode,
38844 ssize_t (*show)(struct device *dev,
38845@@ -798,7 +798,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
38846 dev_attr->store = store;
38847 }
38848
38849-static void pmbus_attr_init(struct sensor_device_attribute *a,
38850+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
38851 const char *name,
38852 umode_t mode,
38853 ssize_t (*show)(struct device *dev,
38854@@ -820,7 +820,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
38855 u16 reg, u8 mask)
38856 {
38857 struct pmbus_boolean *boolean;
38858- struct sensor_device_attribute *a;
38859+ sensor_device_attribute_no_const *a;
38860
38861 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
38862 if (!boolean)
38863@@ -845,7 +845,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
38864 bool update, bool readonly)
38865 {
38866 struct pmbus_sensor *sensor;
38867- struct device_attribute *a;
38868+ device_attribute_no_const *a;
38869
38870 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
38871 if (!sensor)
38872@@ -876,7 +876,7 @@ static int pmbus_add_label(struct pmbus_data *data,
38873 const char *lstring, int index)
38874 {
38875 struct pmbus_label *label;
38876- struct device_attribute *a;
38877+ device_attribute_no_const *a;
38878
38879 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
38880 if (!label)
38881diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
38882index 2507f90..1645765 100644
38883--- a/drivers/hwmon/sht15.c
38884+++ b/drivers/hwmon/sht15.c
38885@@ -169,7 +169,7 @@ struct sht15_data {
38886 int supply_uv;
38887 bool supply_uv_valid;
38888 struct work_struct update_supply_work;
38889- atomic_t interrupt_handled;
38890+ atomic_unchecked_t interrupt_handled;
38891 };
38892
38893 /**
38894@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
38895 ret = gpio_direction_input(data->pdata->gpio_data);
38896 if (ret)
38897 return ret;
38898- atomic_set(&data->interrupt_handled, 0);
38899+ atomic_set_unchecked(&data->interrupt_handled, 0);
38900
38901 enable_irq(gpio_to_irq(data->pdata->gpio_data));
38902 if (gpio_get_value(data->pdata->gpio_data) == 0) {
38903 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
38904 /* Only relevant if the interrupt hasn't occurred. */
38905- if (!atomic_read(&data->interrupt_handled))
38906+ if (!atomic_read_unchecked(&data->interrupt_handled))
38907 schedule_work(&data->read_work);
38908 }
38909 ret = wait_event_timeout(data->wait_queue,
38910@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
38911
38912 /* First disable the interrupt */
38913 disable_irq_nosync(irq);
38914- atomic_inc(&data->interrupt_handled);
38915+ atomic_inc_unchecked(&data->interrupt_handled);
38916 /* Then schedule a reading work struct */
38917 if (data->state != SHT15_READING_NOTHING)
38918 schedule_work(&data->read_work);
38919@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
38920 * If not, then start the interrupt again - care here as could
38921 * have gone low in meantime so verify it hasn't!
38922 */
38923- atomic_set(&data->interrupt_handled, 0);
38924+ atomic_set_unchecked(&data->interrupt_handled, 0);
38925 enable_irq(gpio_to_irq(data->pdata->gpio_data));
38926 /* If still not occurred or another handler was scheduled */
38927 if (gpio_get_value(data->pdata->gpio_data)
38928- || atomic_read(&data->interrupt_handled))
38929+ || atomic_read_unchecked(&data->interrupt_handled))
38930 return;
38931 }
38932
38933diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
38934index 76f157b..9c0db1b 100644
38935--- a/drivers/hwmon/via-cputemp.c
38936+++ b/drivers/hwmon/via-cputemp.c
38937@@ -296,7 +296,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
38938 return NOTIFY_OK;
38939 }
38940
38941-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
38942+static struct notifier_block via_cputemp_cpu_notifier = {
38943 .notifier_call = via_cputemp_cpu_callback,
38944 };
38945
38946diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
38947index 07f01ac..d79ad3d 100644
38948--- a/drivers/i2c/busses/i2c-amd756-s4882.c
38949+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
38950@@ -43,7 +43,7 @@
38951 extern struct i2c_adapter amd756_smbus;
38952
38953 static struct i2c_adapter *s4882_adapter;
38954-static struct i2c_algorithm *s4882_algo;
38955+static i2c_algorithm_no_const *s4882_algo;
38956
38957 /* Wrapper access functions for multiplexed SMBus */
38958 static DEFINE_MUTEX(amd756_lock);
38959diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
38960index 2ca268d..c6acbdf 100644
38961--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
38962+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
38963@@ -41,7 +41,7 @@
38964 extern struct i2c_adapter *nforce2_smbus;
38965
38966 static struct i2c_adapter *s4985_adapter;
38967-static struct i2c_algorithm *s4985_algo;
38968+static i2c_algorithm_no_const *s4985_algo;
38969
38970 /* Wrapper access functions for multiplexed SMBus */
38971 static DEFINE_MUTEX(nforce2_lock);
38972diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
38973index c3ccdea..5b3dc1a 100644
38974--- a/drivers/i2c/i2c-dev.c
38975+++ b/drivers/i2c/i2c-dev.c
38976@@ -271,7 +271,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
38977 break;
38978 }
38979
38980- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
38981+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
38982 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
38983 if (IS_ERR(rdwr_pa[i].buf)) {
38984 res = PTR_ERR(rdwr_pa[i].buf);
38985diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
38986index 2ff6204..218c16e 100644
38987--- a/drivers/ide/ide-cd.c
38988+++ b/drivers/ide/ide-cd.c
38989@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
38990 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
38991 if ((unsigned long)buf & alignment
38992 || blk_rq_bytes(rq) & q->dma_pad_mask
38993- || object_is_on_stack(buf))
38994+ || object_starts_on_stack(buf))
38995 drive->dma = 0;
38996 }
38997 }
38998diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
38999index e145931..08bfc59 100644
39000--- a/drivers/iio/industrialio-core.c
39001+++ b/drivers/iio/industrialio-core.c
39002@@ -506,7 +506,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
39003 }
39004
39005 static
39006-int __iio_device_attr_init(struct device_attribute *dev_attr,
39007+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
39008 const char *postfix,
39009 struct iio_chan_spec const *chan,
39010 ssize_t (*readfunc)(struct device *dev,
39011diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
39012index 784b97c..c9ceadf 100644
39013--- a/drivers/infiniband/core/cm.c
39014+++ b/drivers/infiniband/core/cm.c
39015@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
39016
39017 struct cm_counter_group {
39018 struct kobject obj;
39019- atomic_long_t counter[CM_ATTR_COUNT];
39020+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
39021 };
39022
39023 struct cm_counter_attribute {
39024@@ -1395,7 +1395,7 @@ static void cm_dup_req_handler(struct cm_work *work,
39025 struct ib_mad_send_buf *msg = NULL;
39026 int ret;
39027
39028- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39029+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39030 counter[CM_REQ_COUNTER]);
39031
39032 /* Quick state check to discard duplicate REQs. */
39033@@ -1779,7 +1779,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
39034 if (!cm_id_priv)
39035 return;
39036
39037- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39038+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39039 counter[CM_REP_COUNTER]);
39040 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
39041 if (ret)
39042@@ -1946,7 +1946,7 @@ static int cm_rtu_handler(struct cm_work *work)
39043 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
39044 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
39045 spin_unlock_irq(&cm_id_priv->lock);
39046- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39047+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39048 counter[CM_RTU_COUNTER]);
39049 goto out;
39050 }
39051@@ -2129,7 +2129,7 @@ static int cm_dreq_handler(struct cm_work *work)
39052 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
39053 dreq_msg->local_comm_id);
39054 if (!cm_id_priv) {
39055- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39056+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39057 counter[CM_DREQ_COUNTER]);
39058 cm_issue_drep(work->port, work->mad_recv_wc);
39059 return -EINVAL;
39060@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_work *work)
39061 case IB_CM_MRA_REP_RCVD:
39062 break;
39063 case IB_CM_TIMEWAIT:
39064- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39065+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39066 counter[CM_DREQ_COUNTER]);
39067 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
39068 goto unlock;
39069@@ -2168,7 +2168,7 @@ static int cm_dreq_handler(struct cm_work *work)
39070 cm_free_msg(msg);
39071 goto deref;
39072 case IB_CM_DREQ_RCVD:
39073- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39074+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39075 counter[CM_DREQ_COUNTER]);
39076 goto unlock;
39077 default:
39078@@ -2535,7 +2535,7 @@ static int cm_mra_handler(struct cm_work *work)
39079 ib_modify_mad(cm_id_priv->av.port->mad_agent,
39080 cm_id_priv->msg, timeout)) {
39081 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
39082- atomic_long_inc(&work->port->
39083+ atomic_long_inc_unchecked(&work->port->
39084 counter_group[CM_RECV_DUPLICATES].
39085 counter[CM_MRA_COUNTER]);
39086 goto out;
39087@@ -2544,7 +2544,7 @@ static int cm_mra_handler(struct cm_work *work)
39088 break;
39089 case IB_CM_MRA_REQ_RCVD:
39090 case IB_CM_MRA_REP_RCVD:
39091- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39092+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39093 counter[CM_MRA_COUNTER]);
39094 /* fall through */
39095 default:
39096@@ -2706,7 +2706,7 @@ static int cm_lap_handler(struct cm_work *work)
39097 case IB_CM_LAP_IDLE:
39098 break;
39099 case IB_CM_MRA_LAP_SENT:
39100- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39101+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39102 counter[CM_LAP_COUNTER]);
39103 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
39104 goto unlock;
39105@@ -2722,7 +2722,7 @@ static int cm_lap_handler(struct cm_work *work)
39106 cm_free_msg(msg);
39107 goto deref;
39108 case IB_CM_LAP_RCVD:
39109- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39110+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39111 counter[CM_LAP_COUNTER]);
39112 goto unlock;
39113 default:
39114@@ -3006,7 +3006,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
39115 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
39116 if (cur_cm_id_priv) {
39117 spin_unlock_irq(&cm.lock);
39118- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39119+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39120 counter[CM_SIDR_REQ_COUNTER]);
39121 goto out; /* Duplicate message. */
39122 }
39123@@ -3218,10 +3218,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
39124 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
39125 msg->retries = 1;
39126
39127- atomic_long_add(1 + msg->retries,
39128+ atomic_long_add_unchecked(1 + msg->retries,
39129 &port->counter_group[CM_XMIT].counter[attr_index]);
39130 if (msg->retries)
39131- atomic_long_add(msg->retries,
39132+ atomic_long_add_unchecked(msg->retries,
39133 &port->counter_group[CM_XMIT_RETRIES].
39134 counter[attr_index]);
39135
39136@@ -3431,7 +3431,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
39137 }
39138
39139 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
39140- atomic_long_inc(&port->counter_group[CM_RECV].
39141+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
39142 counter[attr_id - CM_ATTR_ID_OFFSET]);
39143
39144 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
39145@@ -3636,7 +3636,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
39146 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
39147
39148 return sprintf(buf, "%ld\n",
39149- atomic_long_read(&group->counter[cm_attr->index]));
39150+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
39151 }
39152
39153 static const struct sysfs_ops cm_counter_ops = {
39154diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
39155index 9f5ad7c..588cd84 100644
39156--- a/drivers/infiniband/core/fmr_pool.c
39157+++ b/drivers/infiniband/core/fmr_pool.c
39158@@ -98,8 +98,8 @@ struct ib_fmr_pool {
39159
39160 struct task_struct *thread;
39161
39162- atomic_t req_ser;
39163- atomic_t flush_ser;
39164+ atomic_unchecked_t req_ser;
39165+ atomic_unchecked_t flush_ser;
39166
39167 wait_queue_head_t force_wait;
39168 };
39169@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
39170 struct ib_fmr_pool *pool = pool_ptr;
39171
39172 do {
39173- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
39174+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
39175 ib_fmr_batch_release(pool);
39176
39177- atomic_inc(&pool->flush_ser);
39178+ atomic_inc_unchecked(&pool->flush_ser);
39179 wake_up_interruptible(&pool->force_wait);
39180
39181 if (pool->flush_function)
39182@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
39183 }
39184
39185 set_current_state(TASK_INTERRUPTIBLE);
39186- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
39187+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
39188 !kthread_should_stop())
39189 schedule();
39190 __set_current_state(TASK_RUNNING);
39191@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
39192 pool->dirty_watermark = params->dirty_watermark;
39193 pool->dirty_len = 0;
39194 spin_lock_init(&pool->pool_lock);
39195- atomic_set(&pool->req_ser, 0);
39196- atomic_set(&pool->flush_ser, 0);
39197+ atomic_set_unchecked(&pool->req_ser, 0);
39198+ atomic_set_unchecked(&pool->flush_ser, 0);
39199 init_waitqueue_head(&pool->force_wait);
39200
39201 pool->thread = kthread_run(ib_fmr_cleanup_thread,
39202@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
39203 }
39204 spin_unlock_irq(&pool->pool_lock);
39205
39206- serial = atomic_inc_return(&pool->req_ser);
39207+ serial = atomic_inc_return_unchecked(&pool->req_ser);
39208 wake_up_process(pool->thread);
39209
39210 if (wait_event_interruptible(pool->force_wait,
39211- atomic_read(&pool->flush_ser) - serial >= 0))
39212+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
39213 return -EINTR;
39214
39215 return 0;
39216@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
39217 } else {
39218 list_add_tail(&fmr->list, &pool->dirty_list);
39219 if (++pool->dirty_len >= pool->dirty_watermark) {
39220- atomic_inc(&pool->req_ser);
39221+ atomic_inc_unchecked(&pool->req_ser);
39222 wake_up_process(pool->thread);
39223 }
39224 }
39225diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
39226index 4cb8eb2..146bf60 100644
39227--- a/drivers/infiniband/hw/cxgb4/mem.c
39228+++ b/drivers/infiniband/hw/cxgb4/mem.c
39229@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
39230 int err;
39231 struct fw_ri_tpte tpt;
39232 u32 stag_idx;
39233- static atomic_t key;
39234+ static atomic_unchecked_t key;
39235
39236 if (c4iw_fatal_error(rdev))
39237 return -EIO;
39238@@ -266,7 +266,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
39239 if (rdev->stats.stag.cur > rdev->stats.stag.max)
39240 rdev->stats.stag.max = rdev->stats.stag.cur;
39241 mutex_unlock(&rdev->stats.lock);
39242- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
39243+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
39244 }
39245 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
39246 __func__, stag_state, type, pdid, stag_idx);
39247diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
39248index 79b3dbc..96e5fcc 100644
39249--- a/drivers/infiniband/hw/ipath/ipath_rc.c
39250+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
39251@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
39252 struct ib_atomic_eth *ateth;
39253 struct ipath_ack_entry *e;
39254 u64 vaddr;
39255- atomic64_t *maddr;
39256+ atomic64_unchecked_t *maddr;
39257 u64 sdata;
39258 u32 rkey;
39259 u8 next;
39260@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
39261 IB_ACCESS_REMOTE_ATOMIC)))
39262 goto nack_acc_unlck;
39263 /* Perform atomic OP and save result. */
39264- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
39265+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
39266 sdata = be64_to_cpu(ateth->swap_data);
39267 e = &qp->s_ack_queue[qp->r_head_ack_queue];
39268 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
39269- (u64) atomic64_add_return(sdata, maddr) - sdata :
39270+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
39271 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
39272 be64_to_cpu(ateth->compare_data),
39273 sdata);
39274diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
39275index 1f95bba..9530f87 100644
39276--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
39277+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
39278@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
39279 unsigned long flags;
39280 struct ib_wc wc;
39281 u64 sdata;
39282- atomic64_t *maddr;
39283+ atomic64_unchecked_t *maddr;
39284 enum ib_wc_status send_status;
39285
39286 /*
39287@@ -382,11 +382,11 @@ again:
39288 IB_ACCESS_REMOTE_ATOMIC)))
39289 goto acc_err;
39290 /* Perform atomic OP and save result. */
39291- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
39292+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
39293 sdata = wqe->wr.wr.atomic.compare_add;
39294 *(u64 *) sqp->s_sge.sge.vaddr =
39295 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
39296- (u64) atomic64_add_return(sdata, maddr) - sdata :
39297+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
39298 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
39299 sdata, wqe->wr.wr.atomic.swap);
39300 goto send_comp;
39301diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
39302index 9d3e5c1..d9afe4a 100644
39303--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
39304+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
39305@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
39306 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
39307 }
39308
39309-int mthca_QUERY_FW(struct mthca_dev *dev)
39310+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
39311 {
39312 struct mthca_mailbox *mailbox;
39313 u32 *outbox;
39314diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
39315index ed9a989..e0c5871 100644
39316--- a/drivers/infiniband/hw/mthca/mthca_mr.c
39317+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
39318@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
39319 return key;
39320 }
39321
39322-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
39323+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
39324 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
39325 {
39326 struct mthca_mailbox *mailbox;
39327diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
39328index 4291410..d2ab1fb 100644
39329--- a/drivers/infiniband/hw/nes/nes.c
39330+++ b/drivers/infiniband/hw/nes/nes.c
39331@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
39332 LIST_HEAD(nes_adapter_list);
39333 static LIST_HEAD(nes_dev_list);
39334
39335-atomic_t qps_destroyed;
39336+atomic_unchecked_t qps_destroyed;
39337
39338 static unsigned int ee_flsh_adapter;
39339 static unsigned int sysfs_nonidx_addr;
39340@@ -269,7 +269,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
39341 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
39342 struct nes_adapter *nesadapter = nesdev->nesadapter;
39343
39344- atomic_inc(&qps_destroyed);
39345+ atomic_inc_unchecked(&qps_destroyed);
39346
39347 /* Free the control structures */
39348
39349diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
39350index 33cc589..3bd6538 100644
39351--- a/drivers/infiniband/hw/nes/nes.h
39352+++ b/drivers/infiniband/hw/nes/nes.h
39353@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
39354 extern unsigned int wqm_quanta;
39355 extern struct list_head nes_adapter_list;
39356
39357-extern atomic_t cm_connects;
39358-extern atomic_t cm_accepts;
39359-extern atomic_t cm_disconnects;
39360-extern atomic_t cm_closes;
39361-extern atomic_t cm_connecteds;
39362-extern atomic_t cm_connect_reqs;
39363-extern atomic_t cm_rejects;
39364-extern atomic_t mod_qp_timouts;
39365-extern atomic_t qps_created;
39366-extern atomic_t qps_destroyed;
39367-extern atomic_t sw_qps_destroyed;
39368+extern atomic_unchecked_t cm_connects;
39369+extern atomic_unchecked_t cm_accepts;
39370+extern atomic_unchecked_t cm_disconnects;
39371+extern atomic_unchecked_t cm_closes;
39372+extern atomic_unchecked_t cm_connecteds;
39373+extern atomic_unchecked_t cm_connect_reqs;
39374+extern atomic_unchecked_t cm_rejects;
39375+extern atomic_unchecked_t mod_qp_timouts;
39376+extern atomic_unchecked_t qps_created;
39377+extern atomic_unchecked_t qps_destroyed;
39378+extern atomic_unchecked_t sw_qps_destroyed;
39379 extern u32 mh_detected;
39380 extern u32 mh_pauses_sent;
39381 extern u32 cm_packets_sent;
39382@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
39383 extern u32 cm_packets_received;
39384 extern u32 cm_packets_dropped;
39385 extern u32 cm_packets_retrans;
39386-extern atomic_t cm_listens_created;
39387-extern atomic_t cm_listens_destroyed;
39388+extern atomic_unchecked_t cm_listens_created;
39389+extern atomic_unchecked_t cm_listens_destroyed;
39390 extern u32 cm_backlog_drops;
39391-extern atomic_t cm_loopbacks;
39392-extern atomic_t cm_nodes_created;
39393-extern atomic_t cm_nodes_destroyed;
39394-extern atomic_t cm_accel_dropped_pkts;
39395-extern atomic_t cm_resets_recvd;
39396-extern atomic_t pau_qps_created;
39397-extern atomic_t pau_qps_destroyed;
39398+extern atomic_unchecked_t cm_loopbacks;
39399+extern atomic_unchecked_t cm_nodes_created;
39400+extern atomic_unchecked_t cm_nodes_destroyed;
39401+extern atomic_unchecked_t cm_accel_dropped_pkts;
39402+extern atomic_unchecked_t cm_resets_recvd;
39403+extern atomic_unchecked_t pau_qps_created;
39404+extern atomic_unchecked_t pau_qps_destroyed;
39405
39406 extern u32 int_mod_timer_init;
39407 extern u32 int_mod_cq_depth_256;
39408diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
39409index 24b9f1a..00fd004 100644
39410--- a/drivers/infiniband/hw/nes/nes_cm.c
39411+++ b/drivers/infiniband/hw/nes/nes_cm.c
39412@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
39413 u32 cm_packets_retrans;
39414 u32 cm_packets_created;
39415 u32 cm_packets_received;
39416-atomic_t cm_listens_created;
39417-atomic_t cm_listens_destroyed;
39418+atomic_unchecked_t cm_listens_created;
39419+atomic_unchecked_t cm_listens_destroyed;
39420 u32 cm_backlog_drops;
39421-atomic_t cm_loopbacks;
39422-atomic_t cm_nodes_created;
39423-atomic_t cm_nodes_destroyed;
39424-atomic_t cm_accel_dropped_pkts;
39425-atomic_t cm_resets_recvd;
39426+atomic_unchecked_t cm_loopbacks;
39427+atomic_unchecked_t cm_nodes_created;
39428+atomic_unchecked_t cm_nodes_destroyed;
39429+atomic_unchecked_t cm_accel_dropped_pkts;
39430+atomic_unchecked_t cm_resets_recvd;
39431
39432 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
39433 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
39434@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
39435
39436 static struct nes_cm_core *g_cm_core;
39437
39438-atomic_t cm_connects;
39439-atomic_t cm_accepts;
39440-atomic_t cm_disconnects;
39441-atomic_t cm_closes;
39442-atomic_t cm_connecteds;
39443-atomic_t cm_connect_reqs;
39444-atomic_t cm_rejects;
39445+atomic_unchecked_t cm_connects;
39446+atomic_unchecked_t cm_accepts;
39447+atomic_unchecked_t cm_disconnects;
39448+atomic_unchecked_t cm_closes;
39449+atomic_unchecked_t cm_connecteds;
39450+atomic_unchecked_t cm_connect_reqs;
39451+atomic_unchecked_t cm_rejects;
39452
39453 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
39454 {
39455@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
39456 kfree(listener);
39457 listener = NULL;
39458 ret = 0;
39459- atomic_inc(&cm_listens_destroyed);
39460+ atomic_inc_unchecked(&cm_listens_destroyed);
39461 } else {
39462 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
39463 }
39464@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
39465 cm_node->rem_mac);
39466
39467 add_hte_node(cm_core, cm_node);
39468- atomic_inc(&cm_nodes_created);
39469+ atomic_inc_unchecked(&cm_nodes_created);
39470
39471 return cm_node;
39472 }
39473@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
39474 }
39475
39476 atomic_dec(&cm_core->node_cnt);
39477- atomic_inc(&cm_nodes_destroyed);
39478+ atomic_inc_unchecked(&cm_nodes_destroyed);
39479 nesqp = cm_node->nesqp;
39480 if (nesqp) {
39481 nesqp->cm_node = NULL;
39482@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
39483
39484 static void drop_packet(struct sk_buff *skb)
39485 {
39486- atomic_inc(&cm_accel_dropped_pkts);
39487+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
39488 dev_kfree_skb_any(skb);
39489 }
39490
39491@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
39492 {
39493
39494 int reset = 0; /* whether to send reset in case of err.. */
39495- atomic_inc(&cm_resets_recvd);
39496+ atomic_inc_unchecked(&cm_resets_recvd);
39497 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
39498 " refcnt=%d\n", cm_node, cm_node->state,
39499 atomic_read(&cm_node->ref_count));
39500@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
39501 rem_ref_cm_node(cm_node->cm_core, cm_node);
39502 return NULL;
39503 }
39504- atomic_inc(&cm_loopbacks);
39505+ atomic_inc_unchecked(&cm_loopbacks);
39506 loopbackremotenode->loopbackpartner = cm_node;
39507 loopbackremotenode->tcp_cntxt.rcv_wscale =
39508 NES_CM_DEFAULT_RCV_WND_SCALE;
39509@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
39510 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
39511 else {
39512 rem_ref_cm_node(cm_core, cm_node);
39513- atomic_inc(&cm_accel_dropped_pkts);
39514+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
39515 dev_kfree_skb_any(skb);
39516 }
39517 break;
39518@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
39519
39520 if ((cm_id) && (cm_id->event_handler)) {
39521 if (issue_disconn) {
39522- atomic_inc(&cm_disconnects);
39523+ atomic_inc_unchecked(&cm_disconnects);
39524 cm_event.event = IW_CM_EVENT_DISCONNECT;
39525 cm_event.status = disconn_status;
39526 cm_event.local_addr = cm_id->local_addr;
39527@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
39528 }
39529
39530 if (issue_close) {
39531- atomic_inc(&cm_closes);
39532+ atomic_inc_unchecked(&cm_closes);
39533 nes_disconnect(nesqp, 1);
39534
39535 cm_id->provider_data = nesqp;
39536@@ -3033,7 +3033,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
39537
39538 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
39539 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
39540- atomic_inc(&cm_accepts);
39541+ atomic_inc_unchecked(&cm_accepts);
39542
39543 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
39544 netdev_refcnt_read(nesvnic->netdev));
39545@@ -3228,7 +3228,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
39546 struct nes_cm_core *cm_core;
39547 u8 *start_buff;
39548
39549- atomic_inc(&cm_rejects);
39550+ atomic_inc_unchecked(&cm_rejects);
39551 cm_node = (struct nes_cm_node *)cm_id->provider_data;
39552 loopback = cm_node->loopbackpartner;
39553 cm_core = cm_node->cm_core;
39554@@ -3288,7 +3288,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
39555 ntohl(cm_id->local_addr.sin_addr.s_addr),
39556 ntohs(cm_id->local_addr.sin_port));
39557
39558- atomic_inc(&cm_connects);
39559+ atomic_inc_unchecked(&cm_connects);
39560 nesqp->active_conn = 1;
39561
39562 /* cache the cm_id in the qp */
39563@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
39564 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
39565 return err;
39566 }
39567- atomic_inc(&cm_listens_created);
39568+ atomic_inc_unchecked(&cm_listens_created);
39569 }
39570
39571 cm_id->add_ref(cm_id);
39572@@ -3499,7 +3499,7 @@ static void cm_event_connected(struct nes_cm_event *event)
39573
39574 if (nesqp->destroyed)
39575 return;
39576- atomic_inc(&cm_connecteds);
39577+ atomic_inc_unchecked(&cm_connecteds);
39578 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
39579 " local port 0x%04X. jiffies = %lu.\n",
39580 nesqp->hwqp.qp_id,
39581@@ -3679,7 +3679,7 @@ static void cm_event_reset(struct nes_cm_event *event)
39582
39583 cm_id->add_ref(cm_id);
39584 ret = cm_id->event_handler(cm_id, &cm_event);
39585- atomic_inc(&cm_closes);
39586+ atomic_inc_unchecked(&cm_closes);
39587 cm_event.event = IW_CM_EVENT_CLOSE;
39588 cm_event.status = 0;
39589 cm_event.provider_data = cm_id->provider_data;
39590@@ -3715,7 +3715,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
39591 return;
39592 cm_id = cm_node->cm_id;
39593
39594- atomic_inc(&cm_connect_reqs);
39595+ atomic_inc_unchecked(&cm_connect_reqs);
39596 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
39597 cm_node, cm_id, jiffies);
39598
39599@@ -3755,7 +3755,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
39600 return;
39601 cm_id = cm_node->cm_id;
39602
39603- atomic_inc(&cm_connect_reqs);
39604+ atomic_inc_unchecked(&cm_connect_reqs);
39605 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
39606 cm_node, cm_id, jiffies);
39607
39608diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
39609index 4166452..fc952c3 100644
39610--- a/drivers/infiniband/hw/nes/nes_mgt.c
39611+++ b/drivers/infiniband/hw/nes/nes_mgt.c
39612@@ -40,8 +40,8 @@
39613 #include "nes.h"
39614 #include "nes_mgt.h"
39615
39616-atomic_t pau_qps_created;
39617-atomic_t pau_qps_destroyed;
39618+atomic_unchecked_t pau_qps_created;
39619+atomic_unchecked_t pau_qps_destroyed;
39620
39621 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
39622 {
39623@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
39624 {
39625 struct sk_buff *skb;
39626 unsigned long flags;
39627- atomic_inc(&pau_qps_destroyed);
39628+ atomic_inc_unchecked(&pau_qps_destroyed);
39629
39630 /* Free packets that have not yet been forwarded */
39631 /* Lock is acquired by skb_dequeue when removing the skb */
39632@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
39633 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
39634 skb_queue_head_init(&nesqp->pau_list);
39635 spin_lock_init(&nesqp->pau_lock);
39636- atomic_inc(&pau_qps_created);
39637+ atomic_inc_unchecked(&pau_qps_created);
39638 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
39639 }
39640
39641diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
39642index 49eb511..a774366 100644
39643--- a/drivers/infiniband/hw/nes/nes_nic.c
39644+++ b/drivers/infiniband/hw/nes/nes_nic.c
39645@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
39646 target_stat_values[++index] = mh_detected;
39647 target_stat_values[++index] = mh_pauses_sent;
39648 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
39649- target_stat_values[++index] = atomic_read(&cm_connects);
39650- target_stat_values[++index] = atomic_read(&cm_accepts);
39651- target_stat_values[++index] = atomic_read(&cm_disconnects);
39652- target_stat_values[++index] = atomic_read(&cm_connecteds);
39653- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
39654- target_stat_values[++index] = atomic_read(&cm_rejects);
39655- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
39656- target_stat_values[++index] = atomic_read(&qps_created);
39657- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
39658- target_stat_values[++index] = atomic_read(&qps_destroyed);
39659- target_stat_values[++index] = atomic_read(&cm_closes);
39660+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
39661+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
39662+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
39663+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
39664+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
39665+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
39666+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
39667+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
39668+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
39669+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
39670+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
39671 target_stat_values[++index] = cm_packets_sent;
39672 target_stat_values[++index] = cm_packets_bounced;
39673 target_stat_values[++index] = cm_packets_created;
39674 target_stat_values[++index] = cm_packets_received;
39675 target_stat_values[++index] = cm_packets_dropped;
39676 target_stat_values[++index] = cm_packets_retrans;
39677- target_stat_values[++index] = atomic_read(&cm_listens_created);
39678- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
39679+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
39680+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
39681 target_stat_values[++index] = cm_backlog_drops;
39682- target_stat_values[++index] = atomic_read(&cm_loopbacks);
39683- target_stat_values[++index] = atomic_read(&cm_nodes_created);
39684- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
39685- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
39686- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
39687+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
39688+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
39689+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
39690+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
39691+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
39692 target_stat_values[++index] = nesadapter->free_4kpbl;
39693 target_stat_values[++index] = nesadapter->free_256pbl;
39694 target_stat_values[++index] = int_mod_timer_init;
39695 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
39696 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
39697 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
39698- target_stat_values[++index] = atomic_read(&pau_qps_created);
39699- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
39700+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
39701+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
39702 }
39703
39704 /**
39705diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
39706index 8f67fe2..8960859 100644
39707--- a/drivers/infiniband/hw/nes/nes_verbs.c
39708+++ b/drivers/infiniband/hw/nes/nes_verbs.c
39709@@ -46,9 +46,9 @@
39710
39711 #include <rdma/ib_umem.h>
39712
39713-atomic_t mod_qp_timouts;
39714-atomic_t qps_created;
39715-atomic_t sw_qps_destroyed;
39716+atomic_unchecked_t mod_qp_timouts;
39717+atomic_unchecked_t qps_created;
39718+atomic_unchecked_t sw_qps_destroyed;
39719
39720 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
39721
39722@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
39723 if (init_attr->create_flags)
39724 return ERR_PTR(-EINVAL);
39725
39726- atomic_inc(&qps_created);
39727+ atomic_inc_unchecked(&qps_created);
39728 switch (init_attr->qp_type) {
39729 case IB_QPT_RC:
39730 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
39731@@ -1465,7 +1465,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
39732 struct iw_cm_event cm_event;
39733 int ret = 0;
39734
39735- atomic_inc(&sw_qps_destroyed);
39736+ atomic_inc_unchecked(&sw_qps_destroyed);
39737 nesqp->destroyed = 1;
39738
39739 /* Blow away the connection if it exists. */
39740diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
39741index 4d11575..3e890e5 100644
39742--- a/drivers/infiniband/hw/qib/qib.h
39743+++ b/drivers/infiniband/hw/qib/qib.h
39744@@ -51,6 +51,7 @@
39745 #include <linux/completion.h>
39746 #include <linux/kref.h>
39747 #include <linux/sched.h>
39748+#include <linux/slab.h>
39749
39750 #include "qib_common.h"
39751 #include "qib_verbs.h"
39752diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
39753index da739d9..da1c7f4 100644
39754--- a/drivers/input/gameport/gameport.c
39755+++ b/drivers/input/gameport/gameport.c
39756@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
39757 */
39758 static void gameport_init_port(struct gameport *gameport)
39759 {
39760- static atomic_t gameport_no = ATOMIC_INIT(0);
39761+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
39762
39763 __module_get(THIS_MODULE);
39764
39765 mutex_init(&gameport->drv_mutex);
39766 device_initialize(&gameport->dev);
39767 dev_set_name(&gameport->dev, "gameport%lu",
39768- (unsigned long)atomic_inc_return(&gameport_no) - 1);
39769+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
39770 gameport->dev.bus = &gameport_bus;
39771 gameport->dev.release = gameport_release_port;
39772 if (gameport->parent)
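The gameport, input, serio and xpad hunks around here all touch the same idiom: a static atomic counter whose only job is to mint unique instance numbers for device names. No object lifetime hangs off the value and wrap-around is harmless, which is why these counters are safe to exempt from REFCOUNT checking. The idiom, with illustrative names:

static atomic_unchecked_t example_no = ATOMIC_INIT(0);

static void example_init_port(struct device *dev)
{
	/* first device is example0; wrapping after 2^32 registrations is benign */
	dev_set_name(dev, "example%lu",
		     (unsigned long)atomic_inc_return_unchecked(&example_no) - 1);
}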
39773diff --git a/drivers/input/input.c b/drivers/input/input.c
39774index c044699..174d71a 100644
39775--- a/drivers/input/input.c
39776+++ b/drivers/input/input.c
39777@@ -2019,7 +2019,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
39778 */
39779 int input_register_device(struct input_dev *dev)
39780 {
39781- static atomic_t input_no = ATOMIC_INIT(0);
39782+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
39783 struct input_devres *devres = NULL;
39784 struct input_handler *handler;
39785 unsigned int packet_size;
39786@@ -2074,7 +2074,7 @@ int input_register_device(struct input_dev *dev)
39787 dev->setkeycode = input_default_setkeycode;
39788
39789 dev_set_name(&dev->dev, "input%ld",
39790- (unsigned long) atomic_inc_return(&input_no) - 1);
39791+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
39792
39793 error = device_add(&dev->dev);
39794 if (error)
39795diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
39796index 04c69af..5f92d00 100644
39797--- a/drivers/input/joystick/sidewinder.c
39798+++ b/drivers/input/joystick/sidewinder.c
39799@@ -30,6 +30,7 @@
39800 #include <linux/kernel.h>
39801 #include <linux/module.h>
39802 #include <linux/slab.h>
39803+#include <linux/sched.h>
39804 #include <linux/init.h>
39805 #include <linux/input.h>
39806 #include <linux/gameport.h>
39807diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
39808index fa061d4..4a6957c 100644
39809--- a/drivers/input/joystick/xpad.c
39810+++ b/drivers/input/joystick/xpad.c
39811@@ -735,7 +735,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
39812
39813 static int xpad_led_probe(struct usb_xpad *xpad)
39814 {
39815- static atomic_t led_seq = ATOMIC_INIT(0);
39816+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
39817 long led_no;
39818 struct xpad_led *led;
39819 struct led_classdev *led_cdev;
39820@@ -748,7 +748,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
39821 if (!led)
39822 return -ENOMEM;
39823
39824- led_no = (long)atomic_inc_return(&led_seq) - 1;
39825+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
39826
39827 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
39828 led->xpad = xpad;
39829diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
39830index 2f0b39d..7370f13 100644
39831--- a/drivers/input/mouse/psmouse.h
39832+++ b/drivers/input/mouse/psmouse.h
39833@@ -116,7 +116,7 @@ struct psmouse_attribute {
39834 ssize_t (*set)(struct psmouse *psmouse, void *data,
39835 const char *buf, size_t count);
39836 bool protect;
39837-};
39838+} __do_const;
39839 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
39840
39841 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
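__do_const is supplied by grsecurity's constify GCC plugin: structures consisting (almost) entirely of function pointers, like psmouse_attribute above, are forced into read-only memory so a kernel write primitive cannot redirect the callbacks, while __no_const marks the exceptions that must remain writable. A sketch with stand-in definitions (the real attributes only exist when the plugin runs):

#ifndef __do_const
#define __do_const	/* plugin: make all instances read-only */
#endif
#ifndef __no_const
#define __no_const	/* plugin: exempt this struct from constification */
#endif

struct example_ops {
	ssize_t	(*show)(void *priv, char *buf);
	ssize_t	(*store)(void *priv, const char *buf, size_t count);
} __do_const;

struct example_state {
	int	(*saved_cb)(void *priv);	/* rewritten at probe time */
} __no_const;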
39842diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
39843index 4c842c3..590b0bf 100644
39844--- a/drivers/input/mousedev.c
39845+++ b/drivers/input/mousedev.c
39846@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
39847
39848 spin_unlock_irq(&client->packet_lock);
39849
39850- if (copy_to_user(buffer, data, count))
39851+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
39852 return -EFAULT;
39853
39854 return count;
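The mousedev hunk refuses any read larger than the fixed packet buffer before calling copy_to_user(), so a hostile count can never walk past the end of data; b1.c, icn.c and radio-cadet.c below get the same belt-and-braces guard on the copy_from_user() side. The pattern, with illustrative names and buffer size:

static ssize_t example_read(struct file *file, char __user *buffer,
			    size_t count, loff_t *ppos)
{
	u8 data[64];		/* fixed-size kernel buffer, filled elsewhere */

	if (count > sizeof(data))	/* bound the copy before doing it */
		return -EFAULT;
	if (copy_to_user(buffer, data, count))
		return -EFAULT;
	return count;
}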
39855diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
39856index 25fc597..558bf3b3 100644
39857--- a/drivers/input/serio/serio.c
39858+++ b/drivers/input/serio/serio.c
39859@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
39860 */
39861 static void serio_init_port(struct serio *serio)
39862 {
39863- static atomic_t serio_no = ATOMIC_INIT(0);
39864+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
39865
39866 __module_get(THIS_MODULE);
39867
39868@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
39869 mutex_init(&serio->drv_mutex);
39870 device_initialize(&serio->dev);
39871 dev_set_name(&serio->dev, "serio%ld",
39872- (long)atomic_inc_return(&serio_no) - 1);
39873+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
39874 serio->dev.bus = &serio_bus;
39875 serio->dev.release = serio_release_port;
39876 serio->dev.groups = serio_device_attr_groups;
39877diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
39878index d8f98b1..f62a640 100644
39879--- a/drivers/iommu/iommu.c
39880+++ b/drivers/iommu/iommu.c
39881@@ -583,7 +583,7 @@ static struct notifier_block iommu_bus_nb = {
39882 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
39883 {
39884 bus_register_notifier(bus, &iommu_bus_nb);
39885- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
39886+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
39887 }
39888
39889 /**
39890diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
39891index dcfea4e..f4226b2 100644
39892--- a/drivers/iommu/irq_remapping.c
39893+++ b/drivers/iommu/irq_remapping.c
39894@@ -354,7 +354,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
39895 void panic_if_irq_remap(const char *msg)
39896 {
39897 if (irq_remapping_enabled)
39898- panic(msg);
39899+ panic("%s", msg);
39900 }
39901
39902 static void ir_ack_apic_edge(struct irq_data *data)
39903@@ -375,10 +375,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
39904
39905 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
39906 {
39907- chip->irq_print_chip = ir_print_prefix;
39908- chip->irq_ack = ir_ack_apic_edge;
39909- chip->irq_eoi = ir_ack_apic_level;
39910- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
39911+ pax_open_kernel();
39912+ *(void **)&chip->irq_print_chip = ir_print_prefix;
39913+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
39914+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
39915+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
39916+ pax_close_kernel();
39917 }
39918
39919 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
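irq_remap_modify_chip_defaults() now writes its callbacks through void ** casts bracketed by pax_open_kernel()/pax_close_kernel(). With the constify plugin active, struct irq_chip lives in read-only memory; the open/close pair briefly lifts write protection (clearing CR0.WP on x86) so the kernel itself can still perform this one legitimate update, and the casts strip the plugin-added const. The idiom, with illustrative handler names:

static void example_install_callbacks(struct irq_chip *chip)
{
	pax_open_kernel();			/* e.g. clear CR0.WP on x86 */
	*(void **)&chip->irq_ack = example_ack;	/* const-ified fields */
	*(void **)&chip->irq_eoi = example_eoi;
	pax_close_kernel();			/* restore write protection */
}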
39920diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
39921index 19ceaa6..3625818 100644
39922--- a/drivers/irqchip/irq-gic.c
39923+++ b/drivers/irqchip/irq-gic.c
39924@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
39925 * Supported arch specific GIC irq extension.
39926 * Default make them NULL.
39927 */
39928-struct irq_chip gic_arch_extn = {
39929+irq_chip_no_const gic_arch_extn = {
39930 .irq_eoi = NULL,
39931 .irq_mask = NULL,
39932 .irq_unmask = NULL,
39933@@ -333,7 +333,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
39934 chained_irq_exit(chip, desc);
39935 }
39936
39937-static struct irq_chip gic_chip = {
39938+static irq_chip_no_const gic_chip __read_only = {
39939 .name = "GIC",
39940 .irq_mask = gic_mask_irq,
39941 .irq_unmask = gic_unmask_irq,
39942diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
39943index ac6f72b..81150f2 100644
39944--- a/drivers/isdn/capi/capi.c
39945+++ b/drivers/isdn/capi/capi.c
39946@@ -81,8 +81,8 @@ struct capiminor {
39947
39948 struct capi20_appl *ap;
39949 u32 ncci;
39950- atomic_t datahandle;
39951- atomic_t msgid;
39952+ atomic_unchecked_t datahandle;
39953+ atomic_unchecked_t msgid;
39954
39955 struct tty_port port;
39956 int ttyinstop;
39957@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
39958 capimsg_setu16(s, 2, mp->ap->applid);
39959 capimsg_setu8 (s, 4, CAPI_DATA_B3);
39960 capimsg_setu8 (s, 5, CAPI_RESP);
39961- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
39962+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
39963 capimsg_setu32(s, 8, mp->ncci);
39964 capimsg_setu16(s, 12, datahandle);
39965 }
39966@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
39967 mp->outbytes -= len;
39968 spin_unlock_bh(&mp->outlock);
39969
39970- datahandle = atomic_inc_return(&mp->datahandle);
39971+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
39972 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
39973 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
39974 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
39975 capimsg_setu16(skb->data, 2, mp->ap->applid);
39976 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
39977 capimsg_setu8 (skb->data, 5, CAPI_REQ);
39978- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
39979+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
39980 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
39981 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
39982 capimsg_setu16(skb->data, 16, len); /* Data length */
39983diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
39984index 600c79b..3752bab 100644
39985--- a/drivers/isdn/gigaset/interface.c
39986+++ b/drivers/isdn/gigaset/interface.c
39987@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
39988 }
39989 tty->driver_data = cs;
39990
39991- ++cs->port.count;
39992+ atomic_inc(&cs->port.count);
39993
39994- if (cs->port.count == 1) {
39995+ if (atomic_read(&cs->port.count) == 1) {
39996 tty_port_tty_set(&cs->port, tty);
39997 cs->port.low_latency = 1;
39998 }
39999@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
40000
40001 if (!cs->connected)
40002 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
40003- else if (!cs->port.count)
40004+ else if (!atomic_read(&cs->port.count))
40005 dev_warn(cs->dev, "%s: device not opened\n", __func__);
40006- else if (!--cs->port.count)
40007+ else if (!atomic_dec_return(&cs->port.count))
40008 tty_port_tty_set(&cs->port, NULL);
40009
40010 mutex_unlock(&cs->mutex);
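The gigaset hunks (and the isdn_tty ones below) convert tty_port.count from a plain int into an atomic_t so the open/close bookkeeping cannot be torn between contexts; this only type-checks against the patched struct tty_port, since mainline keeps a plain int there. The resulting pairing, sketched:

static int example_if_open(struct tty_port *port, struct tty_struct *tty)
{
	if (atomic_inc_return(&port->count) == 1)
		tty_port_tty_set(port, tty);	/* first opener attaches tty */
	return 0;
}

static void example_if_close(struct tty_port *port)
{
	if (!atomic_read(&port->count))
		return;				/* device was never opened */
	if (atomic_dec_return(&port->count) == 0)
		tty_port_tty_set(port, NULL);	/* last closer detaches */
}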
40011diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
40012index 4d9b195..455075c 100644
40013--- a/drivers/isdn/hardware/avm/b1.c
40014+++ b/drivers/isdn/hardware/avm/b1.c
40015@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
40016 }
40017 if (left) {
40018 if (t4file->user) {
40019- if (copy_from_user(buf, dp, left))
40020+ if (left > sizeof buf || copy_from_user(buf, dp, left))
40021 return -EFAULT;
40022 } else {
40023 memcpy(buf, dp, left);
40024@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
40025 }
40026 if (left) {
40027 if (config->user) {
40028- if (copy_from_user(buf, dp, left))
40029+ if (left > sizeof buf || copy_from_user(buf, dp, left))
40030 return -EFAULT;
40031 } else {
40032 memcpy(buf, dp, left);
40033diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
40034index 3c5f249..5fac4d0 100644
40035--- a/drivers/isdn/i4l/isdn_tty.c
40036+++ b/drivers/isdn/i4l/isdn_tty.c
40037@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
40038
40039 #ifdef ISDN_DEBUG_MODEM_OPEN
40040 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
40041- port->count);
40042+ atomic_read(&port->count));
40043 #endif
40044- port->count++;
40045+ atomic_inc(&port->count);
40046 port->tty = tty;
40047 /*
40048 * Start up serial port
40049@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
40050 #endif
40051 return;
40052 }
40053- if ((tty->count == 1) && (port->count != 1)) {
40054+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
40055 /*
40056 * Uh, oh. tty->count is 1, which means that the tty
40057 * structure will be freed. Info->count should always
40058@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
40059 * serial port won't be shutdown.
40060 */
40061 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
40062- "info->count is %d\n", port->count);
40063- port->count = 1;
40064+ "info->count is %d\n", atomic_read(&port->count));
40065+ atomic_set(&port->count, 1);
40066 }
40067- if (--port->count < 0) {
40068+ if (atomic_dec_return(&port->count) < 0) {
40069 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
40070- info->line, port->count);
40071- port->count = 0;
40072+ info->line, atomic_read(&port->count));
40073+ atomic_set(&port->count, 0);
40074 }
40075- if (port->count) {
40076+ if (atomic_read(&port->count)) {
40077 #ifdef ISDN_DEBUG_MODEM_OPEN
40078 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
40079 #endif
40080@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
40081 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
40082 return;
40083 isdn_tty_shutdown(info);
40084- port->count = 0;
40085+ atomic_set(&port->count, 0);
40086 port->flags &= ~ASYNC_NORMAL_ACTIVE;
40087 port->tty = NULL;
40088 wake_up_interruptible(&port->open_wait);
40089@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
40090 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
40091 modem_info *info = &dev->mdm.info[i];
40092
40093- if (info->port.count == 0)
40094+ if (atomic_read(&info->port.count) == 0)
40095 continue;
40096 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
40097 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
40098diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
40099index e74df7c..03a03ba 100644
40100--- a/drivers/isdn/icn/icn.c
40101+++ b/drivers/isdn/icn/icn.c
40102@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
40103 if (count > len)
40104 count = len;
40105 if (user) {
40106- if (copy_from_user(msg, buf, count))
40107+ if (count > sizeof msg || copy_from_user(msg, buf, count))
40108 return -EFAULT;
40109 } else
40110 memcpy(msg, buf, count);
40111diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
40112index 6a8405d..0bd1c7e 100644
40113--- a/drivers/leds/leds-clevo-mail.c
40114+++ b/drivers/leds/leds-clevo-mail.c
40115@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
40116 * detected as working, but in reality it is not) as low as
40117 * possible.
40118 */
40119-static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = {
40120+static const struct dmi_system_id __initconst clevo_mail_led_dmi_table[] = {
40121 {
40122 .callback = clevo_mail_led_dmi_callback,
40123 .ident = "Clevo D410J",
40124diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
40125index 64e204e..c6bf189 100644
40126--- a/drivers/leds/leds-ss4200.c
40127+++ b/drivers/leds/leds-ss4200.c
40128@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
40129 * detected as working, but in reality it is not) as low as
40130 * possible.
40131 */
40132-static struct dmi_system_id __initdata nas_led_whitelist[] = {
40133+static const struct dmi_system_id __initconst nas_led_whitelist[] = {
40134 {
40135 .callback = ss4200_led_dmi_callback,
40136 .ident = "Intel SS4200-E",
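Both LED hunks make the same two-part fix: the DMI match table is only ever read, so it gains const, and __initdata becomes __initconst so the table lands in read-only, boot-discarded init.rodata (const data placed in the writable __initdata section would provoke a section type conflict). The shape of such a table, with an illustrative entry:

static const struct dmi_system_id __initconst example_dmi_table[] = {
	{
		.callback = example_dmi_callback,
		.ident	  = "Example Machine",
		.matches  = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
		},
	},
	{ }	/* zeroed terminator entry */
};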
40137diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
40138index 0bf1e4e..b4bf44e 100644
40139--- a/drivers/lguest/core.c
40140+++ b/drivers/lguest/core.c
40141@@ -97,9 +97,17 @@ static __init int map_switcher(void)
40142 * The end address needs +1 because __get_vm_area allocates an
40143 * extra guard page, so we need space for that.
40144 */
40145+
40146+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
40147+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
40148+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
40149+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
40150+#else
40151 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
40152 VM_ALLOC, switcher_addr, switcher_addr
40153 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
40154+#endif
40155+
40156 if (!switcher_vma) {
40157 err = -ENOMEM;
40158 printk("lguest: could not map switcher pages high\n");
40159@@ -124,7 +132,7 @@ static __init int map_switcher(void)
40160 * Now the Switcher is mapped at the right address, we can't fail!
40161 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
40162 */
40163- memcpy(switcher_vma->addr, start_switcher_text,
40164+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
40165 end_switcher_text - start_switcher_text);
40166
40167 printk(KERN_INFO "lguest: mapped switcher at %p\n",
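map_switcher() copies the compiled-in switcher text into its high mapping, and under KERNEXEC the source address has to be translated first: on x86-32 the kernel text is remapped read-only at a different virtual address, and ktla_ktva() turns a "kernel text linear address" into the virtual address at which those bytes are actually readable. A sketch of the translation, assuming the x86-32 definitions this patch uses elsewhere (identity mapping when KERNEXEC is off):

#ifdef CONFIG_PAX_KERNEXEC
#define ktla_ktva(addr)	((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr)	((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
#else
#define ktla_ktva(addr)	(addr)
#define ktva_ktla(addr)	(addr)
#endif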
40168diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
40169index 5b9ac32..2ef4f26 100644
40170--- a/drivers/lguest/page_tables.c
40171+++ b/drivers/lguest/page_tables.c
40172@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
40173 /*:*/
40174
40175 #ifdef CONFIG_X86_PAE
40176-static void release_pmd(pmd_t *spmd)
40177+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
40178 {
40179 /* If the entry's not present, there's nothing to release. */
40180 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
40181diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
40182index f0a3347..f6608b2 100644
40183--- a/drivers/lguest/x86/core.c
40184+++ b/drivers/lguest/x86/core.c
40185@@ -59,7 +59,7 @@ static struct {
40186 /* Offset from where switcher.S was compiled to where we've copied it */
40187 static unsigned long switcher_offset(void)
40188 {
40189- return switcher_addr - (unsigned long)start_switcher_text;
40190+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
40191 }
40192
40193 /* This cpu's struct lguest_pages (after the Switcher text page) */
40194@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
40195 * These copies are pretty cheap, so we do them unconditionally: */
40196 /* Save the current Host top-level page directory.
40197 */
40198+
40199+#ifdef CONFIG_PAX_PER_CPU_PGD
40200+ pages->state.host_cr3 = read_cr3();
40201+#else
40202 pages->state.host_cr3 = __pa(current->mm->pgd);
40203+#endif
40204+
40205 /*
40206 * Set up the Guest's page tables to see this CPU's pages (and no
40207 * other CPU's pages).
40208@@ -475,7 +481,7 @@ void __init lguest_arch_host_init(void)
40209 * compiled-in switcher code and the high-mapped copy we just made.
40210 */
40211 for (i = 0; i < IDT_ENTRIES; i++)
40212- default_idt_entries[i] += switcher_offset();
40213+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
40214
40215 /*
40216 * Set up the Switcher's per-cpu areas.
40217@@ -558,7 +564,7 @@ void __init lguest_arch_host_init(void)
40218 * it will be undisturbed when we switch. To change %cs and jump we
40219 * need this structure to feed to Intel's "lcall" instruction.
40220 */
40221- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
40222+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
40223 lguest_entry.segment = LGUEST_CS;
40224
40225 /*
40226diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
40227index 40634b0..4f5855e 100644
40228--- a/drivers/lguest/x86/switcher_32.S
40229+++ b/drivers/lguest/x86/switcher_32.S
40230@@ -87,6 +87,7 @@
40231 #include <asm/page.h>
40232 #include <asm/segment.h>
40233 #include <asm/lguest.h>
40234+#include <asm/processor-flags.h>
40235
40236 // We mark the start of the code to copy
40237 // It's placed in .text tho it's never run here
40238@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
40239 // Changes type when we load it: damn Intel!
40240 // For after we switch over our page tables
40241 // That entry will be read-only: we'd crash.
40242+
40243+#ifdef CONFIG_PAX_KERNEXEC
40244+ mov %cr0, %edx
40245+ xor $X86_CR0_WP, %edx
40246+ mov %edx, %cr0
40247+#endif
40248+
40249 movl $(GDT_ENTRY_TSS*8), %edx
40250 ltr %dx
40251
40252@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
40253 // Let's clear it again for our return.
40254 // The GDT descriptor of the Host
40255 // Points to the table after two "size" bytes
40256- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
40257+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
40258 // Clear "used" from type field (byte 5, bit 2)
40259- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
40260+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
40261+
40262+#ifdef CONFIG_PAX_KERNEXEC
40263+ mov %cr0, %eax
40264+ xor $X86_CR0_WP, %eax
40265+ mov %eax, %cr0
40266+#endif
40267
40268 // Once our page table's switched, the Guest is live!
40269 // The Host fades as we run this final step.
40270@@ -295,13 +309,12 @@ deliver_to_host:
40271 // I consulted gcc, and it gave
40272 // These instructions, which I gladly credit:
40273 leal (%edx,%ebx,8), %eax
40274- movzwl (%eax),%edx
40275- movl 4(%eax), %eax
40276- xorw %ax, %ax
40277- orl %eax, %edx
40278+ movl 4(%eax), %edx
40279+ movw (%eax), %dx
40280 // Now the address of the handler's in %edx
40281 // We call it now: its "iret" drops us home.
40282- jmp *%edx
40283+ ljmp $__KERNEL_CS, $1f
40284+1: jmp *%edx
40285
40286 // Every interrupt can come to us here
40287 // But we must truly tell each apart.
40288diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
40289index 0003992..854bbce 100644
40290--- a/drivers/md/bcache/closure.h
40291+++ b/drivers/md/bcache/closure.h
40292@@ -622,7 +622,7 @@ static inline void closure_wake_up(struct closure_waitlist *list)
40293 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
40294 struct workqueue_struct *wq)
40295 {
40296- BUG_ON(object_is_on_stack(cl));
40297+ BUG_ON(object_starts_on_stack(cl));
40298 closure_set_ip(cl);
40299 cl->fn = fn;
40300 cl->wq = wq;
40301diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
40302index 5a2c754..0fa55db 100644
40303--- a/drivers/md/bitmap.c
40304+++ b/drivers/md/bitmap.c
40305@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
40306 chunk_kb ? "KB" : "B");
40307 if (bitmap->storage.file) {
40308 seq_printf(seq, ", file: ");
40309- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
40310+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
40311 }
40312
40313 seq_printf(seq, "\n");
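The bitmap.c hunk adds the backslash itself to seq_path()'s escape set. seq_path() octal-escapes the listed characters when emitting the file name; with '\' left unescaped, a crafted bitmap file name containing literal backslash sequences would be indistinguishable from escaped output and could spoof fields in the /proc line. The hardened call:

/* ' ', '\t', '\n' and '\\' are emitted as \040, \011, \012, \134 */
seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");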
40314diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
40315index 81a79b7..87a0f73 100644
40316--- a/drivers/md/dm-ioctl.c
40317+++ b/drivers/md/dm-ioctl.c
40318@@ -1697,7 +1697,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
40319 cmd == DM_LIST_VERSIONS_CMD)
40320 return 0;
40321
40322- if ((cmd == DM_DEV_CREATE_CMD)) {
40323+ if (cmd == DM_DEV_CREATE_CMD) {
40324 if (!*param->name) {
40325 DMWARN("name not supplied when creating device");
40326 return -EINVAL;
40327diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
40328index 699b5be..eac0a15 100644
40329--- a/drivers/md/dm-raid1.c
40330+++ b/drivers/md/dm-raid1.c
40331@@ -40,7 +40,7 @@ enum dm_raid1_error {
40332
40333 struct mirror {
40334 struct mirror_set *ms;
40335- atomic_t error_count;
40336+ atomic_unchecked_t error_count;
40337 unsigned long error_type;
40338 struct dm_dev *dev;
40339 sector_t offset;
40340@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
40341 struct mirror *m;
40342
40343 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
40344- if (!atomic_read(&m->error_count))
40345+ if (!atomic_read_unchecked(&m->error_count))
40346 return m;
40347
40348 return NULL;
40349@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
40350 * simple way to tell if a device has encountered
40351 * errors.
40352 */
40353- atomic_inc(&m->error_count);
40354+ atomic_inc_unchecked(&m->error_count);
40355
40356 if (test_and_set_bit(error_type, &m->error_type))
40357 return;
40358@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
40359 struct mirror *m = get_default_mirror(ms);
40360
40361 do {
40362- if (likely(!atomic_read(&m->error_count)))
40363+ if (likely(!atomic_read_unchecked(&m->error_count)))
40364 return m;
40365
40366 if (m-- == ms->mirror)
40367@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
40368 {
40369 struct mirror *default_mirror = get_default_mirror(m->ms);
40370
40371- return !atomic_read(&default_mirror->error_count);
40372+ return !atomic_read_unchecked(&default_mirror->error_count);
40373 }
40374
40375 static int mirror_available(struct mirror_set *ms, struct bio *bio)
40376@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
40377 */
40378 if (likely(region_in_sync(ms, region, 1)))
40379 m = choose_mirror(ms, bio->bi_sector);
40380- else if (m && atomic_read(&m->error_count))
40381+ else if (m && atomic_read_unchecked(&m->error_count))
40382 m = NULL;
40383
40384 if (likely(m))
40385@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
40386 }
40387
40388 ms->mirror[mirror].ms = ms;
40389- atomic_set(&(ms->mirror[mirror].error_count), 0);
40390+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
40391 ms->mirror[mirror].error_type = 0;
40392 ms->mirror[mirror].offset = offset;
40393
40394@@ -1340,7 +1340,7 @@ static void mirror_resume(struct dm_target *ti)
40395 */
40396 static char device_status_char(struct mirror *m)
40397 {
40398- if (!atomic_read(&(m->error_count)))
40399+ if (!atomic_read_unchecked(&(m->error_count)))
40400 return 'A';
40401
40402 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
40403diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
40404index d907ca6..cfb8384 100644
40405--- a/drivers/md/dm-stripe.c
40406+++ b/drivers/md/dm-stripe.c
40407@@ -20,7 +20,7 @@ struct stripe {
40408 struct dm_dev *dev;
40409 sector_t physical_start;
40410
40411- atomic_t error_count;
40412+ atomic_unchecked_t error_count;
40413 };
40414
40415 struct stripe_c {
40416@@ -185,7 +185,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
40417 kfree(sc);
40418 return r;
40419 }
40420- atomic_set(&(sc->stripe[i].error_count), 0);
40421+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
40422 }
40423
40424 ti->private = sc;
40425@@ -326,7 +326,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
40426 DMEMIT("%d ", sc->stripes);
40427 for (i = 0; i < sc->stripes; i++) {
40428 DMEMIT("%s ", sc->stripe[i].dev->name);
40429- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
40430+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
40431 'D' : 'A';
40432 }
40433 buffer[i] = '\0';
40434@@ -371,8 +371,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
40435 */
40436 for (i = 0; i < sc->stripes; i++)
40437 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
40438- atomic_inc(&(sc->stripe[i].error_count));
40439- if (atomic_read(&(sc->stripe[i].error_count)) <
40440+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
40441+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
40442 DM_IO_ERROR_THRESHOLD)
40443 schedule_work(&sc->trigger_event);
40444 }
40445diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
40446index 1ff252a..ee384c1 100644
40447--- a/drivers/md/dm-table.c
40448+++ b/drivers/md/dm-table.c
40449@@ -389,7 +389,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
40450 if (!dev_size)
40451 return 0;
40452
40453- if ((start >= dev_size) || (start + len > dev_size)) {
40454+ if ((start >= dev_size) || (len > dev_size - start)) {
40455 DMWARN("%s: %s too small for target: "
40456 "start=%llu, len=%llu, dev_size=%llu",
40457 dm_device_name(ti->table->md), bdevname(bdev, b),
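The dm-table hunk rewrites start + len > dev_size as len > dev_size - start. The two are algebraically equal, but the addition can wrap for a huge len and slip past the check, while after start >= dev_size has been rejected the subtraction is always in range. The same transformation in self-contained form:

#include <stdbool.h>
#include <stdint.h>

/* start, len and dev_size in sectors, unsigned 64-bit like sector_t */
static bool device_area_ok(uint64_t start, uint64_t len, uint64_t dev_size)
{
	if (start >= dev_size)
		return false;
	return len <= dev_size - start;	/* cannot wrap, cannot underflow */
}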
40458diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
40459index 60bce43..9b997d0 100644
40460--- a/drivers/md/dm-thin-metadata.c
40461+++ b/drivers/md/dm-thin-metadata.c
40462@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
40463 {
40464 pmd->info.tm = pmd->tm;
40465 pmd->info.levels = 2;
40466- pmd->info.value_type.context = pmd->data_sm;
40467+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
40468 pmd->info.value_type.size = sizeof(__le64);
40469 pmd->info.value_type.inc = data_block_inc;
40470 pmd->info.value_type.dec = data_block_dec;
40471@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
40472
40473 pmd->bl_info.tm = pmd->tm;
40474 pmd->bl_info.levels = 1;
40475- pmd->bl_info.value_type.context = pmd->data_sm;
40476+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
40477 pmd->bl_info.value_type.size = sizeof(__le64);
40478 pmd->bl_info.value_type.inc = data_block_inc;
40479 pmd->bl_info.value_type.dec = data_block_dec;
40480diff --git a/drivers/md/dm.c b/drivers/md/dm.c
40481index 33f2010..23fb84c 100644
40482--- a/drivers/md/dm.c
40483+++ b/drivers/md/dm.c
40484@@ -169,9 +169,9 @@ struct mapped_device {
40485 /*
40486 * Event handling.
40487 */
40488- atomic_t event_nr;
40489+ atomic_unchecked_t event_nr;
40490 wait_queue_head_t eventq;
40491- atomic_t uevent_seq;
40492+ atomic_unchecked_t uevent_seq;
40493 struct list_head uevent_list;
40494 spinlock_t uevent_lock; /* Protect access to uevent_list */
40495
40496@@ -1884,8 +1884,8 @@ static struct mapped_device *alloc_dev(int minor)
40497 rwlock_init(&md->map_lock);
40498 atomic_set(&md->holders, 1);
40499 atomic_set(&md->open_count, 0);
40500- atomic_set(&md->event_nr, 0);
40501- atomic_set(&md->uevent_seq, 0);
40502+ atomic_set_unchecked(&md->event_nr, 0);
40503+ atomic_set_unchecked(&md->uevent_seq, 0);
40504 INIT_LIST_HEAD(&md->uevent_list);
40505 spin_lock_init(&md->uevent_lock);
40506
40507@@ -2033,7 +2033,7 @@ static void event_callback(void *context)
40508
40509 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
40510
40511- atomic_inc(&md->event_nr);
40512+ atomic_inc_unchecked(&md->event_nr);
40513 wake_up(&md->eventq);
40514 }
40515
40516@@ -2690,18 +2690,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
40517
40518 uint32_t dm_next_uevent_seq(struct mapped_device *md)
40519 {
40520- return atomic_add_return(1, &md->uevent_seq);
40521+ return atomic_add_return_unchecked(1, &md->uevent_seq);
40522 }
40523
40524 uint32_t dm_get_event_nr(struct mapped_device *md)
40525 {
40526- return atomic_read(&md->event_nr);
40527+ return atomic_read_unchecked(&md->event_nr);
40528 }
40529
40530 int dm_wait_event(struct mapped_device *md, int event_nr)
40531 {
40532 return wait_event_interruptible(md->eventq,
40533- (event_nr != atomic_read(&md->event_nr)));
40534+ (event_nr != atomic_read_unchecked(&md->event_nr)));
40535 }
40536
40537 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
40538diff --git a/drivers/md/md.c b/drivers/md/md.c
40539index 51f0345..c77810e 100644
40540--- a/drivers/md/md.c
40541+++ b/drivers/md/md.c
40542@@ -234,10 +234,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
40543 * start build, activate spare
40544 */
40545 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
40546-static atomic_t md_event_count;
40547+static atomic_unchecked_t md_event_count;
40548 void md_new_event(struct mddev *mddev)
40549 {
40550- atomic_inc(&md_event_count);
40551+ atomic_inc_unchecked(&md_event_count);
40552 wake_up(&md_event_waiters);
40553 }
40554 EXPORT_SYMBOL_GPL(md_new_event);
40555@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
40556 */
40557 static void md_new_event_inintr(struct mddev *mddev)
40558 {
40559- atomic_inc(&md_event_count);
40560+ atomic_inc_unchecked(&md_event_count);
40561 wake_up(&md_event_waiters);
40562 }
40563
40564@@ -1501,7 +1501,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
40565 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
40566 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
40567 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
40568- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
40569+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
40570
40571 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
40572 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
40573@@ -1745,7 +1745,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
40574 else
40575 sb->resync_offset = cpu_to_le64(0);
40576
40577- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
40578+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
40579
40580 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
40581 sb->size = cpu_to_le64(mddev->dev_sectors);
40582@@ -2750,7 +2750,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
40583 static ssize_t
40584 errors_show(struct md_rdev *rdev, char *page)
40585 {
40586- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
40587+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
40588 }
40589
40590 static ssize_t
40591@@ -2759,7 +2759,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
40592 char *e;
40593 unsigned long n = simple_strtoul(buf, &e, 10);
40594 if (*buf && (*e == 0 || *e == '\n')) {
40595- atomic_set(&rdev->corrected_errors, n);
40596+ atomic_set_unchecked(&rdev->corrected_errors, n);
40597 return len;
40598 }
40599 return -EINVAL;
40600@@ -3207,8 +3207,8 @@ int md_rdev_init(struct md_rdev *rdev)
40601 rdev->sb_loaded = 0;
40602 rdev->bb_page = NULL;
40603 atomic_set(&rdev->nr_pending, 0);
40604- atomic_set(&rdev->read_errors, 0);
40605- atomic_set(&rdev->corrected_errors, 0);
40606+ atomic_set_unchecked(&rdev->read_errors, 0);
40607+ atomic_set_unchecked(&rdev->corrected_errors, 0);
40608
40609 INIT_LIST_HEAD(&rdev->same_set);
40610 init_waitqueue_head(&rdev->blocked_wait);
40611@@ -7009,7 +7009,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
40612
40613 spin_unlock(&pers_lock);
40614 seq_printf(seq, "\n");
40615- seq->poll_event = atomic_read(&md_event_count);
40616+ seq->poll_event = atomic_read_unchecked(&md_event_count);
40617 return 0;
40618 }
40619 if (v == (void*)2) {
40620@@ -7112,7 +7112,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
40621 return error;
40622
40623 seq = file->private_data;
40624- seq->poll_event = atomic_read(&md_event_count);
40625+ seq->poll_event = atomic_read_unchecked(&md_event_count);
40626 return error;
40627 }
40628
40629@@ -7126,7 +7126,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
40630 /* always allow read */
40631 mask = POLLIN | POLLRDNORM;
40632
40633- if (seq->poll_event != atomic_read(&md_event_count))
40634+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
40635 mask |= POLLERR | POLLPRI;
40636 return mask;
40637 }
40638@@ -7170,7 +7170,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
40639 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
40640 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
40641 (int)part_stat_read(&disk->part0, sectors[1]) -
40642- atomic_read(&disk->sync_io);
40643+ atomic_read_unchecked(&disk->sync_io);
40644 /* sync IO will cause sync_io to increase before the disk_stats
40645 * as sync_io is counted when a request starts, and
40646 * disk_stats is counted when it completes.
40647diff --git a/drivers/md/md.h b/drivers/md/md.h
40648index 653f992b6..6af6c40 100644
40649--- a/drivers/md/md.h
40650+++ b/drivers/md/md.h
40651@@ -94,13 +94,13 @@ struct md_rdev {
40652 * only maintained for arrays that
40653 * support hot removal
40654 */
40655- atomic_t read_errors; /* number of consecutive read errors that
40656+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
40657 * we have tried to ignore.
40658 */
40659 struct timespec last_read_error; /* monotonic time since our
40660 * last read error
40661 */
40662- atomic_t corrected_errors; /* number of corrected read errors,
40663+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
40664 * for reporting to userspace and storing
40665 * in superblock.
40666 */
40667@@ -434,7 +434,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
40668
40669 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
40670 {
40671- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
40672+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
40673 }
40674
40675 struct md_personality
40676diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
40677index 3e6d115..ffecdeb 100644
40678--- a/drivers/md/persistent-data/dm-space-map.h
40679+++ b/drivers/md/persistent-data/dm-space-map.h
40680@@ -71,6 +71,7 @@ struct dm_space_map {
40681 dm_sm_threshold_fn fn,
40682 void *context);
40683 };
40684+typedef struct dm_space_map __no_const dm_space_map_no_const;
40685
40686 /*----------------------------------------------------------------*/
40687
40688diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
40689index 6f48244..7d29145 100644
40690--- a/drivers/md/raid1.c
40691+++ b/drivers/md/raid1.c
40692@@ -1822,7 +1822,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
40693 if (r1_sync_page_io(rdev, sect, s,
40694 bio->bi_io_vec[idx].bv_page,
40695 READ) != 0)
40696- atomic_add(s, &rdev->corrected_errors);
40697+ atomic_add_unchecked(s, &rdev->corrected_errors);
40698 }
40699 sectors -= s;
40700 sect += s;
40701@@ -2049,7 +2049,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
40702 test_bit(In_sync, &rdev->flags)) {
40703 if (r1_sync_page_io(rdev, sect, s,
40704 conf->tmppage, READ)) {
40705- atomic_add(s, &rdev->corrected_errors);
40706+ atomic_add_unchecked(s, &rdev->corrected_errors);
40707 printk(KERN_INFO
40708 "md/raid1:%s: read error corrected "
40709 "(%d sectors at %llu on %s)\n",
40710diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
40711index 081bb33..3c4b287 100644
40712--- a/drivers/md/raid10.c
40713+++ b/drivers/md/raid10.c
40714@@ -1940,7 +1940,7 @@ static void end_sync_read(struct bio *bio, int error)
40715 /* The write handler will notice the lack of
40716 * R10BIO_Uptodate and record any errors etc
40717 */
40718- atomic_add(r10_bio->sectors,
40719+ atomic_add_unchecked(r10_bio->sectors,
40720 &conf->mirrors[d].rdev->corrected_errors);
40721
40722 /* for reconstruct, we always reschedule after a read.
40723@@ -2298,7 +2298,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
40724 {
40725 struct timespec cur_time_mon;
40726 unsigned long hours_since_last;
40727- unsigned int read_errors = atomic_read(&rdev->read_errors);
40728+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
40729
40730 ktime_get_ts(&cur_time_mon);
40731
40732@@ -2320,9 +2320,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
40733 * overflowing the shift of read_errors by hours_since_last.
40734 */
40735 if (hours_since_last >= 8 * sizeof(read_errors))
40736- atomic_set(&rdev->read_errors, 0);
40737+ atomic_set_unchecked(&rdev->read_errors, 0);
40738 else
40739- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
40740+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
40741 }
40742
40743 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
40744@@ -2376,8 +2376,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
40745 return;
40746
40747 check_decay_read_errors(mddev, rdev);
40748- atomic_inc(&rdev->read_errors);
40749- if (atomic_read(&rdev->read_errors) > max_read_errors) {
40750+ atomic_inc_unchecked(&rdev->read_errors);
40751+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
40752 char b[BDEVNAME_SIZE];
40753 bdevname(rdev->bdev, b);
40754
40755@@ -2385,7 +2385,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
40756 "md/raid10:%s: %s: Raid device exceeded "
40757 "read_error threshold [cur %d:max %d]\n",
40758 mdname(mddev), b,
40759- atomic_read(&rdev->read_errors), max_read_errors);
40760+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
40761 printk(KERN_NOTICE
40762 "md/raid10:%s: %s: Failing raid device\n",
40763 mdname(mddev), b);
40764@@ -2540,7 +2540,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
40765 sect +
40766 choose_data_offset(r10_bio, rdev)),
40767 bdevname(rdev->bdev, b));
40768- atomic_add(s, &rdev->corrected_errors);
40769+ atomic_add_unchecked(s, &rdev->corrected_errors);
40770 }
40771
40772 rdev_dec_pending(rdev, mddev);
40773diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
40774index a35b846..e295c6d 100644
40775--- a/drivers/md/raid5.c
40776+++ b/drivers/md/raid5.c
40777@@ -1764,21 +1764,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
40778 mdname(conf->mddev), STRIPE_SECTORS,
40779 (unsigned long long)s,
40780 bdevname(rdev->bdev, b));
40781- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
40782+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
40783 clear_bit(R5_ReadError, &sh->dev[i].flags);
40784 clear_bit(R5_ReWrite, &sh->dev[i].flags);
40785 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
40786 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
40787
40788- if (atomic_read(&rdev->read_errors))
40789- atomic_set(&rdev->read_errors, 0);
40790+ if (atomic_read_unchecked(&rdev->read_errors))
40791+ atomic_set_unchecked(&rdev->read_errors, 0);
40792 } else {
40793 const char *bdn = bdevname(rdev->bdev, b);
40794 int retry = 0;
40795 int set_bad = 0;
40796
40797 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
40798- atomic_inc(&rdev->read_errors);
40799+ atomic_inc_unchecked(&rdev->read_errors);
40800 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
40801 printk_ratelimited(
40802 KERN_WARNING
40803@@ -1806,7 +1806,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
40804 mdname(conf->mddev),
40805 (unsigned long long)s,
40806 bdn);
40807- } else if (atomic_read(&rdev->read_errors)
40808+ } else if (atomic_read_unchecked(&rdev->read_errors)
40809 > conf->max_nr_stripes)
40810 printk(KERN_WARNING
40811 "md/raid:%s: Too many read errors, failing device %s.\n",
40812diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
40813index 401ef64..836e563 100644
40814--- a/drivers/media/dvb-core/dvbdev.c
40815+++ b/drivers/media/dvb-core/dvbdev.c
40816@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
40817 const struct dvb_device *template, void *priv, int type)
40818 {
40819 struct dvb_device *dvbdev;
40820- struct file_operations *dvbdevfops;
40821+ file_operations_no_const *dvbdevfops;
40822 struct device *clsdev;
40823 int minor;
40824 int id;
40825diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
40826index 9b6c3bb..baeb5c7 100644
40827--- a/drivers/media/dvb-frontends/dib3000.h
40828+++ b/drivers/media/dvb-frontends/dib3000.h
40829@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
40830 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
40831 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
40832 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
40833-};
40834+} __no_const;
40835
40836 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
40837 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
40838diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
40839index c7a9be1..683f6f8 100644
40840--- a/drivers/media/pci/cx88/cx88-video.c
40841+++ b/drivers/media/pci/cx88/cx88-video.c
40842@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
40843
40844 /* ------------------------------------------------------------------ */
40845
40846-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
40847-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
40848-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
40849+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
40850+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
40851+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
40852
40853 module_param_array(video_nr, int, NULL, 0444);
40854 module_param_array(vbi_nr, int, NULL, 0444);
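The cx88 hunk aligns the array element type with the accessors that module_param_array(..., int, ...) generates: declaring the backing store unsigned int while telling the param core it holds int is a type mismatch that the size_overflow plugin rejects. Matching declaration:

static int video_nr[] = { [0 ... (CX88_MAXBOARDS - 1)] = UNSET };
module_param_array(video_nr, int, NULL, 0444);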
40855diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
40856index d338b19..aae4f9e 100644
40857--- a/drivers/media/platform/omap/omap_vout.c
40858+++ b/drivers/media/platform/omap/omap_vout.c
40859@@ -63,7 +63,6 @@ enum omap_vout_channels {
40860 OMAP_VIDEO2,
40861 };
40862
40863-static struct videobuf_queue_ops video_vbq_ops;
40864 /* Variables configurable through module params*/
40865 static u32 video1_numbuffers = 3;
40866 static u32 video2_numbuffers = 3;
40867@@ -1015,6 +1014,12 @@ static int omap_vout_open(struct file *file)
40868 {
40869 struct videobuf_queue *q;
40870 struct omap_vout_device *vout = NULL;
40871+ static struct videobuf_queue_ops video_vbq_ops = {
40872+ .buf_setup = omap_vout_buffer_setup,
40873+ .buf_prepare = omap_vout_buffer_prepare,
40874+ .buf_release = omap_vout_buffer_release,
40875+ .buf_queue = omap_vout_buffer_queue,
40876+ };
40877
40878 vout = video_drvdata(file);
40879 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
40880@@ -1032,10 +1037,6 @@ static int omap_vout_open(struct file *file)
40881 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
40882
40883 q = &vout->vbq;
40884- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
40885- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
40886- video_vbq_ops.buf_release = omap_vout_buffer_release;
40887- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
40888 spin_lock_init(&vout->vbq_lock);
40889
40890 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
40891diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
40892index 04e6490..2df65bf 100644
40893--- a/drivers/media/platform/s5p-tv/mixer.h
40894+++ b/drivers/media/platform/s5p-tv/mixer.h
40895@@ -156,7 +156,7 @@ struct mxr_layer {
40896 /** layer index (unique identifier) */
40897 int idx;
40898 /** callbacks for layer methods */
40899- struct mxr_layer_ops ops;
40900+ struct mxr_layer_ops *ops;
40901 /** format array */
40902 const struct mxr_format **fmt_array;
40903 /** size of format array */
40904diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
40905index b93a21f..2535195 100644
40906--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
40907+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
40908@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
40909 {
40910 struct mxr_layer *layer;
40911 int ret;
40912- struct mxr_layer_ops ops = {
40913+ static struct mxr_layer_ops ops = {
40914 .release = mxr_graph_layer_release,
40915 .buffer_set = mxr_graph_buffer_set,
40916 .stream_set = mxr_graph_stream_set,
40917diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
40918index b713403..53cb5ad 100644
40919--- a/drivers/media/platform/s5p-tv/mixer_reg.c
40920+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
40921@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
40922 layer->update_buf = next;
40923 }
40924
40925- layer->ops.buffer_set(layer, layer->update_buf);
40926+ layer->ops->buffer_set(layer, layer->update_buf);
40927
40928 if (done && done != layer->shadow_buf)
40929 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
40930diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
40931index ef0efdf..8c78eb6 100644
40932--- a/drivers/media/platform/s5p-tv/mixer_video.c
40933+++ b/drivers/media/platform/s5p-tv/mixer_video.c
40934@@ -209,7 +209,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
40935 layer->geo.src.height = layer->geo.src.full_height;
40936
40937 mxr_geometry_dump(mdev, &layer->geo);
40938- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
40939+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
40940 mxr_geometry_dump(mdev, &layer->geo);
40941 }
40942
40943@@ -227,7 +227,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
40944 layer->geo.dst.full_width = mbus_fmt.width;
40945 layer->geo.dst.full_height = mbus_fmt.height;
40946 layer->geo.dst.field = mbus_fmt.field;
40947- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
40948+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
40949
40950 mxr_geometry_dump(mdev, &layer->geo);
40951 }
40952@@ -333,7 +333,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
40953 /* set source size to highest accepted value */
40954 geo->src.full_width = max(geo->dst.full_width, pix->width);
40955 geo->src.full_height = max(geo->dst.full_height, pix->height);
40956- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
40957+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
40958 mxr_geometry_dump(mdev, &layer->geo);
40959 /* set cropping to total visible screen */
40960 geo->src.width = pix->width;
40961@@ -341,12 +341,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
40962 geo->src.x_offset = 0;
40963 geo->src.y_offset = 0;
40964 /* assure consistency of geometry */
40965- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
40966+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
40967 mxr_geometry_dump(mdev, &layer->geo);
40968 /* set full size to lowest possible value */
40969 geo->src.full_width = 0;
40970 geo->src.full_height = 0;
40971- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
40972+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
40973 mxr_geometry_dump(mdev, &layer->geo);
40974
40975 /* returning results */
40976@@ -473,7 +473,7 @@ static int mxr_s_selection(struct file *file, void *fh,
40977 target->width = s->r.width;
40978 target->height = s->r.height;
40979
40980- layer->ops.fix_geometry(layer, stage, s->flags);
40981+ layer->ops->fix_geometry(layer, stage, s->flags);
40982
40983 /* retrieve update selection rectangle */
40984 res.left = target->x_offset;
40985@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
40986 mxr_output_get(mdev);
40987
40988 mxr_layer_update_output(layer);
40989- layer->ops.format_set(layer);
40990+ layer->ops->format_set(layer);
40991 /* enabling layer in hardware */
40992 spin_lock_irqsave(&layer->enq_slock, flags);
40993 layer->state = MXR_LAYER_STREAMING;
40994 spin_unlock_irqrestore(&layer->enq_slock, flags);
40995
40996- layer->ops.stream_set(layer, MXR_ENABLE);
40997+ layer->ops->stream_set(layer, MXR_ENABLE);
40998 mxr_streamer_get(mdev);
40999
41000 return 0;
41001@@ -1030,7 +1030,7 @@ static int stop_streaming(struct vb2_queue *vq)
41002 spin_unlock_irqrestore(&layer->enq_slock, flags);
41003
41004 /* disabling layer in hardware */
41005- layer->ops.stream_set(layer, MXR_DISABLE);
41006+ layer->ops->stream_set(layer, MXR_DISABLE);
41007 /* remove one streamer */
41008 mxr_streamer_put(mdev);
41009 /* allow changes in output configuration */
41010@@ -1069,8 +1069,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
41011
41012 void mxr_layer_release(struct mxr_layer *layer)
41013 {
41014- if (layer->ops.release)
41015- layer->ops.release(layer);
41016+ if (layer->ops->release)
41017+ layer->ops->release(layer);
41018 }
41019
41020 void mxr_base_layer_release(struct mxr_layer *layer)
41021@@ -1096,7 +1096,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
41022
41023 layer->mdev = mdev;
41024 layer->idx = idx;
41025- layer->ops = *ops;
41026+ layer->ops = ops;
41027
41028 spin_lock_init(&layer->enq_slock);
41029 INIT_LIST_HEAD(&layer->enq_list);
41030diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
41031index 3d13a63..da31bf1 100644
41032--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
41033+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
41034@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
41035 {
41036 struct mxr_layer *layer;
41037 int ret;
41038- struct mxr_layer_ops ops = {
41039+ static struct mxr_layer_ops ops = {
41040 .release = mxr_vp_layer_release,
41041 .buffer_set = mxr_vp_buffer_set,
41042 .stream_set = mxr_vp_stream_set,
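The s5p-tv hunks above form one conversion: mxr_layer stops embedding a writable copy of mxr_layer_ops and points at a single shared instance instead, every call site changes from layer->ops.fn() to layer->ops->fn(), and the create functions promote their formerly stack-local ops tables to static so the pointer remains valid after the function returns (and the table itself can be constified). The shape of the change, with illustrative names:

static struct mxr_layer_ops example_ops = {	/* static: outlives create() */
	.release    = example_release,
	.stream_set = example_stream_set,
};

static void example_attach(struct mxr_layer *layer)
{
	layer->ops = &example_ops;		/* pointer, not a struct copy */
	layer->ops->stream_set(layer, MXR_ENABLE);
}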
41043diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
41044index 545c04c..a14bded 100644
41045--- a/drivers/media/radio/radio-cadet.c
41046+++ b/drivers/media/radio/radio-cadet.c
41047@@ -324,6 +324,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
41048 unsigned char readbuf[RDS_BUFFER];
41049 int i = 0;
41050
41051+ if (count > RDS_BUFFER)
41052+ return -EFAULT;
41053 mutex_lock(&dev->lock);
41054 if (dev->rdsstat == 0)
41055 cadet_start_rds(dev);
41056@@ -339,7 +341,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
41057 while (i < count && dev->rdsin != dev->rdsout)
41058 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
41059
41060- if (i && copy_to_user(data, readbuf, i))
41061+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
41062 i = -EFAULT;
41063 unlock:
41064 mutex_unlock(&dev->lock);
41065diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
41066index 3940bb0..fb3952a 100644
41067--- a/drivers/media/usb/dvb-usb/cxusb.c
41068+++ b/drivers/media/usb/dvb-usb/cxusb.c
41069@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
41070
41071 struct dib0700_adapter_state {
41072 int (*set_param_save) (struct dvb_frontend *);
41073-};
41074+} __no_const;
41075
41076 static int dib7070_set_param_override(struct dvb_frontend *fe)
41077 {
41078diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
41079index 6e237b6..dc25556 100644
41080--- a/drivers/media/usb/dvb-usb/dw2102.c
41081+++ b/drivers/media/usb/dvb-usb/dw2102.c
41082@@ -118,7 +118,7 @@ struct su3000_state {
41083
41084 struct s6x0_state {
41085 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
41086-};
41087+} __no_const;
41088
41089 /* debug */
41090 static int dvb_usb_dw2102_debug;
41091diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
41092index f129551..ecf6514 100644
41093--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
41094+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
41095@@ -326,7 +326,7 @@ struct v4l2_buffer32 {
41096 __u32 reserved;
41097 };
41098
41099-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
41100+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
41101 enum v4l2_memory memory)
41102 {
41103 void __user *up_pln;
41104@@ -355,7 +355,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
41105 return 0;
41106 }
41107
41108-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
41109+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
41110 enum v4l2_memory memory)
41111 {
41112 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
41113@@ -772,7 +772,7 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
41114 put_user(kp->start_block, &up->start_block) ||
41115 put_user(kp->blocks, &up->blocks) ||
41116 put_user(tmp, &up->edid) ||
41117- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
41118+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
41119 return -EFAULT;
41120 return 0;
41121 }
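Among the annotation changes above, the put_v4l2_subdev_edid32() hunk fixes a genuine bug: copy_to_user() takes (user destination, kernel source, size), and the old code passed the kernel-side kp->reserved as the destination and the user-side up->reserved as the source, so the reserved bytes were never copied out correctly. The corrected orientation:

/* to = userspace destination, from = kernel source */
if (copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
	return -EFAULT;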
41122diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
41123index 7658586..1079260 100644
41124--- a/drivers/media/v4l2-core/v4l2-ioctl.c
41125+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
41126@@ -1995,7 +1995,8 @@ struct v4l2_ioctl_info {
41127 struct file *file, void *fh, void *p);
41128 } u;
41129 void (*debug)(const void *arg, bool write_only);
41130-};
41131+} __do_const;
41132+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
41133
41134 /* This control needs a priority check */
41135 #define INFO_FL_PRIO (1 << 0)
41136@@ -2177,7 +2178,7 @@ static long __video_do_ioctl(struct file *file,
41137 struct video_device *vfd = video_devdata(file);
41138 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
41139 bool write_only = false;
41140- struct v4l2_ioctl_info default_info;
41141+ v4l2_ioctl_info_no_const default_info;
41142 const struct v4l2_ioctl_info *info;
41143 void *fh = file->private_data;
41144 struct v4l2_fh *vfh = NULL;
41145@@ -2251,7 +2252,7 @@ done:
41146 }
41147
41148 static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
41149- void * __user *user_ptr, void ***kernel_ptr)
41150+ void __user **user_ptr, void ***kernel_ptr)
41151 {
41152 int ret = 0;
41153
41154@@ -2267,7 +2268,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
41155 ret = -EINVAL;
41156 break;
41157 }
41158- *user_ptr = (void __user *)buf->m.planes;
41159+ *user_ptr = (void __force_user *)buf->m.planes;
41160 *kernel_ptr = (void *)&buf->m.planes;
41161 *array_size = sizeof(struct v4l2_plane) * buf->length;
41162 ret = 1;
41163@@ -2302,7 +2303,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
41164 ret = -EINVAL;
41165 break;
41166 }
41167- *user_ptr = (void __user *)ctrls->controls;
41168+ *user_ptr = (void __force_user *)ctrls->controls;
41169 *kernel_ptr = (void *)&ctrls->controls;
41170 *array_size = sizeof(struct v4l2_ext_control)
41171 * ctrls->count;
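
[editor's note] Here __do_const constifies the ioctl descriptor table while the _no_const typedef keeps the one stack copy writable, and __force_user marks deliberate crossings into the user address space for sparse. A hedged sketch of the annotations as grsecurity appears to define them, building on mainline's sparse address-space markers:

    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif
    /* an intentional kernel-to-user pointer cast, in one token: */
    #define __force_user __force __user
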
41172diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
41173index 767ff4d..c69d259 100644
41174--- a/drivers/message/fusion/mptbase.c
41175+++ b/drivers/message/fusion/mptbase.c
41176@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
41177 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
41178 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
41179
41180+#ifdef CONFIG_GRKERNSEC_HIDESYM
41181+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
41182+#else
41183 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
41184 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
41185+#endif
41186+
41187 /*
41188 * Rounding UP to nearest 4-kB boundary here...
41189 */
41190@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
41191 ioc->facts.GlobalCredits);
41192
41193 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
41194+#ifdef CONFIG_GRKERNSEC_HIDESYM
41195+ NULL, NULL);
41196+#else
41197 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
41198+#endif
41199 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
41200 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
41201 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
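
[editor's note] CONFIG_GRKERNSEC_HIDESYM compiles the pointer values out of this /proc output entirely, since kernel addresses in world-readable files undermine address-space randomization. Mainline later attacked the same leak class with the %pK format and the kptr_restrict sysctl; a sketch of that alternative shape (not what this patch does):

    /* %pK prints zeros unless kptr_restrict lets this reader see
     * real kernel addresses: */
    seq_printf(m, "  RequestFrames @ %pK (Dma @ %pK)\n",
               ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
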
41202diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
41203index dd239bd..689c4f7 100644
41204--- a/drivers/message/fusion/mptsas.c
41205+++ b/drivers/message/fusion/mptsas.c
41206@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
41207 return 0;
41208 }
41209
41210+static inline void
41211+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
41212+{
41213+ if (phy_info->port_details) {
41214+ phy_info->port_details->rphy = rphy;
41215+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
41216+ ioc->name, rphy));
41217+ }
41218+
41219+ if (rphy) {
41220+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
41221+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
41222+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
41223+ ioc->name, rphy, rphy->dev.release));
41224+ }
41225+}
41226+
41227 /* no mutex */
41228 static void
41229 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
41230@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
41231 return NULL;
41232 }
41233
41234-static inline void
41235-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
41236-{
41237- if (phy_info->port_details) {
41238- phy_info->port_details->rphy = rphy;
41239- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
41240- ioc->name, rphy));
41241- }
41242-
41243- if (rphy) {
41244- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
41245- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
41246- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
41247- ioc->name, rphy, rphy->dev.release));
41248- }
41249-}
41250-
41251 static inline struct sas_port *
41252 mptsas_get_port(struct mptsas_phyinfo *phy_info)
41253 {
41254diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
41255index 727819c..ad74694 100644
41256--- a/drivers/message/fusion/mptscsih.c
41257+++ b/drivers/message/fusion/mptscsih.c
41258@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
41259
41260 h = shost_priv(SChost);
41261
41262- if (h) {
41263- if (h->info_kbuf == NULL)
41264- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
41265- return h->info_kbuf;
41266- h->info_kbuf[0] = '\0';
41267+ if (!h)
41268+ return NULL;
41269
41270- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
41271- h->info_kbuf[size-1] = '\0';
41272- }
41273+ if (h->info_kbuf == NULL)
41274+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
41275+ return h->info_kbuf;
41276+ h->info_kbuf[0] = '\0';
41277+
41278+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
41279+ h->info_kbuf[size-1] = '\0';
41280
41281 return h->info_kbuf;
41282 }
41283diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
41284index b7d87cd..9890039 100644
41285--- a/drivers/message/i2o/i2o_proc.c
41286+++ b/drivers/message/i2o/i2o_proc.c
41287@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
41288 "Array Controller Device"
41289 };
41290
41291-static char *chtostr(char *tmp, u8 *chars, int n)
41292-{
41293- tmp[0] = 0;
41294- return strncat(tmp, (char *)chars, n);
41295-}
41296-
41297 static int i2o_report_query_status(struct seq_file *seq, int block_status,
41298 char *group)
41299 {
41300@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
41301 } *result;
41302
41303 i2o_exec_execute_ddm_table ddm_table;
41304- char tmp[28 + 1];
41305
41306 result = kmalloc(sizeof(*result), GFP_KERNEL);
41307 if (!result)
41308@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
41309
41310 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
41311 seq_printf(seq, "%-#8x", ddm_table.module_id);
41312- seq_printf(seq, "%-29s",
41313- chtostr(tmp, ddm_table.module_name_version, 28));
41314+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
41315 seq_printf(seq, "%9d ", ddm_table.data_size);
41316 seq_printf(seq, "%8d", ddm_table.code_size);
41317
41318@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
41319
41320 i2o_driver_result_table *result;
41321 i2o_driver_store_table *dst;
41322- char tmp[28 + 1];
41323
41324 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
41325 if (result == NULL)
41326@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
41327
41328 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
41329 seq_printf(seq, "%-#8x", dst->module_id);
41330- seq_printf(seq, "%-29s",
41331- chtostr(tmp, dst->module_name_version, 28));
41332- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
41333+ seq_printf(seq, "%-.28s", dst->module_name_version);
41334+ seq_printf(seq, "%-.8s", dst->date);
41335 seq_printf(seq, "%8d ", dst->module_size);
41336 seq_printf(seq, "%8d ", dst->mpb_size);
41337 seq_printf(seq, "0x%04x", dst->module_flags);
41338@@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
41339 // == (allow) 512d bytes (max)
41340 static u16 *work16 = (u16 *) work32;
41341 int token;
41342- char tmp[16 + 1];
41343
41344 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
41345
41346@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
41347 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
41348 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
41349 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
41350- seq_printf(seq, "Vendor info : %s\n",
41351- chtostr(tmp, (u8 *) (work32 + 2), 16));
41352- seq_printf(seq, "Product info : %s\n",
41353- chtostr(tmp, (u8 *) (work32 + 6), 16));
41354- seq_printf(seq, "Description : %s\n",
41355- chtostr(tmp, (u8 *) (work32 + 10), 16));
41356- seq_printf(seq, "Product rev. : %s\n",
41357- chtostr(tmp, (u8 *) (work32 + 14), 8));
41358+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
41359+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
41360+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
41361+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
41362
41363 seq_printf(seq, "Serial number : ");
41364 print_serial_number(seq, (u8 *) (work32 + 16),
41365@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
41366 u8 pad[256]; // allow up to 256 byte (max) serial number
41367 } result;
41368
41369- char tmp[24 + 1];
41370-
41371 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
41372
41373 if (token < 0) {
41374@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
41375 }
41376
41377 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
41378- seq_printf(seq, "Module name : %s\n",
41379- chtostr(tmp, result.module_name, 24));
41380- seq_printf(seq, "Module revision : %s\n",
41381- chtostr(tmp, result.module_rev, 8));
41382+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
41383+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
41384
41385 seq_printf(seq, "Serial number : ");
41386 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
41387@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
41388 u8 instance_number[4];
41389 } result;
41390
41391- char tmp[64 + 1];
41392-
41393 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
41394
41395 if (token < 0) {
41396@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
41397 return 0;
41398 }
41399
41400- seq_printf(seq, "Device name : %s\n",
41401- chtostr(tmp, result.device_name, 64));
41402- seq_printf(seq, "Service name : %s\n",
41403- chtostr(tmp, result.service_name, 64));
41404- seq_printf(seq, "Physical name : %s\n",
41405- chtostr(tmp, result.physical_location, 64));
41406- seq_printf(seq, "Instance number : %s\n",
41407- chtostr(tmp, result.instance_number, 4));
41408+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
41409+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
41410+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
41411+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
41412
41413 return 0;
41414 }
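
[editor's note] All of the chtostr() removals above rely on the same printf property: a precision on %s bounds the read, so "%.28s" prints at most 28 bytes and stops early at any embedded NUL. That makes it safe for these fixed-width, possibly unterminated firmware strings and eliminates the stack bounce buffer. A runtime-width variant of the same idea (the helper name is illustrative, not from the patch):

    static void seq_print_field(struct seq_file *seq, const char *label,
                                const u8 *buf, int n)
    {
            /* %.*s reads at most n bytes from buf; no NUL required */
            seq_printf(seq, "%s : %.*s\n", label, n, (const char *)buf);
    }
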
41415diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
41416index a8c08f3..155fe3d 100644
41417--- a/drivers/message/i2o/iop.c
41418+++ b/drivers/message/i2o/iop.c
41419@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
41420
41421 spin_lock_irqsave(&c->context_list_lock, flags);
41422
41423- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
41424- atomic_inc(&c->context_list_counter);
41425+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
41426+ atomic_inc_unchecked(&c->context_list_counter);
41427
41428- entry->context = atomic_read(&c->context_list_counter);
41429+ entry->context = atomic_read_unchecked(&c->context_list_counter);
41430
41431 list_add(&entry->list, &c->context_list);
41432
41433@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
41434
41435 #if BITS_PER_LONG == 64
41436 spin_lock_init(&c->context_list_lock);
41437- atomic_set(&c->context_list_counter, 0);
41438+ atomic_set_unchecked(&c->context_list_counter, 0);
41439 INIT_LIST_HEAD(&c->context_list);
41440 #endif
41441
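
[editor's note] atomic_unchecked_t is the escape hatch for PaX's REFCOUNT hardening: ordinary atomic_t operations gain overflow detection so a wrapped reference count traps instead of becoming a use-after-free, and counters like this context ID, where wrapping is intentional and harmless, move to the unchecked variants. Roughly, in a simplified x86-only sketch (an assumption about the real per-arch implementation):

    typedef struct {
            int counter;
    } atomic_unchecked_t;           /* same layout, no overflow trap */

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
            asm volatile("lock; incl %0" : "+m" (v->counter));
    }
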
41442diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
41443index 45ece11..8efa218 100644
41444--- a/drivers/mfd/janz-cmodio.c
41445+++ b/drivers/mfd/janz-cmodio.c
41446@@ -13,6 +13,7 @@
41447
41448 #include <linux/kernel.h>
41449 #include <linux/module.h>
41450+#include <linux/slab.h>
41451 #include <linux/init.h>
41452 #include <linux/pci.h>
41453 #include <linux/interrupt.h>
41454diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
41455index a5f9888..1c0ed56 100644
41456--- a/drivers/mfd/twl4030-irq.c
41457+++ b/drivers/mfd/twl4030-irq.c
41458@@ -35,6 +35,7 @@
41459 #include <linux/of.h>
41460 #include <linux/irqdomain.h>
41461 #include <linux/i2c/twl.h>
41462+#include <asm/pgtable.h>
41463
41464 #include "twl-core.h"
41465
41466@@ -728,10 +729,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
41467 * Install an irq handler for each of the SIH modules;
41468 * clone dummy irq_chip since PIH can't *do* anything
41469 */
41470- twl4030_irq_chip = dummy_irq_chip;
41471- twl4030_irq_chip.name = "twl4030";
41472+ pax_open_kernel();
41473+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
41474+ *(const char **)&twl4030_irq_chip.name = "twl4030";
41475
41476- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
41477+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
41478+ pax_close_kernel();
41479
41480 for (i = irq_base; i < irq_end; i++) {
41481 irq_set_chip_and_handler(i, &twl4030_irq_chip,
41482diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
41483index 277a8db..0e0b754 100644
41484--- a/drivers/mfd/twl6030-irq.c
41485+++ b/drivers/mfd/twl6030-irq.c
41486@@ -387,10 +387,12 @@ int twl6030_init_irq(struct device *dev, int irq_num)
41487 * install an irq handler for each of the modules;
41488 * clone dummy irq_chip since PIH can't *do* anything
41489 */
41490- twl6030_irq_chip = dummy_irq_chip;
41491- twl6030_irq_chip.name = "twl6030";
41492- twl6030_irq_chip.irq_set_type = NULL;
41493- twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
41494+ pax_open_kernel();
41495+ memcpy((void *)&twl6030_irq_chip, &dummy_irq_chip, sizeof twl6030_irq_chip);
41496+ *(const char **)&twl6030_irq_chip.name = "twl6030";
41497+ *(void **)&twl6030_irq_chip.irq_set_type = NULL;
41498+ *(void **)&twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
41499+ pax_close_kernel();
41500
41501 for (i = irq_base; i < irq_end; i++) {
41502 irq_set_chip_and_handler(i, &twl6030_irq_chip,
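
[editor's note] The twl4030/twl6030 hunks show the standard constify retrofit: once the irq_chip templates are read-only, initialization has to go through pax_open_kernel()/pax_close_kernel(), which open a brief window where read-only kernel data is writable, plus explicit member casts so the plugin accepts the stores. On x86 the window is essentially a CR0.WP toggle; a very rough sketch (the real version is per-arch, returns state, and is fussier about barriers):

    static inline void pax_open_kernel(void)
    {
            preempt_disable();
            write_cr0(read_cr0() & ~X86_CR0_WP);  /* drop write-protect */
    }

    static inline void pax_close_kernel(void)
    {
            write_cr0(read_cr0() | X86_CR0_WP);
            preempt_enable();
    }
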
41503diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
41504index f32550a..e3e52a2 100644
41505--- a/drivers/misc/c2port/core.c
41506+++ b/drivers/misc/c2port/core.c
41507@@ -920,7 +920,9 @@ struct c2port_device *c2port_device_register(char *name,
41508 mutex_init(&c2dev->mutex);
41509
41510 /* Create binary file */
41511- c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
41512+ pax_open_kernel();
41513+ *(size_t *)&c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
41514+ pax_close_kernel();
41515 ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
41516 if (unlikely(ret))
41517 goto error_device_create_bin_file;
41518diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
41519index 36f5d52..32311c3 100644
41520--- a/drivers/misc/kgdbts.c
41521+++ b/drivers/misc/kgdbts.c
41522@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
41523 char before[BREAK_INSTR_SIZE];
41524 char after[BREAK_INSTR_SIZE];
41525
41526- probe_kernel_read(before, (char *)kgdbts_break_test,
41527+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
41528 BREAK_INSTR_SIZE);
41529 init_simple_test();
41530 ts.tst = plant_and_detach_test;
41531@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
41532 /* Activate test with initial breakpoint */
41533 if (!is_early)
41534 kgdb_breakpoint();
41535- probe_kernel_read(after, (char *)kgdbts_break_test,
41536+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
41537 BREAK_INSTR_SIZE);
41538 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
41539 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
41540diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
41541index 4cd4a3d..b48cbc7 100644
41542--- a/drivers/misc/lis3lv02d/lis3lv02d.c
41543+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
41544@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
41545 * the lid is closed. This leads to interrupts as soon as a little move
41546 * is done.
41547 */
41548- atomic_inc(&lis3->count);
41549+ atomic_inc_unchecked(&lis3->count);
41550
41551 wake_up_interruptible(&lis3->misc_wait);
41552 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
41553@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
41554 if (lis3->pm_dev)
41555 pm_runtime_get_sync(lis3->pm_dev);
41556
41557- atomic_set(&lis3->count, 0);
41558+ atomic_set_unchecked(&lis3->count, 0);
41559 return 0;
41560 }
41561
41562@@ -616,7 +616,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
41563 add_wait_queue(&lis3->misc_wait, &wait);
41564 while (true) {
41565 set_current_state(TASK_INTERRUPTIBLE);
41566- data = atomic_xchg(&lis3->count, 0);
41567+ data = atomic_xchg_unchecked(&lis3->count, 0);
41568 if (data)
41569 break;
41570
41571@@ -657,7 +657,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
41572 struct lis3lv02d, miscdev);
41573
41574 poll_wait(file, &lis3->misc_wait, wait);
41575- if (atomic_read(&lis3->count))
41576+ if (atomic_read_unchecked(&lis3->count))
41577 return POLLIN | POLLRDNORM;
41578 return 0;
41579 }
41580diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
41581index c439c82..1f20f57 100644
41582--- a/drivers/misc/lis3lv02d/lis3lv02d.h
41583+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
41584@@ -297,7 +297,7 @@ struct lis3lv02d {
41585 struct input_polled_dev *idev; /* input device */
41586 struct platform_device *pdev; /* platform device */
41587 struct regulator_bulk_data regulators[2];
41588- atomic_t count; /* interrupt count after last read */
41589+ atomic_unchecked_t count; /* interrupt count after last read */
41590 union axis_conversion ac; /* hw -> logical axis */
41591 int mapped_btns[3];
41592
41593diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
41594index 2f30bad..c4c13d0 100644
41595--- a/drivers/misc/sgi-gru/gruhandles.c
41596+++ b/drivers/misc/sgi-gru/gruhandles.c
41597@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
41598 unsigned long nsec;
41599
41600 nsec = CLKS2NSEC(clks);
41601- atomic_long_inc(&mcs_op_statistics[op].count);
41602- atomic_long_add(nsec, &mcs_op_statistics[op].total);
41603+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
41604+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
41605 if (mcs_op_statistics[op].max < nsec)
41606 mcs_op_statistics[op].max = nsec;
41607 }
41608diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
41609index 797d796..ae8f01e 100644
41610--- a/drivers/misc/sgi-gru/gruprocfs.c
41611+++ b/drivers/misc/sgi-gru/gruprocfs.c
41612@@ -32,9 +32,9 @@
41613
41614 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
41615
41616-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
41617+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
41618 {
41619- unsigned long val = atomic_long_read(v);
41620+ unsigned long val = atomic_long_read_unchecked(v);
41621
41622 seq_printf(s, "%16lu %s\n", val, id);
41623 }
41624@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
41625
41626 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
41627 for (op = 0; op < mcsop_last; op++) {
41628- count = atomic_long_read(&mcs_op_statistics[op].count);
41629- total = atomic_long_read(&mcs_op_statistics[op].total);
41630+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
41631+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
41632 max = mcs_op_statistics[op].max;
41633 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
41634 count ? total / count : 0, max);
41635diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
41636index 5c3ce24..4915ccb 100644
41637--- a/drivers/misc/sgi-gru/grutables.h
41638+++ b/drivers/misc/sgi-gru/grutables.h
41639@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
41640 * GRU statistics.
41641 */
41642 struct gru_stats_s {
41643- atomic_long_t vdata_alloc;
41644- atomic_long_t vdata_free;
41645- atomic_long_t gts_alloc;
41646- atomic_long_t gts_free;
41647- atomic_long_t gms_alloc;
41648- atomic_long_t gms_free;
41649- atomic_long_t gts_double_allocate;
41650- atomic_long_t assign_context;
41651- atomic_long_t assign_context_failed;
41652- atomic_long_t free_context;
41653- atomic_long_t load_user_context;
41654- atomic_long_t load_kernel_context;
41655- atomic_long_t lock_kernel_context;
41656- atomic_long_t unlock_kernel_context;
41657- atomic_long_t steal_user_context;
41658- atomic_long_t steal_kernel_context;
41659- atomic_long_t steal_context_failed;
41660- atomic_long_t nopfn;
41661- atomic_long_t asid_new;
41662- atomic_long_t asid_next;
41663- atomic_long_t asid_wrap;
41664- atomic_long_t asid_reuse;
41665- atomic_long_t intr;
41666- atomic_long_t intr_cbr;
41667- atomic_long_t intr_tfh;
41668- atomic_long_t intr_spurious;
41669- atomic_long_t intr_mm_lock_failed;
41670- atomic_long_t call_os;
41671- atomic_long_t call_os_wait_queue;
41672- atomic_long_t user_flush_tlb;
41673- atomic_long_t user_unload_context;
41674- atomic_long_t user_exception;
41675- atomic_long_t set_context_option;
41676- atomic_long_t check_context_retarget_intr;
41677- atomic_long_t check_context_unload;
41678- atomic_long_t tlb_dropin;
41679- atomic_long_t tlb_preload_page;
41680- atomic_long_t tlb_dropin_fail_no_asid;
41681- atomic_long_t tlb_dropin_fail_upm;
41682- atomic_long_t tlb_dropin_fail_invalid;
41683- atomic_long_t tlb_dropin_fail_range_active;
41684- atomic_long_t tlb_dropin_fail_idle;
41685- atomic_long_t tlb_dropin_fail_fmm;
41686- atomic_long_t tlb_dropin_fail_no_exception;
41687- atomic_long_t tfh_stale_on_fault;
41688- atomic_long_t mmu_invalidate_range;
41689- atomic_long_t mmu_invalidate_page;
41690- atomic_long_t flush_tlb;
41691- atomic_long_t flush_tlb_gru;
41692- atomic_long_t flush_tlb_gru_tgh;
41693- atomic_long_t flush_tlb_gru_zero_asid;
41694+ atomic_long_unchecked_t vdata_alloc;
41695+ atomic_long_unchecked_t vdata_free;
41696+ atomic_long_unchecked_t gts_alloc;
41697+ atomic_long_unchecked_t gts_free;
41698+ atomic_long_unchecked_t gms_alloc;
41699+ atomic_long_unchecked_t gms_free;
41700+ atomic_long_unchecked_t gts_double_allocate;
41701+ atomic_long_unchecked_t assign_context;
41702+ atomic_long_unchecked_t assign_context_failed;
41703+ atomic_long_unchecked_t free_context;
41704+ atomic_long_unchecked_t load_user_context;
41705+ atomic_long_unchecked_t load_kernel_context;
41706+ atomic_long_unchecked_t lock_kernel_context;
41707+ atomic_long_unchecked_t unlock_kernel_context;
41708+ atomic_long_unchecked_t steal_user_context;
41709+ atomic_long_unchecked_t steal_kernel_context;
41710+ atomic_long_unchecked_t steal_context_failed;
41711+ atomic_long_unchecked_t nopfn;
41712+ atomic_long_unchecked_t asid_new;
41713+ atomic_long_unchecked_t asid_next;
41714+ atomic_long_unchecked_t asid_wrap;
41715+ atomic_long_unchecked_t asid_reuse;
41716+ atomic_long_unchecked_t intr;
41717+ atomic_long_unchecked_t intr_cbr;
41718+ atomic_long_unchecked_t intr_tfh;
41719+ atomic_long_unchecked_t intr_spurious;
41720+ atomic_long_unchecked_t intr_mm_lock_failed;
41721+ atomic_long_unchecked_t call_os;
41722+ atomic_long_unchecked_t call_os_wait_queue;
41723+ atomic_long_unchecked_t user_flush_tlb;
41724+ atomic_long_unchecked_t user_unload_context;
41725+ atomic_long_unchecked_t user_exception;
41726+ atomic_long_unchecked_t set_context_option;
41727+ atomic_long_unchecked_t check_context_retarget_intr;
41728+ atomic_long_unchecked_t check_context_unload;
41729+ atomic_long_unchecked_t tlb_dropin;
41730+ atomic_long_unchecked_t tlb_preload_page;
41731+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
41732+ atomic_long_unchecked_t tlb_dropin_fail_upm;
41733+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
41734+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
41735+ atomic_long_unchecked_t tlb_dropin_fail_idle;
41736+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
41737+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
41738+ atomic_long_unchecked_t tfh_stale_on_fault;
41739+ atomic_long_unchecked_t mmu_invalidate_range;
41740+ atomic_long_unchecked_t mmu_invalidate_page;
41741+ atomic_long_unchecked_t flush_tlb;
41742+ atomic_long_unchecked_t flush_tlb_gru;
41743+ atomic_long_unchecked_t flush_tlb_gru_tgh;
41744+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
41745
41746- atomic_long_t copy_gpa;
41747- atomic_long_t read_gpa;
41748+ atomic_long_unchecked_t copy_gpa;
41749+ atomic_long_unchecked_t read_gpa;
41750
41751- atomic_long_t mesq_receive;
41752- atomic_long_t mesq_receive_none;
41753- atomic_long_t mesq_send;
41754- atomic_long_t mesq_send_failed;
41755- atomic_long_t mesq_noop;
41756- atomic_long_t mesq_send_unexpected_error;
41757- atomic_long_t mesq_send_lb_overflow;
41758- atomic_long_t mesq_send_qlimit_reached;
41759- atomic_long_t mesq_send_amo_nacked;
41760- atomic_long_t mesq_send_put_nacked;
41761- atomic_long_t mesq_page_overflow;
41762- atomic_long_t mesq_qf_locked;
41763- atomic_long_t mesq_qf_noop_not_full;
41764- atomic_long_t mesq_qf_switch_head_failed;
41765- atomic_long_t mesq_qf_unexpected_error;
41766- atomic_long_t mesq_noop_unexpected_error;
41767- atomic_long_t mesq_noop_lb_overflow;
41768- atomic_long_t mesq_noop_qlimit_reached;
41769- atomic_long_t mesq_noop_amo_nacked;
41770- atomic_long_t mesq_noop_put_nacked;
41771- atomic_long_t mesq_noop_page_overflow;
41772+ atomic_long_unchecked_t mesq_receive;
41773+ atomic_long_unchecked_t mesq_receive_none;
41774+ atomic_long_unchecked_t mesq_send;
41775+ atomic_long_unchecked_t mesq_send_failed;
41776+ atomic_long_unchecked_t mesq_noop;
41777+ atomic_long_unchecked_t mesq_send_unexpected_error;
41778+ atomic_long_unchecked_t mesq_send_lb_overflow;
41779+ atomic_long_unchecked_t mesq_send_qlimit_reached;
41780+ atomic_long_unchecked_t mesq_send_amo_nacked;
41781+ atomic_long_unchecked_t mesq_send_put_nacked;
41782+ atomic_long_unchecked_t mesq_page_overflow;
41783+ atomic_long_unchecked_t mesq_qf_locked;
41784+ atomic_long_unchecked_t mesq_qf_noop_not_full;
41785+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
41786+ atomic_long_unchecked_t mesq_qf_unexpected_error;
41787+ atomic_long_unchecked_t mesq_noop_unexpected_error;
41788+ atomic_long_unchecked_t mesq_noop_lb_overflow;
41789+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
41790+ atomic_long_unchecked_t mesq_noop_amo_nacked;
41791+ atomic_long_unchecked_t mesq_noop_put_nacked;
41792+ atomic_long_unchecked_t mesq_noop_page_overflow;
41793
41794 };
41795
41796@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
41797 tghop_invalidate, mcsop_last};
41798
41799 struct mcs_op_statistic {
41800- atomic_long_t count;
41801- atomic_long_t total;
41802+ atomic_long_unchecked_t count;
41803+ atomic_long_unchecked_t total;
41804 unsigned long max;
41805 };
41806
41807@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
41808
41809 #define STAT(id) do { \
41810 if (gru_options & OPT_STATS) \
41811- atomic_long_inc(&gru_stats.id); \
41812+ atomic_long_inc_unchecked(&gru_stats.id); \
41813 } while (0)
41814
41815 #ifdef CONFIG_SGI_GRU_DEBUG
41816diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
41817index c862cd4..0d176fe 100644
41818--- a/drivers/misc/sgi-xp/xp.h
41819+++ b/drivers/misc/sgi-xp/xp.h
41820@@ -288,7 +288,7 @@ struct xpc_interface {
41821 xpc_notify_func, void *);
41822 void (*received) (short, int, void *);
41823 enum xp_retval (*partid_to_nasids) (short, void *);
41824-};
41825+} __no_const;
41826
41827 extern struct xpc_interface xpc_interface;
41828
41829diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
41830index b94d5f7..7f494c5 100644
41831--- a/drivers/misc/sgi-xp/xpc.h
41832+++ b/drivers/misc/sgi-xp/xpc.h
41833@@ -835,6 +835,7 @@ struct xpc_arch_operations {
41834 void (*received_payload) (struct xpc_channel *, void *);
41835 void (*notify_senders_of_disconnect) (struct xpc_channel *);
41836 };
41837+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
41838
41839 /* struct xpc_partition act_state values (for XPC HB) */
41840
41841@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
41842 /* found in xpc_main.c */
41843 extern struct device *xpc_part;
41844 extern struct device *xpc_chan;
41845-extern struct xpc_arch_operations xpc_arch_ops;
41846+extern xpc_arch_operations_no_const xpc_arch_ops;
41847 extern int xpc_disengage_timelimit;
41848 extern int xpc_disengage_timedout;
41849 extern int xpc_activate_IRQ_rcvd;
41850diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
41851index d971817..33bdca5 100644
41852--- a/drivers/misc/sgi-xp/xpc_main.c
41853+++ b/drivers/misc/sgi-xp/xpc_main.c
41854@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
41855 .notifier_call = xpc_system_die,
41856 };
41857
41858-struct xpc_arch_operations xpc_arch_ops;
41859+xpc_arch_operations_no_const xpc_arch_ops;
41860
41861 /*
41862 * Timer function to enforce the timelimit on the partition disengage.
41863@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
41864
41865 if (((die_args->trapnr == X86_TRAP_MF) ||
41866 (die_args->trapnr == X86_TRAP_XF)) &&
41867- !user_mode_vm(die_args->regs))
41868+ !user_mode(die_args->regs))
41869 xpc_die_deactivate();
41870
41871 break;
41872diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
41873index 49f04bc..65660c2 100644
41874--- a/drivers/mmc/core/mmc_ops.c
41875+++ b/drivers/mmc/core/mmc_ops.c
41876@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
41877 void *data_buf;
41878 int is_on_stack;
41879
41880- is_on_stack = object_is_on_stack(buf);
41881+ is_on_stack = object_starts_on_stack(buf);
41882 if (is_on_stack) {
41883 /*
41884 * dma onto stack is unsafe/nonportable, but callers to this
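
[editor's note] object_starts_on_stack() is grsecurity's replacement for object_is_on_stack(): what this DMA-safety test actually needs to know is whether the buffer's first byte lives on the current task stack. A hedged reconstruction of the helper, assuming the usual task_stack_page()/THREAD_SIZE layout:

    static inline int object_starts_on_stack(const void *obj)
    {
            const void *stack = task_stack_page(current);

            return obj >= stack && obj < stack + THREAD_SIZE;
    }
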
41885diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
41886index 0b74189..818358f 100644
41887--- a/drivers/mmc/host/dw_mmc.h
41888+++ b/drivers/mmc/host/dw_mmc.h
41889@@ -202,5 +202,5 @@ struct dw_mci_drv_data {
41890 void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
41891 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
41892 int (*parse_dt)(struct dw_mci *host);
41893-};
41894+} __do_const;
41895 #endif /* _DW_MMC_H_ */
41896diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
41897index c6f6246..60760a8 100644
41898--- a/drivers/mmc/host/sdhci-s3c.c
41899+++ b/drivers/mmc/host/sdhci-s3c.c
41900@@ -664,9 +664,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
41901 * we can use overriding functions instead of default.
41902 */
41903 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
41904- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
41905- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
41906- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
41907+ pax_open_kernel();
41908+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
41909+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
41910+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
41911+ pax_close_kernel();
41912 }
41913
41914 /* It supports additional host capabilities if needed */
41915diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
41916index 0c8bb6b..6f35deb 100644
41917--- a/drivers/mtd/nand/denali.c
41918+++ b/drivers/mtd/nand/denali.c
41919@@ -24,6 +24,7 @@
41920 #include <linux/slab.h>
41921 #include <linux/mtd/mtd.h>
41922 #include <linux/module.h>
41923+#include <linux/slab.h>
41924
41925 #include "denali.h"
41926
41927diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
41928index 51b9d6a..52af9a7 100644
41929--- a/drivers/mtd/nftlmount.c
41930+++ b/drivers/mtd/nftlmount.c
41931@@ -24,6 +24,7 @@
41932 #include <asm/errno.h>
41933 #include <linux/delay.h>
41934 #include <linux/slab.h>
41935+#include <linux/sched.h>
41936 #include <linux/mtd/mtd.h>
41937 #include <linux/mtd/nand.h>
41938 #include <linux/mtd/nftl.h>
41939diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
41940index f9d5615..99dd95f 100644
41941--- a/drivers/mtd/sm_ftl.c
41942+++ b/drivers/mtd/sm_ftl.c
41943@@ -56,7 +56,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
41944 #define SM_CIS_VENDOR_OFFSET 0x59
41945 struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
41946 {
41947- struct attribute_group *attr_group;
41948+ attribute_group_no_const *attr_group;
41949 struct attribute **attributes;
41950 struct sm_sysfs_attribute *vendor_attribute;
41951
41952diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
41953index f975696..4597e21 100644
41954--- a/drivers/net/bonding/bond_main.c
41955+++ b/drivers/net/bonding/bond_main.c
41956@@ -4870,7 +4870,7 @@ static unsigned int bond_get_num_tx_queues(void)
41957 return tx_queues;
41958 }
41959
41960-static struct rtnl_link_ops bond_link_ops __read_mostly = {
41961+static struct rtnl_link_ops bond_link_ops = {
41962 .kind = "bond",
41963 .priv_size = sizeof(struct bonding),
41964 .setup = bond_setup,
41965@@ -4995,8 +4995,8 @@ static void __exit bonding_exit(void)
41966
41967 bond_destroy_debugfs();
41968
41969- rtnl_link_unregister(&bond_link_ops);
41970 unregister_pernet_subsys(&bond_net_ops);
41971+ rtnl_link_unregister(&bond_link_ops);
41972
41973 #ifdef CONFIG_NET_POLL_CONTROLLER
41974 /*
41975diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
41976index 25723d8..925ab8e 100644
41977--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
41978+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
41979@@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
41980 if ((mc->ptr + rec_len) > mc->end)
41981 goto decode_failed;
41982
41983- memcpy(cf->data, mc->ptr, rec_len);
41984+ memcpy(cf->data, mc->ptr, cf->can_dlc);
41985 mc->ptr += rec_len;
41986 }
41987
41988diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
41989index e1d2643..7f4133b 100644
41990--- a/drivers/net/ethernet/8390/ax88796.c
41991+++ b/drivers/net/ethernet/8390/ax88796.c
41992@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
41993 if (ax->plat->reg_offsets)
41994 ei_local->reg_offset = ax->plat->reg_offsets;
41995 else {
41996+ resource_size_t _mem_size = mem_size;
41997+ do_div(_mem_size, 0x18);
41998 ei_local->reg_offset = ax->reg_offsets;
41999 for (ret = 0; ret < 0x18; ret++)
42000- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
42001+ ax->reg_offsets[ret] = _mem_size * ret;
42002 }
42003
42004 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
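
[editor's note] The ax88796 change is about 64-bit arithmetic, not security: resource_size_t can be 64 bits even on 32-bit platforms, and a plain `/` on a 64-bit value drags in libgcc division helpers the kernel does not link. do_div() divides in place by a 32-bit divisor and hands back the remainder:

    #include <asm/div64.h>

    u64 n = mem_size;           /* assuming a 64-bit resource_size_t */
    u32 rem = do_div(n, 0x18);  /* n becomes the quotient */
    /* here n == mem_size / 0x18 and rem == mem_size % 0x18 */
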
42005diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
42006index 151675d..0139a9d 100644
42007--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
42008+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
42009@@ -1112,7 +1112,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
42010 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
42011 {
42012 /* RX_MODE controlling object */
42013- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
42014+ bnx2x_init_rx_mode_obj(bp);
42015
42016 /* multicast configuration controlling object */
42017 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
42018diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
42019index ce1a916..10b52b0 100644
42020--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
42021+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
42022@@ -960,6 +960,9 @@ static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
42023 struct bnx2x *bp = netdev_priv(dev);
42024
42025 /* Use the ethtool_dump "flag" field as the dump preset index */
42026+ if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS)
42027+ return -EINVAL;
42028+
42029 bp->dump_preset_idx = val->flag;
42030 return 0;
42031 }
42032@@ -986,8 +989,6 @@ static int bnx2x_get_dump_data(struct net_device *dev,
42033 struct bnx2x *bp = netdev_priv(dev);
42034 struct dump_header dump_hdr = {0};
42035
42036- memset(p, 0, dump->len);
42037-
42038 /* Disable parity attentions as long as following dump may
42039 * cause false alarms by reading never written registers. We
42040 * will re-enable parity attentions right after the dump.
42041diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
42042index b4c9dea..2a9927f 100644
42043--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
42044+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
42045@@ -11497,6 +11497,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
42046 bp->min_msix_vec_cnt = 2;
42047 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
42048
42049+ bp->dump_preset_idx = 1;
42050+
42051 return rc;
42052 }
42053
42054diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
42055index 32a9609..0b1c53a 100644
42056--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
42057+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
42058@@ -2387,15 +2387,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
42059 return rc;
42060 }
42061
42062-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
42063- struct bnx2x_rx_mode_obj *o)
42064+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
42065 {
42066 if (CHIP_IS_E1x(bp)) {
42067- o->wait_comp = bnx2x_empty_rx_mode_wait;
42068- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
42069+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
42070+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
42071 } else {
42072- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
42073- o->config_rx_mode = bnx2x_set_rx_mode_e2;
42074+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
42075+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
42076 }
42077 }
42078
42079diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
42080index 43c00bc..dd1d03d 100644
42081--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
42082+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
42083@@ -1321,8 +1321,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
42084
42085 /********************* RX MODE ****************/
42086
42087-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
42088- struct bnx2x_rx_mode_obj *o);
42089+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
42090
42091 /**
42092 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
42093diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
42094index ff6e30e..87e8452 100644
42095--- a/drivers/net/ethernet/broadcom/tg3.h
42096+++ b/drivers/net/ethernet/broadcom/tg3.h
42097@@ -147,6 +147,7 @@
42098 #define CHIPREV_ID_5750_A0 0x4000
42099 #define CHIPREV_ID_5750_A1 0x4001
42100 #define CHIPREV_ID_5750_A3 0x4003
42101+#define CHIPREV_ID_5750_C1 0x4201
42102 #define CHIPREV_ID_5750_C2 0x4202
42103 #define CHIPREV_ID_5752_A0_HW 0x5000
42104 #define CHIPREV_ID_5752_A0 0x6000
42105diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
42106index 71497e8..b650951 100644
42107--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
42108+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
42109@@ -3037,7 +3037,9 @@ static void t3_io_resume(struct pci_dev *pdev)
42110 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
42111 t3_read_reg(adapter, A_PCIE_PEX_ERR));
42112
42113+ rtnl_lock();
42114 t3_resume_ports(adapter);
42115+ rtnl_unlock();
42116 }
42117
42118 static const struct pci_error_handlers t3_err_handler = {
42119diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
42120index 8cffcdf..aadf043 100644
42121--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
42122+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
42123@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
42124 */
42125 struct l2t_skb_cb {
42126 arp_failure_handler_func arp_failure_handler;
42127-};
42128+} __no_const;
42129
42130 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
42131
42132diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
42133index 4c83003..2a2a5b9 100644
42134--- a/drivers/net/ethernet/dec/tulip/de4x5.c
42135+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
42136@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
42137 for (i=0; i<ETH_ALEN; i++) {
42138 tmp.addr[i] = dev->dev_addr[i];
42139 }
42140- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
42141+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
42142 break;
42143
42144 case DE4X5_SET_HWADDR: /* Set the hardware address */
42145@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
42146 spin_lock_irqsave(&lp->lock, flags);
42147 memcpy(&statbuf, &lp->pktStats, ioc->len);
42148 spin_unlock_irqrestore(&lp->lock, flags);
42149- if (copy_to_user(ioc->data, &statbuf, ioc->len))
42150+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
42151 return -EFAULT;
42152 break;
42153 }
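
[editor's note] Both de4x5 hunks close the same classic infoleak: ioc->len is user-controlled, and copying that many bytes out of a fixed-size kernel object discloses whatever sits next to it on the stack. The patch rejects oversize requests outright; an alternative shape (my assumption, not what the patch does) is to clamp the length instead:

    size_t len = min_t(size_t, ioc->len, sizeof(statbuf));

    if (copy_to_user(ioc->data, &statbuf, len))
            return -EFAULT;
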
42154diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
42155index 6e43426..1bd8365 100644
42156--- a/drivers/net/ethernet/emulex/benet/be_main.c
42157+++ b/drivers/net/ethernet/emulex/benet/be_main.c
42158@@ -469,7 +469,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
42159
42160 if (wrapped)
42161 newacc += 65536;
42162- ACCESS_ONCE(*acc) = newacc;
42163+ ACCESS_ONCE_RW(*acc) = newacc;
42164 }
42165
42166 void populate_erx_stats(struct be_adapter *adapter,
42167diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
42168index 21b85fb..b49e5fc 100644
42169--- a/drivers/net/ethernet/faraday/ftgmac100.c
42170+++ b/drivers/net/ethernet/faraday/ftgmac100.c
42171@@ -31,6 +31,8 @@
42172 #include <linux/netdevice.h>
42173 #include <linux/phy.h>
42174 #include <linux/platform_device.h>
42175+#include <linux/interrupt.h>
42176+#include <linux/irqreturn.h>
42177 #include <net/ip.h>
42178
42179 #include "ftgmac100.h"
42180diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
42181index a6eda8d..935d273 100644
42182--- a/drivers/net/ethernet/faraday/ftmac100.c
42183+++ b/drivers/net/ethernet/faraday/ftmac100.c
42184@@ -31,6 +31,8 @@
42185 #include <linux/module.h>
42186 #include <linux/netdevice.h>
42187 #include <linux/platform_device.h>
42188+#include <linux/interrupt.h>
42189+#include <linux/irqreturn.h>
42190
42191 #include "ftmac100.h"
42192
42193diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
42194index 331987d..3be1135 100644
42195--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
42196+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
42197@@ -776,7 +776,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
42198 }
42199
42200 /* update the base incval used to calculate frequency adjustment */
42201- ACCESS_ONCE(adapter->base_incval) = incval;
42202+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
42203 smp_mb();
42204
42205 /* need lock to prevent incorrect read while modifying cyclecounter */
42206diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
42207index fbe5363..266b4e3 100644
42208--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
42209+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
42210@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
42211 struct __vxge_hw_fifo *fifo;
42212 struct vxge_hw_fifo_config *config;
42213 u32 txdl_size, txdl_per_memblock;
42214- struct vxge_hw_mempool_cbs fifo_mp_callback;
42215+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
42216+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
42217+ };
42218+
42219 struct __vxge_hw_virtualpath *vpath;
42220
42221 if ((vp == NULL) || (attr == NULL)) {
42222@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
42223 goto exit;
42224 }
42225
42226- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
42227-
42228 fifo->mempool =
42229 __vxge_hw_mempool_create(vpath->hldev,
42230 fifo->config->memblock_size,
42231diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
42232index 5e7fb1d..f8d1810 100644
42233--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
42234+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
42235@@ -1948,7 +1948,9 @@ int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
42236 op_mode = QLC_83XX_DEFAULT_OPMODE;
42237
42238 if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
42239- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
42240+ pax_open_kernel();
42241+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
42242+ pax_close_kernel();
42243 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
42244 } else {
42245 return -EIO;
42246diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
42247index b0c3de9..fc5857e 100644
42248--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
42249+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
42250@@ -200,15 +200,21 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
42251 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
42252 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
42253 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
42254- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
42255+ pax_open_kernel();
42256+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
42257+ pax_close_kernel();
42258 } else if (priv_level == QLCNIC_PRIV_FUNC) {
42259 ahw->op_mode = QLCNIC_PRIV_FUNC;
42260 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
42261- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
42262+ pax_open_kernel();
42263+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
42264+ pax_close_kernel();
42265 } else if (priv_level == QLCNIC_MGMT_FUNC) {
42266 ahw->op_mode = QLCNIC_MGMT_FUNC;
42267 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
42268- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
42269+ pax_open_kernel();
42270+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
42271+ pax_close_kernel();
42272 } else {
42273 return -EIO;
42274 }
42275diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
42276index 6acf82b..14b097e 100644
42277--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
42278+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
42279@@ -206,10 +206,10 @@ int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter)
42280 if (err) {
42281 dev_info(&adapter->pdev->dev,
42282 "Failed to set driver version in firmware\n");
42283- return -EIO;
42284+ err = -EIO;
42285 }
42286-
42287- return 0;
42288+ qlcnic_free_mbx_args(&cmd);
42289+ return err;
42290 }
42291
42292 int
42293diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
42294index d3f8797..82a03d3 100644
42295--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
42296+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
42297@@ -262,7 +262,7 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
42298
42299 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
42300 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
42301- memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
42302+ memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
42303
42304 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
42305 vlan_req->vlan_id = cpu_to_le16(vlan_id);
42306diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
42307index 887aebe..9095ff9 100644
42308--- a/drivers/net/ethernet/realtek/8139cp.c
42309+++ b/drivers/net/ethernet/realtek/8139cp.c
42310@@ -524,6 +524,7 @@ rx_status_loop:
42311 PCI_DMA_FROMDEVICE);
42312 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
42313 dev->stats.rx_dropped++;
42314+ kfree_skb(new_skb);
42315 goto rx_next;
42316 }
42317
42318diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
42319index 393f961..d343034 100644
42320--- a/drivers/net/ethernet/realtek/r8169.c
42321+++ b/drivers/net/ethernet/realtek/r8169.c
42322@@ -753,22 +753,22 @@ struct rtl8169_private {
42323 struct mdio_ops {
42324 void (*write)(struct rtl8169_private *, int, int);
42325 int (*read)(struct rtl8169_private *, int);
42326- } mdio_ops;
42327+ } __no_const mdio_ops;
42328
42329 struct pll_power_ops {
42330 void (*down)(struct rtl8169_private *);
42331 void (*up)(struct rtl8169_private *);
42332- } pll_power_ops;
42333+ } __no_const pll_power_ops;
42334
42335 struct jumbo_ops {
42336 void (*enable)(struct rtl8169_private *);
42337 void (*disable)(struct rtl8169_private *);
42338- } jumbo_ops;
42339+ } __no_const jumbo_ops;
42340
42341 struct csi_ops {
42342 void (*write)(struct rtl8169_private *, int, int);
42343 u32 (*read)(struct rtl8169_private *, int);
42344- } csi_ops;
42345+ } __no_const csi_ops;
42346
42347 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
42348 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
42349diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
42350index 9a95abf..36df7f9 100644
42351--- a/drivers/net/ethernet/sfc/ptp.c
42352+++ b/drivers/net/ethernet/sfc/ptp.c
42353@@ -535,7 +535,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
42354 (u32)((u64)ptp->start.dma_addr >> 32));
42355
42356 /* Clear flag that signals MC ready */
42357- ACCESS_ONCE(*start) = 0;
42358+ ACCESS_ONCE_RW(*start) = 0;
42359 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
42360 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
42361
42362diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
42363index 50617c5..b13724c 100644
42364--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
42365+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
42366@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
42367
42368 writel(value, ioaddr + MMC_CNTRL);
42369
42370- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
42371- MMC_CNTRL, value);
42372+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
42373+// MMC_CNTRL, value);
42374 }
42375
42376 /* To mask all all interrupts.*/
42377diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
42378index e6fe0d8..2b7d752 100644
42379--- a/drivers/net/hyperv/hyperv_net.h
42380+++ b/drivers/net/hyperv/hyperv_net.h
42381@@ -101,7 +101,7 @@ struct rndis_device {
42382
42383 enum rndis_device_state state;
42384 bool link_state;
42385- atomic_t new_req_id;
42386+ atomic_unchecked_t new_req_id;
42387
42388 spinlock_t request_lock;
42389 struct list_head req_list;
42390diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
42391index 0775f0a..d4fb316 100644
42392--- a/drivers/net/hyperv/rndis_filter.c
42393+++ b/drivers/net/hyperv/rndis_filter.c
42394@@ -104,7 +104,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
42395 * template
42396 */
42397 set = &rndis_msg->msg.set_req;
42398- set->req_id = atomic_inc_return(&dev->new_req_id);
42399+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
42400
42401 /* Add to the request list */
42402 spin_lock_irqsave(&dev->request_lock, flags);
42403@@ -752,7 +752,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
42404
42405 /* Setup the rndis set */
42406 halt = &request->request_msg.msg.halt_req;
42407- halt->req_id = atomic_inc_return(&dev->new_req_id);
42408+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
42409
42410 /* Ignore return since this msg is optional. */
42411 rndis_filter_send_request(dev, request);
42412diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
42413index bf0d55e..82bcfbd1 100644
42414--- a/drivers/net/ieee802154/fakehard.c
42415+++ b/drivers/net/ieee802154/fakehard.c
42416@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
42417 phy->transmit_power = 0xbf;
42418
42419 dev->netdev_ops = &fake_ops;
42420- dev->ml_priv = &fake_mlme;
42421+ dev->ml_priv = (void *)&fake_mlme;
42422
42423 priv = netdev_priv(dev);
42424 priv->phy = phy;
42425diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
42426index 6e91931..2b0ebe7 100644
42427--- a/drivers/net/macvlan.c
42428+++ b/drivers/net/macvlan.c
42429@@ -905,13 +905,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
42430 int macvlan_link_register(struct rtnl_link_ops *ops)
42431 {
42432 /* common fields */
42433- ops->priv_size = sizeof(struct macvlan_dev);
42434- ops->validate = macvlan_validate;
42435- ops->maxtype = IFLA_MACVLAN_MAX;
42436- ops->policy = macvlan_policy;
42437- ops->changelink = macvlan_changelink;
42438- ops->get_size = macvlan_get_size;
42439- ops->fill_info = macvlan_fill_info;
42440+ pax_open_kernel();
42441+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
42442+ *(void **)&ops->validate = macvlan_validate;
42443+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
42444+ *(const void **)&ops->policy = macvlan_policy;
42445+ *(void **)&ops->changelink = macvlan_changelink;
42446+ *(void **)&ops->get_size = macvlan_get_size;
42447+ *(void **)&ops->fill_info = macvlan_fill_info;
42448+ pax_close_kernel();
42449
42450 return rtnl_link_register(ops);
42451 };
42452@@ -967,7 +969,7 @@ static int macvlan_device_event(struct notifier_block *unused,
42453 return NOTIFY_DONE;
42454 }
42455
42456-static struct notifier_block macvlan_notifier_block __read_mostly = {
42457+static struct notifier_block macvlan_notifier_block = {
42458 .notifier_call = macvlan_device_event,
42459 };
42460
42461diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
42462index 523d6b2..5e16aa1 100644
42463--- a/drivers/net/macvtap.c
42464+++ b/drivers/net/macvtap.c
42465@@ -1110,7 +1110,7 @@ static int macvtap_device_event(struct notifier_block *unused,
42466 return NOTIFY_DONE;
42467 }
42468
42469-static struct notifier_block macvtap_notifier_block __read_mostly = {
42470+static struct notifier_block macvtap_notifier_block = {
42471 .notifier_call = macvtap_device_event,
42472 };
42473
42474diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
42475index daec9b0..6428fcb 100644
42476--- a/drivers/net/phy/mdio-bitbang.c
42477+++ b/drivers/net/phy/mdio-bitbang.c
42478@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
42479 struct mdiobb_ctrl *ctrl = bus->priv;
42480
42481 module_put(ctrl->ops->owner);
42482+ mdiobus_unregister(bus);
42483 mdiobus_free(bus);
42484 }
42485 EXPORT_SYMBOL(free_mdio_bitbang);
42486diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
42487index 72ff14b..11d442d 100644
42488--- a/drivers/net/ppp/ppp_generic.c
42489+++ b/drivers/net/ppp/ppp_generic.c
42490@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
42491 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
42492 struct ppp_stats stats;
42493 struct ppp_comp_stats cstats;
42494- char *vers;
42495
42496 switch (cmd) {
42497 case SIOCGPPPSTATS:
42498@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
42499 break;
42500
42501 case SIOCGPPPVER:
42502- vers = PPP_VERSION;
42503- if (copy_to_user(addr, vers, strlen(vers) + 1))
42504+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
42505 break;
42506 err = 0;
42507 break;
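
[editor's note] The SIOCGPPPVER change works because PPP_VERSION is a string-literal macro, so sizeof() already counts the terminating NUL at compile time; the strlen() call and the intermediate vers pointer were pure overhead. In miniature, with an illustrative value:

    #define EXAMPLE_VERSION "2.4.2"             /* illustrative literal */
    char buf[sizeof(EXAMPLE_VERSION)];          /* 6 bytes: 5 chars + NUL */
    memcpy(buf, EXAMPLE_VERSION, sizeof(EXAMPLE_VERSION));
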
42508diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
42509index 1252d9c..80e660b 100644
42510--- a/drivers/net/slip/slhc.c
42511+++ b/drivers/net/slip/slhc.c
42512@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
42513 register struct tcphdr *thp;
42514 register struct iphdr *ip;
42515 register struct cstate *cs;
42516- int len, hdrlen;
42517+ long len, hdrlen;
42518 unsigned char *cp = icp;
42519
42520 /* We've got a compressed packet; read the change byte */
42521diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
42522index b305105..8ead6df 100644
42523--- a/drivers/net/team/team.c
42524+++ b/drivers/net/team/team.c
42525@@ -2682,7 +2682,7 @@ static int team_device_event(struct notifier_block *unused,
42526 return NOTIFY_DONE;
42527 }
42528
42529-static struct notifier_block team_notifier_block __read_mostly = {
42530+static struct notifier_block team_notifier_block = {
42531 .notifier_call = team_device_event,
42532 };
42533
42534diff --git a/drivers/net/tun.c b/drivers/net/tun.c
42535index 2491eb2..1a453eb 100644
42536--- a/drivers/net/tun.c
42537+++ b/drivers/net/tun.c
42538@@ -1076,8 +1076,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
42539 u32 rxhash;
42540
42541 if (!(tun->flags & TUN_NO_PI)) {
42542- if ((len -= sizeof(pi)) > total_len)
42543+ if (len < sizeof(pi))
42544 return -EINVAL;
42545+ len -= sizeof(pi);
42546
42547 if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
42548 return -EFAULT;
42549@@ -1085,8 +1086,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
42550 }
42551
42552 if (tun->flags & TUN_VNET_HDR) {
42553- if ((len -= tun->vnet_hdr_sz) > total_len)
42554+ if (len < tun->vnet_hdr_sz)
42555 return -EINVAL;
42556+ len -= tun->vnet_hdr_sz;
42557
42558 if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
42559 return -EFAULT;
42560@@ -1869,7 +1871,7 @@ unlock:
42561 }
42562
42563 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
42564- unsigned long arg, int ifreq_len)
42565+ unsigned long arg, size_t ifreq_len)
42566 {
42567 struct tun_file *tfile = file->private_data;
42568 struct tun_struct *tun;
42569@@ -1881,6 +1883,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
42570 int vnet_hdr_sz;
42571 int ret;
42572
42573+ if (ifreq_len > sizeof ifr)
42574+ return -EFAULT;
42575+
42576 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
42577 if (copy_from_user(&ifr, argp, ifreq_len))
42578 return -EFAULT;
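[editor's note] The tun.c hunk fixes an unsigned-subtraction hazard: the old test `(len -= sizeof(pi)) > total_len` subtracts before validating, so when len (a size_t) is smaller than the header the result wraps to a huge value, and the guard only rejected the packet because the wrapped value happened to exceed total_len. Checking the lower bound first and subtracting afterwards makes the rejection explicit and keeps len meaningful. A small demonstration of the failure mode, with made-up sizes:

```c
#include <stdio.h>

int main(void)
{
    size_t len = 2, total_len = 100;
    const size_t hdr = 4;              /* stands in for sizeof(pi) */

    /* old pattern: subtract first -- wraps toward SIZE_MAX */
    size_t wrapped = len - hdr;
    printf("wrapped value: %zu, passes '> total_len' only by luck: %d\n",
           wrapped, wrapped > total_len);

    /* fixed pattern: validate before any arithmetic */
    if (len < hdr)
        puts("rejected with -EINVAL before subtracting");
    return 0;
}
```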
42579diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
42580index cba1d46..f703766 100644
42581--- a/drivers/net/usb/hso.c
42582+++ b/drivers/net/usb/hso.c
42583@@ -71,7 +71,7 @@
42584 #include <asm/byteorder.h>
42585 #include <linux/serial_core.h>
42586 #include <linux/serial.h>
42587-
42588+#include <asm/local.h>
42589
42590 #define MOD_AUTHOR "Option Wireless"
42591 #define MOD_DESCRIPTION "USB High Speed Option driver"
42592@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
42593 struct urb *urb;
42594
42595 urb = serial->rx_urb[0];
42596- if (serial->port.count > 0) {
42597+ if (atomic_read(&serial->port.count) > 0) {
42598 count = put_rxbuf_data(urb, serial);
42599 if (count == -1)
42600 return;
42601@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
42602 DUMP1(urb->transfer_buffer, urb->actual_length);
42603
42604 /* Anyone listening? */
42605- if (serial->port.count == 0)
42606+ if (atomic_read(&serial->port.count) == 0)
42607 return;
42608
42609 if (status == 0) {
42610@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
42611 tty_port_tty_set(&serial->port, tty);
42612
42613 /* check for port already opened, if not set the termios */
42614- serial->port.count++;
42615- if (serial->port.count == 1) {
42616+ if (atomic_inc_return(&serial->port.count) == 1) {
42617 serial->rx_state = RX_IDLE;
42618 /* Force default termio settings */
42619 _hso_serial_set_termios(tty, NULL);
42620@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
42621 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
42622 if (result) {
42623 hso_stop_serial_device(serial->parent);
42624- serial->port.count--;
42625+ atomic_dec(&serial->port.count);
42626 kref_put(&serial->parent->ref, hso_serial_ref_free);
42627 }
42628 } else {
42629@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
42630
42631 /* reset the rts and dtr */
42632 /* do the actual close */
42633- serial->port.count--;
42634+ atomic_dec(&serial->port.count);
42635
42636- if (serial->port.count <= 0) {
42637- serial->port.count = 0;
42638+ if (atomic_read(&serial->port.count) <= 0) {
42639+ atomic_set(&serial->port.count, 0);
42640 tty_port_tty_set(&serial->port, NULL);
42641 if (!usb_gone)
42642 hso_stop_serial_device(serial->parent);
42643@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
42644
42645 /* the actual setup */
42646 spin_lock_irqsave(&serial->serial_lock, flags);
42647- if (serial->port.count)
42648+ if (atomic_read(&serial->port.count))
42649 _hso_serial_set_termios(tty, old);
42650 else
42651 tty->termios = *old;
42652@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
42653 D1("Pending read interrupt on port %d\n", i);
42654 spin_lock(&serial->serial_lock);
42655 if (serial->rx_state == RX_IDLE &&
42656- serial->port.count > 0) {
42657+ atomic_read(&serial->port.count) > 0) {
42658 /* Setup and send a ctrl req read on
42659 * port i */
42660 if (!serial->rx_urb_filled[0]) {
42661@@ -3057,7 +3056,7 @@ static int hso_resume(struct usb_interface *iface)
42662 /* Start all serial ports */
42663 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
42664 if (serial_table[i] && (serial_table[i]->interface == iface)) {
42665- if (dev2ser(serial_table[i])->port.count) {
42666+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
42667 result =
42668 hso_start_serial_device(serial_table[i], GFP_NOIO);
42669 hso_kick_transmit(dev2ser(serial_table[i]));
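[editor's note] Throughout the hso.c hunk, serial->port.count changes from a plain int to atomic accesses so the open, close, resume, and URB-callback paths cannot race on the counter. The key step is in hso_serial_open(): the old "increment, then test for first opener" pair becomes a single atomic_inc_return(), which is the operation that must be indivisible. A userspace sketch of the same idiom, using C11 atomics in place of the kernel's atomic_t (an analogy, not the kernel API):

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count;

static void serial_open(void)
{
    /* increment and read back in one indivisible step */
    if (atomic_fetch_add(&port_count, 1) + 1 == 1)
        puts("first opener: initialise hardware");
}

static void serial_close(void)
{
    if (atomic_fetch_sub(&port_count, 1) - 1 <= 0) {
        atomic_store(&port_count, 0);   /* clamp, as the hunk does */
        puts("last closer: stop device");
    }
}

int main(void)
{
    serial_open();
    serial_open();
    serial_close();
    serial_close();
    return 0;
}
```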
42670diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
42671index 57325f3..36b181f 100644
42672--- a/drivers/net/vxlan.c
42673+++ b/drivers/net/vxlan.c
42674@@ -1579,7 +1579,7 @@ nla_put_failure:
42675 return -EMSGSIZE;
42676 }
42677
42678-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
42679+static struct rtnl_link_ops vxlan_link_ops = {
42680 .kind = "vxlan",
42681 .maxtype = IFLA_VXLAN_MAX,
42682 .policy = vxlan_policy,
42683diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
42684index 34c8a33..3261fdc 100644
42685--- a/drivers/net/wireless/at76c50x-usb.c
42686+++ b/drivers/net/wireless/at76c50x-usb.c
42687@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
42688 }
42689
42690 /* Convert timeout from the DFU status to jiffies */
42691-static inline unsigned long at76_get_timeout(struct dfu_status *s)
42692+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
42693 {
42694 return msecs_to_jiffies((s->poll_timeout[2] << 16)
42695 | (s->poll_timeout[1] << 8)
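[editor's note] The __intentional_overflow(-1) annotation in the at76c50x hunk is consumed by PaX's size_overflow GCC plugin: it marks functions whose arithmetic is allowed to wrap (here, assembling a 24-bit poll timeout from three bytes) so the plugin does not instrument or report them. A hedged sketch of how such a marker is typically made to compile away when the plugin is absent; the exact attribute spelling is an assumption:

```c
#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) \
        __attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)   /* no plugin: marker is a no-op */
#endif

static inline unsigned long __intentional_overflow(-1)
assemble_timeout(const unsigned char b[3])
{
    /* deliberately unchecked arithmetic on byte-sized inputs */
    return ((unsigned long)b[2] << 16) | ((unsigned long)b[1] << 8) | b[0];
}
```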
42696diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
42697index 8d78253..bebbb68 100644
42698--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
42699+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
42700@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
42701 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
42702 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
42703
42704- ACCESS_ONCE(ads->ds_link) = i->link;
42705- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
42706+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
42707+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
42708
42709 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
42710 ctl6 = SM(i->keytype, AR_EncrType);
42711@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
42712
42713 if ((i->is_first || i->is_last) &&
42714 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
42715- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
42716+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
42717 | set11nTries(i->rates, 1)
42718 | set11nTries(i->rates, 2)
42719 | set11nTries(i->rates, 3)
42720 | (i->dur_update ? AR_DurUpdateEna : 0)
42721 | SM(0, AR_BurstDur);
42722
42723- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
42724+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
42725 | set11nRate(i->rates, 1)
42726 | set11nRate(i->rates, 2)
42727 | set11nRate(i->rates, 3);
42728 } else {
42729- ACCESS_ONCE(ads->ds_ctl2) = 0;
42730- ACCESS_ONCE(ads->ds_ctl3) = 0;
42731+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
42732+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
42733 }
42734
42735 if (!i->is_first) {
42736- ACCESS_ONCE(ads->ds_ctl0) = 0;
42737- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
42738- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
42739+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
42740+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
42741+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
42742 return;
42743 }
42744
42745@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
42746 break;
42747 }
42748
42749- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
42750+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
42751 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
42752 | SM(i->txpower, AR_XmitPower)
42753 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
42754@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
42755 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
42756 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
42757
42758- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
42759- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
42760+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
42761+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
42762
42763 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
42764 return;
42765
42766- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
42767+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
42768 | set11nPktDurRTSCTS(i->rates, 1);
42769
42770- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
42771+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
42772 | set11nPktDurRTSCTS(i->rates, 3);
42773
42774- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
42775+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
42776 | set11nRateFlags(i->rates, 1)
42777 | set11nRateFlags(i->rates, 2)
42778 | set11nRateFlags(i->rates, 3)
42779diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
42780index 301bf72..3f5654f 100644
42781--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
42782+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
42783@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
42784 (i->qcu << AR_TxQcuNum_S) | desc_len;
42785
42786 checksum += val;
42787- ACCESS_ONCE(ads->info) = val;
42788+ ACCESS_ONCE_RW(ads->info) = val;
42789
42790 checksum += i->link;
42791- ACCESS_ONCE(ads->link) = i->link;
42792+ ACCESS_ONCE_RW(ads->link) = i->link;
42793
42794 checksum += i->buf_addr[0];
42795- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
42796+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
42797 checksum += i->buf_addr[1];
42798- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
42799+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
42800 checksum += i->buf_addr[2];
42801- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
42802+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
42803 checksum += i->buf_addr[3];
42804- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
42805+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
42806
42807 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
42808- ACCESS_ONCE(ads->ctl3) = val;
42809+ ACCESS_ONCE_RW(ads->ctl3) = val;
42810 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
42811- ACCESS_ONCE(ads->ctl5) = val;
42812+ ACCESS_ONCE_RW(ads->ctl5) = val;
42813 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
42814- ACCESS_ONCE(ads->ctl7) = val;
42815+ ACCESS_ONCE_RW(ads->ctl7) = val;
42816 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
42817- ACCESS_ONCE(ads->ctl9) = val;
42818+ ACCESS_ONCE_RW(ads->ctl9) = val;
42819
42820 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
42821- ACCESS_ONCE(ads->ctl10) = checksum;
42822+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
42823
42824 if (i->is_first || i->is_last) {
42825- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
42826+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
42827 | set11nTries(i->rates, 1)
42828 | set11nTries(i->rates, 2)
42829 | set11nTries(i->rates, 3)
42830 | (i->dur_update ? AR_DurUpdateEna : 0)
42831 | SM(0, AR_BurstDur);
42832
42833- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
42834+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
42835 | set11nRate(i->rates, 1)
42836 | set11nRate(i->rates, 2)
42837 | set11nRate(i->rates, 3);
42838 } else {
42839- ACCESS_ONCE(ads->ctl13) = 0;
42840- ACCESS_ONCE(ads->ctl14) = 0;
42841+ ACCESS_ONCE_RW(ads->ctl13) = 0;
42842+ ACCESS_ONCE_RW(ads->ctl14) = 0;
42843 }
42844
42845 ads->ctl20 = 0;
42846@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
42847
42848 ctl17 = SM(i->keytype, AR_EncrType);
42849 if (!i->is_first) {
42850- ACCESS_ONCE(ads->ctl11) = 0;
42851- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
42852- ACCESS_ONCE(ads->ctl15) = 0;
42853- ACCESS_ONCE(ads->ctl16) = 0;
42854- ACCESS_ONCE(ads->ctl17) = ctl17;
42855- ACCESS_ONCE(ads->ctl18) = 0;
42856- ACCESS_ONCE(ads->ctl19) = 0;
42857+ ACCESS_ONCE_RW(ads->ctl11) = 0;
42858+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
42859+ ACCESS_ONCE_RW(ads->ctl15) = 0;
42860+ ACCESS_ONCE_RW(ads->ctl16) = 0;
42861+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
42862+ ACCESS_ONCE_RW(ads->ctl18) = 0;
42863+ ACCESS_ONCE_RW(ads->ctl19) = 0;
42864 return;
42865 }
42866
42867- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
42868+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
42869 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
42870 | SM(i->txpower, AR_XmitPower)
42871 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
42872@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
42873 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
42874 ctl12 |= SM(val, AR_PAPRDChainMask);
42875
42876- ACCESS_ONCE(ads->ctl12) = ctl12;
42877- ACCESS_ONCE(ads->ctl17) = ctl17;
42878+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
42879+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
42880
42881- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
42882+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
42883 | set11nPktDurRTSCTS(i->rates, 1);
42884
42885- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
42886+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
42887 | set11nPktDurRTSCTS(i->rates, 3);
42888
42889- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
42890+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
42891 | set11nRateFlags(i->rates, 1)
42892 | set11nRateFlags(i->rates, 2)
42893 | set11nRateFlags(i->rates, 3)
42894 | SM(i->rtscts_rate, AR_RTSCTSRate);
42895
42896- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
42897+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
42898 }
42899
42900 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
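[editor's note] The ath9k hunks swap ACCESS_ONCE() for ACCESS_ONCE_RW() on every descriptor-ring store. Under PaX, ACCESS_ONCE() is redefined with a const-qualified volatile cast so that writes through it are rejected once data is constified; the _RW variant keeps the volatile access but stays writable, which is what these legitimately mutated DMA descriptors need. The definitions below match the usual PaX spelling but are reproduced from memory, so treat them as an assumption:

```c
/* presumed PaX definitions (upstream ACCESS_ONCE carried no const): */
#define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))  /* read-only */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))        /* writable  */

struct desc { unsigned int ctl0; };

static void fill(struct desc *d, unsigned int v)
{
    ACCESS_ONCE_RW(d->ctl0) = v;   /* compiles */
    /* ACCESS_ONCE(d->ctl0) = v;      would fail: assignment to const */
}
```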
42901diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
42902index ae30343..a117806 100644
42903--- a/drivers/net/wireless/ath/ath9k/hw.h
42904+++ b/drivers/net/wireless/ath/ath9k/hw.h
42905@@ -652,7 +652,7 @@ struct ath_hw_private_ops {
42906
42907 /* ANI */
42908 void (*ani_cache_ini_regs)(struct ath_hw *ah);
42909-};
42910+} __no_const;
42911
42912 /**
42913 * struct ath_spec_scan - parameters for Atheros spectral scan
42914@@ -721,7 +721,7 @@ struct ath_hw_ops {
42915 struct ath_spec_scan *param);
42916 void (*spectral_scan_trigger)(struct ath_hw *ah);
42917 void (*spectral_scan_wait)(struct ath_hw *ah);
42918-};
42919+} __no_const;
42920
42921 struct ath_nf_limits {
42922 s16 max;
42923diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
42924index b37a582..680835d 100644
42925--- a/drivers/net/wireless/iwlegacy/3945-mac.c
42926+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
42927@@ -3639,7 +3639,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
42928 */
42929 if (il3945_mod_params.disable_hw_scan) {
42930 D_INFO("Disabling hw_scan\n");
42931- il3945_mac_ops.hw_scan = NULL;
42932+ pax_open_kernel();
42933+ *(void **)&il3945_mac_ops.hw_scan = NULL;
42934+ pax_close_kernel();
42935 }
42936
42937 D_INFO("*** LOAD DRIVER ***\n");
42938diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
42939index d532948..e0d8bb1 100644
42940--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
42941+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
42942@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
42943 {
42944 struct iwl_priv *priv = file->private_data;
42945 char buf[64];
42946- int buf_size;
42947+ size_t buf_size;
42948 u32 offset, len;
42949
42950 memset(buf, 0, sizeof(buf));
42951@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
42952 struct iwl_priv *priv = file->private_data;
42953
42954 char buf[8];
42955- int buf_size;
42956+ size_t buf_size;
42957 u32 reset_flag;
42958
42959 memset(buf, 0, sizeof(buf));
42960@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
42961 {
42962 struct iwl_priv *priv = file->private_data;
42963 char buf[8];
42964- int buf_size;
42965+ size_t buf_size;
42966 int ht40;
42967
42968 memset(buf, 0, sizeof(buf));
42969@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
42970 {
42971 struct iwl_priv *priv = file->private_data;
42972 char buf[8];
42973- int buf_size;
42974+ size_t buf_size;
42975 int value;
42976
42977 memset(buf, 0, sizeof(buf));
42978@@ -698,10 +698,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
42979 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
42980 DEBUGFS_READ_FILE_OPS(current_sleep_command);
42981
42982-static const char *fmt_value = " %-30s %10u\n";
42983-static const char *fmt_hex = " %-30s 0x%02X\n";
42984-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
42985-static const char *fmt_header =
42986+static const char fmt_value[] = " %-30s %10u\n";
42987+static const char fmt_hex[] = " %-30s 0x%02X\n";
42988+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
42989+static const char fmt_header[] =
42990 "%-32s current cumulative delta max\n";
42991
42992 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
42993@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
42994 {
42995 struct iwl_priv *priv = file->private_data;
42996 char buf[8];
42997- int buf_size;
42998+ size_t buf_size;
42999 int clear;
43000
43001 memset(buf, 0, sizeof(buf));
43002@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
43003 {
43004 struct iwl_priv *priv = file->private_data;
43005 char buf[8];
43006- int buf_size;
43007+ size_t buf_size;
43008 int trace;
43009
43010 memset(buf, 0, sizeof(buf));
43011@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
43012 {
43013 struct iwl_priv *priv = file->private_data;
43014 char buf[8];
43015- int buf_size;
43016+ size_t buf_size;
43017 int missed;
43018
43019 memset(buf, 0, sizeof(buf));
43020@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
43021
43022 struct iwl_priv *priv = file->private_data;
43023 char buf[8];
43024- int buf_size;
43025+ size_t buf_size;
43026 int plcp;
43027
43028 memset(buf, 0, sizeof(buf));
43029@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
43030
43031 struct iwl_priv *priv = file->private_data;
43032 char buf[8];
43033- int buf_size;
43034+ size_t buf_size;
43035 int flush;
43036
43037 memset(buf, 0, sizeof(buf));
43038@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
43039
43040 struct iwl_priv *priv = file->private_data;
43041 char buf[8];
43042- int buf_size;
43043+ size_t buf_size;
43044 int rts;
43045
43046 if (!priv->cfg->ht_params)
43047@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
43048 {
43049 struct iwl_priv *priv = file->private_data;
43050 char buf[8];
43051- int buf_size;
43052+ size_t buf_size;
43053
43054 memset(buf, 0, sizeof(buf));
43055 buf_size = min(count, sizeof(buf) - 1);
43056@@ -2254,7 +2254,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
43057 struct iwl_priv *priv = file->private_data;
43058 u32 event_log_flag;
43059 char buf[8];
43060- int buf_size;
43061+ size_t buf_size;
43062
43063 /* check that the interface is up */
43064 if (!iwl_is_ready(priv))
43065@@ -2308,7 +2308,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
43066 struct iwl_priv *priv = file->private_data;
43067 char buf[8];
43068 u32 calib_disabled;
43069- int buf_size;
43070+ size_t buf_size;
43071
43072 memset(buf, 0, sizeof(buf));
43073 buf_size = min(count, sizeof(buf) - 1);
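[editor's note] The blanket int-to-size_t change for buf_size in these debugfs write handlers keeps the byte count in one unsigned type from the size_t `count` argument all the way into copy_from_user(). A signed length that ever went negative would be converted to an enormous unsigned length at the copy; size_t removes the sign conversion entirely. A compilable illustration of that conversion hazard:

```c
#include <stdio.h>

/* stand-in for a copy routine that, like copy_from_user(), takes size_t */
static void copy_n(size_t n) { printf("copying %zu bytes\n", n); }

int main(void)
{
    int len = -1;    /* a signed length that went wrong somewhere */
    copy_n(len);     /* implicit conversion: SIZE_MAX, a huge copy */

    size_t slen = 0; /* unsigned lengths cannot go negative */
    copy_n(slen);
    return 0;
}
```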
43074diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
43075index 50ba0a4..29424e7 100644
43076--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
43077+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
43078@@ -1329,7 +1329,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
43079 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
43080
43081 char buf[8];
43082- int buf_size;
43083+ size_t buf_size;
43084 u32 reset_flag;
43085
43086 memset(buf, 0, sizeof(buf));
43087@@ -1350,7 +1350,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
43088 {
43089 struct iwl_trans *trans = file->private_data;
43090 char buf[8];
43091- int buf_size;
43092+ size_t buf_size;
43093 int csr;
43094
43095 memset(buf, 0, sizeof(buf));
43096diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
43097index cb34c78..9fec0dc 100644
43098--- a/drivers/net/wireless/mac80211_hwsim.c
43099+++ b/drivers/net/wireless/mac80211_hwsim.c
43100@@ -2195,25 +2195,19 @@ static int __init init_mac80211_hwsim(void)
43101
43102 if (channels > 1) {
43103 hwsim_if_comb.num_different_channels = channels;
43104- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
43105- mac80211_hwsim_ops.cancel_hw_scan =
43106- mac80211_hwsim_cancel_hw_scan;
43107- mac80211_hwsim_ops.sw_scan_start = NULL;
43108- mac80211_hwsim_ops.sw_scan_complete = NULL;
43109- mac80211_hwsim_ops.remain_on_channel =
43110- mac80211_hwsim_roc;
43111- mac80211_hwsim_ops.cancel_remain_on_channel =
43112- mac80211_hwsim_croc;
43113- mac80211_hwsim_ops.add_chanctx =
43114- mac80211_hwsim_add_chanctx;
43115- mac80211_hwsim_ops.remove_chanctx =
43116- mac80211_hwsim_remove_chanctx;
43117- mac80211_hwsim_ops.change_chanctx =
43118- mac80211_hwsim_change_chanctx;
43119- mac80211_hwsim_ops.assign_vif_chanctx =
43120- mac80211_hwsim_assign_vif_chanctx;
43121- mac80211_hwsim_ops.unassign_vif_chanctx =
43122- mac80211_hwsim_unassign_vif_chanctx;
43123+ pax_open_kernel();
43124+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
43125+ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
43126+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
43127+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
43128+ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
43129+ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
43130+ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
43131+ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
43132+ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
43133+ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
43134+ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
43135+ pax_close_kernel();
43136 }
43137
43138 spin_lock_init(&hwsim_radio_lock);
43139diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
43140index 8169a85..7fa3b47 100644
43141--- a/drivers/net/wireless/rndis_wlan.c
43142+++ b/drivers/net/wireless/rndis_wlan.c
43143@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
43144
43145 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
43146
43147- if (rts_threshold < 0 || rts_threshold > 2347)
43148+ if (rts_threshold > 2347)
43149 rts_threshold = 2347;
43150
43151 tmp = cpu_to_le32(rts_threshold);
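[editor's note] In the rndis_wlan hunk, rts_threshold is a u32, so the removed `rts_threshold < 0` half of the test was compile-time false (the kind of dead comparison -Wtype-limits flags); only the upper-bound clamp can ever fire, and it is kept. In miniature:

```c
#include <stdio.h>

int main(void)
{
    unsigned int rts = 5000;
    /* 'rts < 0' would be always-false for an unsigned type */
    if (rts > 2347)
        rts = 2347;
    printf("%u\n", rts);
    return 0;
}
```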
43152diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
43153index 7510723..5ba37f5 100644
43154--- a/drivers/net/wireless/rt2x00/rt2x00.h
43155+++ b/drivers/net/wireless/rt2x00/rt2x00.h
43156@@ -386,7 +386,7 @@ struct rt2x00_intf {
43157 * for hardware which doesn't support hardware
43158 * sequence counting.
43159 */
43160- atomic_t seqno;
43161+ atomic_unchecked_t seqno;
43162 };
43163
43164 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
43165diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
43166index d955741..8730748 100644
43167--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
43168+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
43169@@ -252,9 +252,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
43170 * sequence counter given by mac80211.
43171 */
43172 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
43173- seqno = atomic_add_return(0x10, &intf->seqno);
43174+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
43175 else
43176- seqno = atomic_read(&intf->seqno);
43177+ seqno = atomic_read_unchecked(&intf->seqno);
43178
43179 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
43180 hdr->seq_ctrl |= cpu_to_le16(seqno);
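[editor's note] The rt2x00 hunks show the flip side of PaX REFCOUNT hardening: atomic_t operations gain overflow detection to stop reference-count wraps, so counters that are supposed to wrap, like this 802.11 sequence number stepped by 0x10, must opt out via atomic_unchecked_t and atomic_add_return_unchecked(). A hedged sketch of the split; the fallback shape when the hardening is compiled out is an assumption:

```c
/* with REFCOUNT off, the _unchecked variants alias the plain ones;
 * with it on, plain atomic_t ops trap on overflow while these wrap */
typedef struct { int counter; } atomic_unchecked_t;

static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
{
    return __sync_add_and_fetch(&v->counter, i);   /* wraps silently */
}

static inline unsigned short next_seqno(atomic_unchecked_t *seq)
{
    /* IEEE 802.11 seq_ctrl: the sequence number lives in bits 4..15 */
    return (unsigned short)(atomic_add_return_unchecked(0x10, seq) & 0xfff0);
}
```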
43181diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
43182index e2b3d9c..67a5184 100644
43183--- a/drivers/net/wireless/ti/wl1251/sdio.c
43184+++ b/drivers/net/wireless/ti/wl1251/sdio.c
43185@@ -271,13 +271,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
43186
43187 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
43188
43189- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
43190- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
43191+ pax_open_kernel();
43192+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
43193+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
43194+ pax_close_kernel();
43195
43196 wl1251_info("using dedicated interrupt line");
43197 } else {
43198- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
43199- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
43200+ pax_open_kernel();
43201+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
43202+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
43203+ pax_close_kernel();
43204
43205 wl1251_info("using SDIO interrupt");
43206 }
43207diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
43208index 1c627da..69f7d17 100644
43209--- a/drivers/net/wireless/ti/wl12xx/main.c
43210+++ b/drivers/net/wireless/ti/wl12xx/main.c
43211@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
43212 sizeof(wl->conf.mem));
43213
43214 /* read data preparation is only needed by wl127x */
43215- wl->ops->prepare_read = wl127x_prepare_read;
43216+ pax_open_kernel();
43217+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
43218+ pax_close_kernel();
43219
43220 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
43221 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
43222@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
43223 sizeof(wl->conf.mem));
43224
43225 /* read data preparation is only needed by wl127x */
43226- wl->ops->prepare_read = wl127x_prepare_read;
43227+ pax_open_kernel();
43228+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
43229+ pax_close_kernel();
43230
43231 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
43232 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
43233diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
43234index 9fa692d..b31fee0 100644
43235--- a/drivers/net/wireless/ti/wl18xx/main.c
43236+++ b/drivers/net/wireless/ti/wl18xx/main.c
43237@@ -1687,8 +1687,10 @@ static int wl18xx_setup(struct wl1271 *wl)
43238 }
43239
43240 if (!checksum_param) {
43241- wl18xx_ops.set_rx_csum = NULL;
43242- wl18xx_ops.init_vif = NULL;
43243+ pax_open_kernel();
43244+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
43245+ *(void **)&wl18xx_ops.init_vif = NULL;
43246+ pax_close_kernel();
43247 }
43248
43249 /* Enable 11a Band only if we have 5G antennas */
43250diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
43251index 7ef0b4a..ff65c28 100644
43252--- a/drivers/net/wireless/zd1211rw/zd_usb.c
43253+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
43254@@ -386,7 +386,7 @@ static inline void handle_regs_int(struct urb *urb)
43255 {
43256 struct zd_usb *usb = urb->context;
43257 struct zd_usb_interrupt *intr = &usb->intr;
43258- int len;
43259+ unsigned int len;
43260 u16 int_num;
43261
43262 ZD_ASSERT(in_interrupt());
43263diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
43264index d93b2b6..ae50401 100644
43265--- a/drivers/oprofile/buffer_sync.c
43266+++ b/drivers/oprofile/buffer_sync.c
43267@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
43268 if (cookie == NO_COOKIE)
43269 offset = pc;
43270 if (cookie == INVALID_COOKIE) {
43271- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
43272+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
43273 offset = pc;
43274 }
43275 if (cookie != last_cookie) {
43276@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
43277 /* add userspace sample */
43278
43279 if (!mm) {
43280- atomic_inc(&oprofile_stats.sample_lost_no_mm);
43281+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
43282 return 0;
43283 }
43284
43285 cookie = lookup_dcookie(mm, s->eip, &offset);
43286
43287 if (cookie == INVALID_COOKIE) {
43288- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
43289+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
43290 return 0;
43291 }
43292
43293@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
43294 /* ignore backtraces if failed to add a sample */
43295 if (state == sb_bt_start) {
43296 state = sb_bt_ignore;
43297- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
43298+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
43299 }
43300 }
43301 release_mm(mm);
43302diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
43303index c0cc4e7..44d4e54 100644
43304--- a/drivers/oprofile/event_buffer.c
43305+++ b/drivers/oprofile/event_buffer.c
43306@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
43307 }
43308
43309 if (buffer_pos == buffer_size) {
43310- atomic_inc(&oprofile_stats.event_lost_overflow);
43311+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
43312 return;
43313 }
43314
43315diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
43316index ed2c3ec..deda85a 100644
43317--- a/drivers/oprofile/oprof.c
43318+++ b/drivers/oprofile/oprof.c
43319@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
43320 if (oprofile_ops.switch_events())
43321 return;
43322
43323- atomic_inc(&oprofile_stats.multiplex_counter);
43324+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
43325 start_switch_worker();
43326 }
43327
43328diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
43329index 84a208d..d61b0a1 100644
43330--- a/drivers/oprofile/oprofile_files.c
43331+++ b/drivers/oprofile/oprofile_files.c
43332@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
43333
43334 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
43335
43336-static ssize_t timeout_read(struct file *file, char __user *buf,
43337+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
43338 size_t count, loff_t *offset)
43339 {
43340 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
43341diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
43342index 917d28e..d62d981 100644
43343--- a/drivers/oprofile/oprofile_stats.c
43344+++ b/drivers/oprofile/oprofile_stats.c
43345@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
43346 cpu_buf->sample_invalid_eip = 0;
43347 }
43348
43349- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
43350- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
43351- atomic_set(&oprofile_stats.event_lost_overflow, 0);
43352- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
43353- atomic_set(&oprofile_stats.multiplex_counter, 0);
43354+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
43355+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
43356+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
43357+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
43358+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
43359 }
43360
43361
43362diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
43363index 38b6fc0..b5cbfce 100644
43364--- a/drivers/oprofile/oprofile_stats.h
43365+++ b/drivers/oprofile/oprofile_stats.h
43366@@ -13,11 +13,11 @@
43367 #include <linux/atomic.h>
43368
43369 struct oprofile_stat_struct {
43370- atomic_t sample_lost_no_mm;
43371- atomic_t sample_lost_no_mapping;
43372- atomic_t bt_lost_no_mapping;
43373- atomic_t event_lost_overflow;
43374- atomic_t multiplex_counter;
43375+ atomic_unchecked_t sample_lost_no_mm;
43376+ atomic_unchecked_t sample_lost_no_mapping;
43377+ atomic_unchecked_t bt_lost_no_mapping;
43378+ atomic_unchecked_t event_lost_overflow;
43379+ atomic_unchecked_t multiplex_counter;
43380 };
43381
43382 extern struct oprofile_stat_struct oprofile_stats;
43383diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
43384index 7c12d9c..558bf3bb 100644
43385--- a/drivers/oprofile/oprofilefs.c
43386+++ b/drivers/oprofile/oprofilefs.c
43387@@ -190,7 +190,7 @@ static const struct file_operations atomic_ro_fops = {
43388
43389
43390 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
43391- char const *name, atomic_t *val)
43392+ char const *name, atomic_unchecked_t *val)
43393 {
43394 return __oprofilefs_create_file(sb, root, name,
43395 &atomic_ro_fops, 0444, val);
43396diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
43397index 93404f7..4a313d8 100644
43398--- a/drivers/oprofile/timer_int.c
43399+++ b/drivers/oprofile/timer_int.c
43400@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
43401 return NOTIFY_OK;
43402 }
43403
43404-static struct notifier_block __refdata oprofile_cpu_notifier = {
43405+static struct notifier_block oprofile_cpu_notifier = {
43406 .notifier_call = oprofile_cpu_notify,
43407 };
43408
43409diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
43410index 92ed045..62d39bd7 100644
43411--- a/drivers/parport/procfs.c
43412+++ b/drivers/parport/procfs.c
43413@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
43414
43415 *ppos += len;
43416
43417- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
43418+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
43419 }
43420
43421 #ifdef CONFIG_PARPORT_1284
43422@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
43423
43424 *ppos += len;
43425
43426- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
43427+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
43428 }
43429 #endif /* IEEE1284.3 support. */
43430
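[editor's note] Both parport procfs handlers format a reply into a fixed stack buffer and previously trusted len at the copy; the added `len > sizeof buffer` guard turns an oversized length into -EFAULT instead of copying stack memory beyond the buffer out to userspace. The same guard, compressed into plain C with a stand-in for copy_to_user():

```c
#include <errno.h>
#include <string.h>

static int copy_out(void *dst, const void *src, size_t n)
{
    memcpy(dst, src, n);   /* stand-in: copy_to_user() returns 0 on success */
    return 0;
}

static int report(void *result, const char *msg, size_t len)
{
    char buffer[256];
    memcpy(buffer, msg, strnlen(msg, sizeof(buffer)));

    /* reject lengths that would leak stack memory past the buffer */
    if (len > sizeof(buffer) || copy_out(result, buffer, len))
        return -EFAULT;
    return 0;
}
```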
43431diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
43432index c35e8ad..fc33beb 100644
43433--- a/drivers/pci/hotplug/acpiphp_ibm.c
43434+++ b/drivers/pci/hotplug/acpiphp_ibm.c
43435@@ -464,7 +464,9 @@ static int __init ibm_acpiphp_init(void)
43436 goto init_cleanup;
43437 }
43438
43439- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
43440+ pax_open_kernel();
43441+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
43442+ pax_close_kernel();
43443 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
43444
43445 return retval;
43446diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
43447index a6a71c4..c91097b 100644
43448--- a/drivers/pci/hotplug/cpcihp_generic.c
43449+++ b/drivers/pci/hotplug/cpcihp_generic.c
43450@@ -73,7 +73,6 @@ static u16 port;
43451 static unsigned int enum_bit;
43452 static u8 enum_mask;
43453
43454-static struct cpci_hp_controller_ops generic_hpc_ops;
43455 static struct cpci_hp_controller generic_hpc;
43456
43457 static int __init validate_parameters(void)
43458@@ -139,6 +138,10 @@ static int query_enum(void)
43459 return ((value & enum_mask) == enum_mask);
43460 }
43461
43462+static struct cpci_hp_controller_ops generic_hpc_ops = {
43463+ .query_enum = query_enum,
43464+};
43465+
43466 static int __init cpcihp_generic_init(void)
43467 {
43468 int status;
43469@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
43470 pci_dev_put(dev);
43471
43472 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
43473- generic_hpc_ops.query_enum = query_enum;
43474 generic_hpc.ops = &generic_hpc_ops;
43475
43476 status = cpci_hp_register_controller(&generic_hpc);
43477diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
43478index 449b4bb..257e2e8 100644
43479--- a/drivers/pci/hotplug/cpcihp_zt5550.c
43480+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
43481@@ -59,7 +59,6 @@
43482 /* local variables */
43483 static bool debug;
43484 static bool poll;
43485-static struct cpci_hp_controller_ops zt5550_hpc_ops;
43486 static struct cpci_hp_controller zt5550_hpc;
43487
43488 /* Primary cPCI bus bridge device */
43489@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
43490 return 0;
43491 }
43492
43493+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
43494+ .query_enum = zt5550_hc_query_enum,
43495+};
43496+
43497 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
43498 {
43499 int status;
43500@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
43501 dbg("returned from zt5550_hc_config");
43502
43503 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
43504- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
43505 zt5550_hpc.ops = &zt5550_hpc_ops;
43506 if(!poll) {
43507 zt5550_hpc.irq = hc_dev->irq;
43508 zt5550_hpc.irq_flags = IRQF_SHARED;
43509 zt5550_hpc.dev_id = hc_dev;
43510
43511- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
43512- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
43513- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
43514+ pax_open_kernel();
43515+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
43516+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
43517+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
43518+	pax_close_kernel();
43519 } else {
43520 info("using ENUM# polling mode");
43521 }
43522diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
43523index 76ba8a1..20ca857 100644
43524--- a/drivers/pci/hotplug/cpqphp_nvram.c
43525+++ b/drivers/pci/hotplug/cpqphp_nvram.c
43526@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
43527
43528 void compaq_nvram_init (void __iomem *rom_start)
43529 {
43530+
43531+#ifndef CONFIG_PAX_KERNEXEC
43532 if (rom_start) {
43533 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
43534 }
43535+#endif
43536+
43537 dbg("int15 entry = %p\n", compaq_int15_entry_point);
43538
43539 /* initialize our int15 lock */
43540diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
43541index ec20f74..c1d961e 100644
43542--- a/drivers/pci/hotplug/pci_hotplug_core.c
43543+++ b/drivers/pci/hotplug/pci_hotplug_core.c
43544@@ -441,8 +441,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
43545 return -EINVAL;
43546 }
43547
43548- slot->ops->owner = owner;
43549- slot->ops->mod_name = mod_name;
43550+ pax_open_kernel();
43551+ *(struct module **)&slot->ops->owner = owner;
43552+ *(const char **)&slot->ops->mod_name = mod_name;
43553+ pax_close_kernel();
43554
43555 mutex_lock(&pci_hp_mutex);
43556 /*
43557diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
43558index 7d72c5e..edce02c 100644
43559--- a/drivers/pci/hotplug/pciehp_core.c
43560+++ b/drivers/pci/hotplug/pciehp_core.c
43561@@ -91,7 +91,7 @@ static int init_slot(struct controller *ctrl)
43562 struct slot *slot = ctrl->slot;
43563 struct hotplug_slot *hotplug = NULL;
43564 struct hotplug_slot_info *info = NULL;
43565- struct hotplug_slot_ops *ops = NULL;
43566+ hotplug_slot_ops_no_const *ops = NULL;
43567 char name[SLOT_NAME_SIZE];
43568 int retval = -ENOMEM;
43569
43570diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
43571index 5b4a9d9..cd5ac1f 100644
43572--- a/drivers/pci/pci-sysfs.c
43573+++ b/drivers/pci/pci-sysfs.c
43574@@ -1071,7 +1071,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
43575 {
43576 /* allocate attribute structure, piggyback attribute name */
43577 int name_len = write_combine ? 13 : 10;
43578- struct bin_attribute *res_attr;
43579+ bin_attribute_no_const *res_attr;
43580 int retval;
43581
43582 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
43583@@ -1256,7 +1256,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
43584 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
43585 {
43586 int retval;
43587- struct bin_attribute *attr;
43588+ bin_attribute_no_const *attr;
43589
43590 /* If the device has VPD, try to expose it in sysfs. */
43591 if (dev->vpd) {
43592@@ -1303,7 +1303,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
43593 {
43594 int retval;
43595 int rom_size = 0;
43596- struct bin_attribute *attr;
43597+ bin_attribute_no_const *attr;
43598
43599 if (!sysfs_initialized)
43600 return -EACCES;
43601diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
43602index d1182c4..2a138ec 100644
43603--- a/drivers/pci/pci.h
43604+++ b/drivers/pci/pci.h
43605@@ -92,7 +92,7 @@ struct pci_vpd_ops {
43606 struct pci_vpd {
43607 unsigned int len;
43608 const struct pci_vpd_ops *ops;
43609- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
43610+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
43611 };
43612
43613 int pci_vpd_pci22_init(struct pci_dev *dev);
43614diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
43615index d320df6..ca9a8f6 100644
43616--- a/drivers/pci/pcie/aspm.c
43617+++ b/drivers/pci/pcie/aspm.c
43618@@ -27,9 +27,9 @@
43619 #define MODULE_PARAM_PREFIX "pcie_aspm."
43620
43621 /* Note: those are not register definitions */
43622-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
43623-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
43624-#define ASPM_STATE_L1 (4) /* L1 state */
43625+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
43626+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
43627+#define ASPM_STATE_L1 (4U) /* L1 state */
43628 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
43629 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
43630
43631diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
43632index ea37072..10e58e56 100644
43633--- a/drivers/pci/probe.c
43634+++ b/drivers/pci/probe.c
43635@@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
43636 struct pci_bus_region region;
43637 bool bar_too_big = false, bar_disabled = false;
43638
43639- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
43640+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
43641
43642 /* No printks while decoding is disabled! */
43643 if (!dev->mmio_always_on) {
43644diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
43645index 0812608..b04018c4 100644
43646--- a/drivers/pci/proc.c
43647+++ b/drivers/pci/proc.c
43648@@ -453,7 +453,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
43649 static int __init pci_proc_init(void)
43650 {
43651 struct pci_dev *dev = NULL;
43652+
43653+#ifdef CONFIG_GRKERNSEC_PROC_ADD
43654+#ifdef CONFIG_GRKERNSEC_PROC_USER
43655+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
43656+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43657+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
43658+#endif
43659+#else
43660 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
43661+#endif
43662 proc_create("devices", 0, proc_bus_pci_dir,
43663 &proc_bus_pci_dev_operations);
43664 proc_initialized = 1;
43665diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/x86/chromeos_laptop.c
43666index 3e5b4497..dcdfb70 100644
43667--- a/drivers/platform/x86/chromeos_laptop.c
43668+++ b/drivers/platform/x86/chromeos_laptop.c
43669@@ -301,7 +301,7 @@ static int __init setup_tsl2563_als(const struct dmi_system_id *id)
43670 return 0;
43671 }
43672
43673-static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
43674+static struct dmi_system_id __initconst chromeos_laptop_dmi_table[] = {
43675 {
43676 .ident = "Samsung Series 5 550 - Touchpad",
43677 .matches = {
43678diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
43679index 6b22938..bc9700e 100644
43680--- a/drivers/platform/x86/msi-laptop.c
43681+++ b/drivers/platform/x86/msi-laptop.c
43682@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
43683
43684 if (!quirks->ec_read_only) {
43685 /* allow userland write sysfs file */
43686- dev_attr_bluetooth.store = store_bluetooth;
43687- dev_attr_wlan.store = store_wlan;
43688- dev_attr_threeg.store = store_threeg;
43689- dev_attr_bluetooth.attr.mode |= S_IWUSR;
43690- dev_attr_wlan.attr.mode |= S_IWUSR;
43691- dev_attr_threeg.attr.mode |= S_IWUSR;
43692+ pax_open_kernel();
43693+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
43694+ *(void **)&dev_attr_wlan.store = store_wlan;
43695+ *(void **)&dev_attr_threeg.store = store_threeg;
43696+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
43697+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
43698+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
43699+ pax_close_kernel();
43700 }
43701
43702 /* disable hardware control by fn key */
43703diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
43704index 2ac045f..39c443d 100644
43705--- a/drivers/platform/x86/sony-laptop.c
43706+++ b/drivers/platform/x86/sony-laptop.c
43707@@ -2483,7 +2483,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
43708 }
43709
43710 /* High speed charging function */
43711-static struct device_attribute *hsc_handle;
43712+static device_attribute_no_const *hsc_handle;
43713
43714 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
43715 struct device_attribute *attr,
43716diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
43717index 54d31c0..3f896d3 100644
43718--- a/drivers/platform/x86/thinkpad_acpi.c
43719+++ b/drivers/platform/x86/thinkpad_acpi.c
43720@@ -2093,7 +2093,7 @@ static int hotkey_mask_get(void)
43721 return 0;
43722 }
43723
43724-void static hotkey_mask_warn_incomplete_mask(void)
43725+static void hotkey_mask_warn_incomplete_mask(void)
43726 {
43727 /* log only what the user can fix... */
43728 const u32 wantedmask = hotkey_driver_mask &
43729@@ -2324,11 +2324,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
43730 }
43731 }
43732
43733-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
43734- struct tp_nvram_state *newn,
43735- const u32 event_mask)
43736-{
43737-
43738 #define TPACPI_COMPARE_KEY(__scancode, __member) \
43739 do { \
43740 if ((event_mask & (1 << __scancode)) && \
43741@@ -2342,36 +2337,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
43742 tpacpi_hotkey_send_key(__scancode); \
43743 } while (0)
43744
43745- void issue_volchange(const unsigned int oldvol,
43746- const unsigned int newvol)
43747- {
43748- unsigned int i = oldvol;
43749+static void issue_volchange(const unsigned int oldvol,
43750+ const unsigned int newvol,
43751+ const u32 event_mask)
43752+{
43753+ unsigned int i = oldvol;
43754
43755- while (i > newvol) {
43756- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
43757- i--;
43758- }
43759- while (i < newvol) {
43760- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
43761- i++;
43762- }
43763+ while (i > newvol) {
43764+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
43765+ i--;
43766 }
43767+ while (i < newvol) {
43768+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
43769+ i++;
43770+ }
43771+}
43772
43773- void issue_brightnesschange(const unsigned int oldbrt,
43774- const unsigned int newbrt)
43775- {
43776- unsigned int i = oldbrt;
43777+static void issue_brightnesschange(const unsigned int oldbrt,
43778+ const unsigned int newbrt,
43779+ const u32 event_mask)
43780+{
43781+ unsigned int i = oldbrt;
43782
43783- while (i > newbrt) {
43784- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
43785- i--;
43786- }
43787- while (i < newbrt) {
43788- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
43789- i++;
43790- }
43791+ while (i > newbrt) {
43792+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
43793+ i--;
43794+ }
43795+ while (i < newbrt) {
43796+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
43797+ i++;
43798 }
43799+}
43800
43801+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
43802+ struct tp_nvram_state *newn,
43803+ const u32 event_mask)
43804+{
43805 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
43806 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
43807 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
43808@@ -2405,7 +2406,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
43809 oldn->volume_level != newn->volume_level) {
43810 /* recently muted, or repeated mute keypress, or
43811 * multiple presses ending in mute */
43812- issue_volchange(oldn->volume_level, newn->volume_level);
43813+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
43814 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
43815 }
43816 } else {
43817@@ -2415,7 +2416,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
43818 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
43819 }
43820 if (oldn->volume_level != newn->volume_level) {
43821- issue_volchange(oldn->volume_level, newn->volume_level);
43822+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
43823 } else if (oldn->volume_toggle != newn->volume_toggle) {
43824 /* repeated vol up/down keypress at end of scale ? */
43825 if (newn->volume_level == 0)
43826@@ -2428,7 +2429,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
43827 /* handle brightness */
43828 if (oldn->brightness_level != newn->brightness_level) {
43829 issue_brightnesschange(oldn->brightness_level,
43830- newn->brightness_level);
43831+ newn->brightness_level,
43832+ event_mask);
43833 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
43834 /* repeated key presses that didn't change state */
43835 if (newn->brightness_level == 0)
43836@@ -2437,10 +2439,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
43837 && !tp_features.bright_unkfw)
43838 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
43839 }
43840+}
43841
43842 #undef TPACPI_COMPARE_KEY
43843 #undef TPACPI_MAY_SEND_KEY
43844-}
43845
43846 /*
43847 * Polling driver
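[editor's note] The large thinkpad_acpi hunk exists because issue_volchange() and issue_brightnesschange() were GCC nested functions defined inside hotkey_compare_and_issue_event(). When the address of a nested function escapes, GCC materialises a trampoline on the stack, which requires an executable stack and conflicts with NX/KERNEXEC, so grsecurity removes nested functions outright: they are hoisted to file-scope statics and the captured variable (event_mask) becomes an explicit parameter. The before/after shape, schematically (the nested form is a GCC extension):

```c
/* before: nested function capturing the enclosing frame */
static void handler_old(unsigned int event_mask)
{
    void issue(unsigned int oldv, unsigned int newv)
    {
        (void)oldv; (void)newv;
        (void)event_mask;          /* captured from the enclosing frame */
    }
    issue(0, 1);
}

/* after: plain static function; the capture becomes a parameter */
static void issue(unsigned int oldv, unsigned int newv,
                  unsigned int event_mask)
{
    (void)oldv; (void)newv; (void)event_mask;
}

static void handler_new(unsigned int event_mask)
{
    issue(0, 1, event_mask);
}
```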
43848diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
43849index 769d265..a3a05ca 100644
43850--- a/drivers/pnp/pnpbios/bioscalls.c
43851+++ b/drivers/pnp/pnpbios/bioscalls.c
43852@@ -58,7 +58,7 @@ do { \
43853 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
43854 } while(0)
43855
43856-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
43857+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
43858 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
43859
43860 /*
43861@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
43862
43863 cpu = get_cpu();
43864 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
43865+
43866+ pax_open_kernel();
43867 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
43868+ pax_close_kernel();
43869
43870 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
43871 spin_lock_irqsave(&pnp_bios_lock, flags);
43872@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
43873 :"memory");
43874 spin_unlock_irqrestore(&pnp_bios_lock, flags);
43875
43876+ pax_open_kernel();
43877 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
43878+ pax_close_kernel();
43879+
43880 put_cpu();
43881
43882 /* If we get here and this is set then the PnP BIOS faulted on us. */
43883@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
43884 return status;
43885 }
43886
43887-void pnpbios_calls_init(union pnp_bios_install_struct *header)
43888+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
43889 {
43890 int i;
43891
43892@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
43893 pnp_bios_callpoint.offset = header->fields.pm16offset;
43894 pnp_bios_callpoint.segment = PNP_CS16;
43895
43896+ pax_open_kernel();
43897+
43898 for_each_possible_cpu(i) {
43899 struct desc_struct *gdt = get_cpu_gdt_table(i);
43900 if (!gdt)
43901@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
43902 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
43903 (unsigned long)__va(header->fields.pm16dseg));
43904 }
43905+
43906+ pax_close_kernel();
43907 }
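[editor's note] The pnpbios hunk combines two moves: writes into the per-CPU GDT are bracketed with pax_open_kernel()/pax_close_kernel() because KERNEXEC keeps the GDT read-only, and the access byte in bad_bios_desc changes from 0x92 to 0x93 (GDT_ENTRY_INIT(0x4092) to 0x4093), pre-setting the descriptor's "accessed" bit so the CPU never needs to write it into the read-only table on first use. Reading of the bit layout here is mine, so treat it as an annotation:

```c
#include <stdio.h>

int main(void)
{
    unsigned int old = 0x92, new = 0x93;
    /* 0x92 = present | data segment | writable
     * 0x93 = the same with the 'accessed' bit (bit 0) pre-set, sparing
     * the CPU a write into a read-only GDT when the segment is loaded */
    printf("accessed bit: old=%u new=%u\n", old & 1, new & 1);
    return 0;
}
```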
43908diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
43909index 3e6db1c..1fbbdae 100644
43910--- a/drivers/pnp/resource.c
43911+++ b/drivers/pnp/resource.c
43912@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
43913 return 1;
43914
43915 /* check if the resource is valid */
43916- if (*irq < 0 || *irq > 15)
43917+ if (*irq > 15)
43918 return 0;
43919
43920 /* check if the resource is reserved */
43921@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
43922 return 1;
43923
43924 /* check if the resource is valid */
43925- if (*dma < 0 || *dma == 4 || *dma > 7)
43926+ if (*dma == 4 || *dma > 7)
43927 return 0;
43928
43929 /* check if the resource is reserved */
43930diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
43931index 0c52e2a..3421ab7 100644
43932--- a/drivers/power/pda_power.c
43933+++ b/drivers/power/pda_power.c
43934@@ -37,7 +37,11 @@ static int polling;
43935
43936 #if IS_ENABLED(CONFIG_USB_PHY)
43937 static struct usb_phy *transceiver;
43938-static struct notifier_block otg_nb;
43939+static int otg_handle_notification(struct notifier_block *nb,
43940+ unsigned long event, void *unused);
43941+static struct notifier_block otg_nb = {
43942+ .notifier_call = otg_handle_notification
43943+};
43944 #endif
43945
43946 static struct regulator *ac_draw;
43947@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
43948
43949 #if IS_ENABLED(CONFIG_USB_PHY)
43950 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
43951- otg_nb.notifier_call = otg_handle_notification;
43952 ret = usb_register_notifier(transceiver, &otg_nb);
43953 if (ret) {
43954 dev_err(dev, "failure to register otg notifier\n");
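[editor's note] In pda_power, otg_nb previously had its .notifier_call filled in at probe time, which forces the notifier_block to remain writable data. A forward declaration of the handler lets the block be initialised statically, so under the constify plugin it can live in read-only memory. The same shape in miniature, with kernel types mocked up for a self-contained build:

```c
#include <stdio.h>

struct notifier_block {
    int (*notifier_call)(struct notifier_block *nb, unsigned long ev, void *p);
};

/* forward declaration so the block below can be fully static data */
static int otg_handle_notification(struct notifier_block *nb,
                                   unsigned long event, void *unused);

static struct notifier_block otg_nb = {
    .notifier_call = otg_handle_notification,
};

static int otg_handle_notification(struct notifier_block *nb,
                                   unsigned long event, void *unused)
{
    printf("event %lu\n", event);
    return 0;
}

int main(void)
{
    return otg_nb.notifier_call(&otg_nb, 1, NULL);
}
```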
43955diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
43956index cc439fd..8fa30df 100644
43957--- a/drivers/power/power_supply.h
43958+++ b/drivers/power/power_supply.h
43959@@ -16,12 +16,12 @@ struct power_supply;
43960
43961 #ifdef CONFIG_SYSFS
43962
43963-extern void power_supply_init_attrs(struct device_type *dev_type);
43964+extern void power_supply_init_attrs(void);
43965 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
43966
43967 #else
43968
43969-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
43970+static inline void power_supply_init_attrs(void) {}
43971 #define power_supply_uevent NULL
43972
43973 #endif /* CONFIG_SYSFS */
43974diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
43975index 1c517c3..ffa2f17 100644
43976--- a/drivers/power/power_supply_core.c
43977+++ b/drivers/power/power_supply_core.c
43978@@ -24,7 +24,10 @@
43979 struct class *power_supply_class;
43980 EXPORT_SYMBOL_GPL(power_supply_class);
43981
43982-static struct device_type power_supply_dev_type;
43983+extern const struct attribute_group *power_supply_attr_groups[];
43984+static struct device_type power_supply_dev_type = {
43985+ .groups = power_supply_attr_groups,
43986+};
43987
43988 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
43989 struct power_supply *supply)
43990@@ -554,7 +557,7 @@ static int __init power_supply_class_init(void)
43991 return PTR_ERR(power_supply_class);
43992
43993 power_supply_class->dev_uevent = power_supply_uevent;
43994- power_supply_init_attrs(&power_supply_dev_type);
43995+ power_supply_init_attrs();
43996
43997 return 0;
43998 }
43999diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
44000index 29178f7..c65f324 100644
44001--- a/drivers/power/power_supply_sysfs.c
44002+++ b/drivers/power/power_supply_sysfs.c
44003@@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
44004 .is_visible = power_supply_attr_is_visible,
44005 };
44006
44007-static const struct attribute_group *power_supply_attr_groups[] = {
44008+const struct attribute_group *power_supply_attr_groups[] = {
44009 &power_supply_attr_group,
44010 NULL,
44011 };
44012
44013-void power_supply_init_attrs(struct device_type *dev_type)
44014+void power_supply_init_attrs(void)
44015 {
44016 int i;
44017
44018- dev_type->groups = power_supply_attr_groups;
44019-
44020 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
44021 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
44022 }
44023diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
44024index d428ef9..fdc0357 100644
44025--- a/drivers/regulator/max8660.c
44026+++ b/drivers/regulator/max8660.c
44027@@ -333,8 +333,10 @@ static int max8660_probe(struct i2c_client *client,
44028 max8660->shadow_regs[MAX8660_OVER1] = 5;
44029 } else {
44030 /* Otherwise devices can be toggled via software */
44031- max8660_dcdc_ops.enable = max8660_dcdc_enable;
44032- max8660_dcdc_ops.disable = max8660_dcdc_disable;
44033+ pax_open_kernel();
44034+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
44035+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
44036+ pax_close_kernel();
44037 }
44038
44039 /*
44040diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
44041index adb1414..c13e0ce 100644
44042--- a/drivers/regulator/max8973-regulator.c
44043+++ b/drivers/regulator/max8973-regulator.c
44044@@ -401,9 +401,11 @@ static int max8973_probe(struct i2c_client *client,
44045 if (!pdata->enable_ext_control) {
44046 max->desc.enable_reg = MAX8973_VOUT;
44047 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
44048- max8973_dcdc_ops.enable = regulator_enable_regmap;
44049- max8973_dcdc_ops.disable = regulator_disable_regmap;
44050- max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
44051+ pax_open_kernel();
44052+ *(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
44053+ *(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
44054+ *(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
44055+ pax_close_kernel();
44056 }
44057
44058 max->enable_external_control = pdata->enable_ext_control;
44059diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
44060index b716283..3cc4349 100644
44061--- a/drivers/regulator/mc13892-regulator.c
44062+++ b/drivers/regulator/mc13892-regulator.c
44063@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
44064 }
44065 mc13xxx_unlock(mc13892);
44066
44067- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
44068+ pax_open_kernel();
44069+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
44070 = mc13892_vcam_set_mode;
44071- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
44072+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
44073 = mc13892_vcam_get_mode;
44074+ pax_close_kernel();
44075
44076 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
44077 ARRAY_SIZE(mc13892_regulators));
44078diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
44079index f1cb706..4c7832a 100644
44080--- a/drivers/rtc/rtc-cmos.c
44081+++ b/drivers/rtc/rtc-cmos.c
44082@@ -724,7 +724,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
44083 hpet_rtc_timer_init();
44084
44085 /* export at least the first block of NVRAM */
44086- nvram.size = address_space - NVRAM_OFFSET;
44087+ pax_open_kernel();
44088+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
44089+ pax_close_kernel();
44090 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
44091 if (retval < 0) {
44092 dev_dbg(dev, "can't create nvram file? %d\n", retval);
44093diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
44094index d049393..bb20be0 100644
44095--- a/drivers/rtc/rtc-dev.c
44096+++ b/drivers/rtc/rtc-dev.c
44097@@ -16,6 +16,7 @@
44098 #include <linux/module.h>
44099 #include <linux/rtc.h>
44100 #include <linux/sched.h>
44101+#include <linux/grsecurity.h>
44102 #include "rtc-core.h"
44103
44104 static dev_t rtc_devt;
44105@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
44106 if (copy_from_user(&tm, uarg, sizeof(tm)))
44107 return -EFAULT;
44108
44109+ gr_log_timechange();
44110+
44111 return rtc_set_time(rtc, &tm);
44112
44113 case RTC_PIE_ON:
44114diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
44115index b53992a..776df84 100644
44116--- a/drivers/rtc/rtc-ds1307.c
44117+++ b/drivers/rtc/rtc-ds1307.c
44118@@ -107,7 +107,7 @@ struct ds1307 {
44119 u8 offset; /* register's offset */
44120 u8 regs[11];
44121 u16 nvram_offset;
44122- struct bin_attribute *nvram;
44123+ bin_attribute_no_const *nvram;
44124 enum ds_type type;
44125 unsigned long flags;
44126 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
44127diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
44128index 130f29a..6179d03 100644
44129--- a/drivers/rtc/rtc-m48t59.c
44130+++ b/drivers/rtc/rtc-m48t59.c
44131@@ -482,7 +482,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
44132 goto out;
44133 }
44134
44135- m48t59_nvram_attr.size = pdata->offset;
44136+ pax_open_kernel();
44137+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
44138+ pax_close_kernel();
44139
44140 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
44141 if (ret) {
44142diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
44143index e693af6..2e525b6 100644
44144--- a/drivers/scsi/bfa/bfa_fcpim.h
44145+++ b/drivers/scsi/bfa/bfa_fcpim.h
44146@@ -36,7 +36,7 @@ struct bfa_iotag_s {
44147
44148 struct bfa_itn_s {
44149 bfa_isr_func_t isr;
44150-};
44151+} __no_const;
44152
44153 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
44154 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
44155diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
44156index 23a90e7..9cf04ee 100644
44157--- a/drivers/scsi/bfa/bfa_ioc.h
44158+++ b/drivers/scsi/bfa/bfa_ioc.h
44159@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
44160 bfa_ioc_disable_cbfn_t disable_cbfn;
44161 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
44162 bfa_ioc_reset_cbfn_t reset_cbfn;
44163-};
44164+} __no_const;
44165
44166 /*
44167 * IOC event notification mechanism.
44168@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
44169 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
44170 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
44171 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
44172-};
44173+} __no_const;
44174
44175 /*
44176 * Queue element to wait for room in request queue. FIFO order is
44177diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
44178index df0c3c7..b00e1d0 100644
44179--- a/drivers/scsi/hosts.c
44180+++ b/drivers/scsi/hosts.c
44181@@ -42,7 +42,7 @@
44182 #include "scsi_logging.h"
44183
44184
44185-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
44186+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
44187
44188
44189 static void scsi_host_cls_release(struct device *dev)
44190@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
44191 * subtract one because we increment first then return, but we need to
44192 * know what the next host number was before increment
44193 */
44194- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
44195+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
44196 shost->dma_channel = 0xff;
44197
44198 /* These three are default values which can be overridden */
44199diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
44200index 7f4f790..b75b92a 100644
44201--- a/drivers/scsi/hpsa.c
44202+++ b/drivers/scsi/hpsa.c
44203@@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
44204 unsigned long flags;
44205
44206 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
44207- return h->access.command_completed(h, q);
44208+ return h->access->command_completed(h, q);
44209
44210 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
44211 a = rq->head[rq->current_entry];
44212@@ -3422,7 +3422,7 @@ static void start_io(struct ctlr_info *h)
44213 while (!list_empty(&h->reqQ)) {
44214 c = list_entry(h->reqQ.next, struct CommandList, list);
44215 /* can't do anything if fifo is full */
44216- if ((h->access.fifo_full(h))) {
44217+ if ((h->access->fifo_full(h))) {
44218 dev_warn(&h->pdev->dev, "fifo full\n");
44219 break;
44220 }
44221@@ -3444,7 +3444,7 @@ static void start_io(struct ctlr_info *h)
44222
44223 /* Tell the controller execute command */
44224 spin_unlock_irqrestore(&h->lock, flags);
44225- h->access.submit_command(h, c);
44226+ h->access->submit_command(h, c);
44227 spin_lock_irqsave(&h->lock, flags);
44228 }
44229 spin_unlock_irqrestore(&h->lock, flags);
44230@@ -3452,17 +3452,17 @@ static void start_io(struct ctlr_info *h)
44231
44232 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
44233 {
44234- return h->access.command_completed(h, q);
44235+ return h->access->command_completed(h, q);
44236 }
44237
44238 static inline bool interrupt_pending(struct ctlr_info *h)
44239 {
44240- return h->access.intr_pending(h);
44241+ return h->access->intr_pending(h);
44242 }
44243
44244 static inline long interrupt_not_for_us(struct ctlr_info *h)
44245 {
44246- return (h->access.intr_pending(h) == 0) ||
44247+ return (h->access->intr_pending(h) == 0) ||
44248 (h->interrupts_enabled == 0);
44249 }
44250
44251@@ -4364,7 +4364,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
44252 if (prod_index < 0)
44253 return -ENODEV;
44254 h->product_name = products[prod_index].product_name;
44255- h->access = *(products[prod_index].access);
44256+ h->access = products[prod_index].access;
44257
44258 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
44259 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
44260@@ -4646,7 +4646,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
44261
44262 assert_spin_locked(&lockup_detector_lock);
44263 remove_ctlr_from_lockup_detector_list(h);
44264- h->access.set_intr_mask(h, HPSA_INTR_OFF);
44265+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
44266 spin_lock_irqsave(&h->lock, flags);
44267 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
44268 spin_unlock_irqrestore(&h->lock, flags);
44269@@ -4823,7 +4823,7 @@ reinit_after_soft_reset:
44270 }
44271
44272 /* make sure the board interrupts are off */
44273- h->access.set_intr_mask(h, HPSA_INTR_OFF);
44274+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
44275
44276 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
44277 goto clean2;
44278@@ -4857,7 +4857,7 @@ reinit_after_soft_reset:
44279 * fake ones to scoop up any residual completions.
44280 */
44281 spin_lock_irqsave(&h->lock, flags);
44282- h->access.set_intr_mask(h, HPSA_INTR_OFF);
44283+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
44284 spin_unlock_irqrestore(&h->lock, flags);
44285 free_irqs(h);
44286 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
44287@@ -4876,9 +4876,9 @@ reinit_after_soft_reset:
44288 dev_info(&h->pdev->dev, "Board READY.\n");
44289 dev_info(&h->pdev->dev,
44290 "Waiting for stale completions to drain.\n");
44291- h->access.set_intr_mask(h, HPSA_INTR_ON);
44292+ h->access->set_intr_mask(h, HPSA_INTR_ON);
44293 msleep(10000);
44294- h->access.set_intr_mask(h, HPSA_INTR_OFF);
44295+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
44296
44297 rc = controller_reset_failed(h->cfgtable);
44298 if (rc)
44299@@ -4899,7 +4899,7 @@ reinit_after_soft_reset:
44300 }
44301
44302 /* Turn the interrupts on so we can service requests */
44303- h->access.set_intr_mask(h, HPSA_INTR_ON);
44304+ h->access->set_intr_mask(h, HPSA_INTR_ON);
44305
44306 hpsa_hba_inquiry(h);
44307 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
44308@@ -4954,7 +4954,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
44309 * To write all data in the battery backed cache to disks
44310 */
44311 hpsa_flush_cache(h);
44312- h->access.set_intr_mask(h, HPSA_INTR_OFF);
44313+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
44314 hpsa_free_irqs_and_disable_msix(h);
44315 }
44316
44317@@ -5122,7 +5122,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
44318 return;
44319 }
44320 /* Change the access methods to the performant access methods */
44321- h->access = SA5_performant_access;
44322+ h->access = &SA5_performant_access;
44323 h->transMethod = CFGTBL_Trans_Performant;
44324 }
44325
44326diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
44327index 9816479..c5d4e97 100644
44328--- a/drivers/scsi/hpsa.h
44329+++ b/drivers/scsi/hpsa.h
44330@@ -79,7 +79,7 @@ struct ctlr_info {
44331 unsigned int msix_vector;
44332 unsigned int msi_vector;
44333 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
44334- struct access_method access;
44335+ struct access_method *access;
44336
44337 /* queue and queue Info */
44338 struct list_head reqQ;
44339diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
44340index 8b928c6..9c76300 100644
44341--- a/drivers/scsi/libfc/fc_exch.c
44342+++ b/drivers/scsi/libfc/fc_exch.c
44343@@ -100,12 +100,12 @@ struct fc_exch_mgr {
44344 u16 pool_max_index;
44345
44346 struct {
44347- atomic_t no_free_exch;
44348- atomic_t no_free_exch_xid;
44349- atomic_t xid_not_found;
44350- atomic_t xid_busy;
44351- atomic_t seq_not_found;
44352- atomic_t non_bls_resp;
44353+ atomic_unchecked_t no_free_exch;
44354+ atomic_unchecked_t no_free_exch_xid;
44355+ atomic_unchecked_t xid_not_found;
44356+ atomic_unchecked_t xid_busy;
44357+ atomic_unchecked_t seq_not_found;
44358+ atomic_unchecked_t non_bls_resp;
44359 } stats;
44360 };
44361
44362@@ -736,7 +736,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
44363 /* allocate memory for exchange */
44364 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
44365 if (!ep) {
44366- atomic_inc(&mp->stats.no_free_exch);
44367+ atomic_inc_unchecked(&mp->stats.no_free_exch);
44368 goto out;
44369 }
44370 memset(ep, 0, sizeof(*ep));
44371@@ -797,7 +797,7 @@ out:
44372 return ep;
44373 err:
44374 spin_unlock_bh(&pool->lock);
44375- atomic_inc(&mp->stats.no_free_exch_xid);
44376+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
44377 mempool_free(ep, mp->ep_pool);
44378 return NULL;
44379 }
44380@@ -940,7 +940,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
44381 xid = ntohs(fh->fh_ox_id); /* we originated exch */
44382 ep = fc_exch_find(mp, xid);
44383 if (!ep) {
44384- atomic_inc(&mp->stats.xid_not_found);
44385+ atomic_inc_unchecked(&mp->stats.xid_not_found);
44386 reject = FC_RJT_OX_ID;
44387 goto out;
44388 }
44389@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
44390 ep = fc_exch_find(mp, xid);
44391 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
44392 if (ep) {
44393- atomic_inc(&mp->stats.xid_busy);
44394+ atomic_inc_unchecked(&mp->stats.xid_busy);
44395 reject = FC_RJT_RX_ID;
44396 goto rel;
44397 }
44398@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
44399 }
44400 xid = ep->xid; /* get our XID */
44401 } else if (!ep) {
44402- atomic_inc(&mp->stats.xid_not_found);
44403+ atomic_inc_unchecked(&mp->stats.xid_not_found);
44404 reject = FC_RJT_RX_ID; /* XID not found */
44405 goto out;
44406 }
44407@@ -998,7 +998,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
44408 } else {
44409 sp = &ep->seq;
44410 if (sp->id != fh->fh_seq_id) {
44411- atomic_inc(&mp->stats.seq_not_found);
44412+ atomic_inc_unchecked(&mp->stats.seq_not_found);
44413 if (f_ctl & FC_FC_END_SEQ) {
44414 /*
44415 * Update sequence_id based on incoming last
44416@@ -1448,22 +1448,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
44417
44418 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
44419 if (!ep) {
44420- atomic_inc(&mp->stats.xid_not_found);
44421+ atomic_inc_unchecked(&mp->stats.xid_not_found);
44422 goto out;
44423 }
44424 if (ep->esb_stat & ESB_ST_COMPLETE) {
44425- atomic_inc(&mp->stats.xid_not_found);
44426+ atomic_inc_unchecked(&mp->stats.xid_not_found);
44427 goto rel;
44428 }
44429 if (ep->rxid == FC_XID_UNKNOWN)
44430 ep->rxid = ntohs(fh->fh_rx_id);
44431 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
44432- atomic_inc(&mp->stats.xid_not_found);
44433+ atomic_inc_unchecked(&mp->stats.xid_not_found);
44434 goto rel;
44435 }
44436 if (ep->did != ntoh24(fh->fh_s_id) &&
44437 ep->did != FC_FID_FLOGI) {
44438- atomic_inc(&mp->stats.xid_not_found);
44439+ atomic_inc_unchecked(&mp->stats.xid_not_found);
44440 goto rel;
44441 }
44442 sof = fr_sof(fp);
44443@@ -1472,7 +1472,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
44444 sp->ssb_stat |= SSB_ST_RESP;
44445 sp->id = fh->fh_seq_id;
44446 } else if (sp->id != fh->fh_seq_id) {
44447- atomic_inc(&mp->stats.seq_not_found);
44448+ atomic_inc_unchecked(&mp->stats.seq_not_found);
44449 goto rel;
44450 }
44451
44452@@ -1536,9 +1536,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
44453 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
44454
44455 if (!sp)
44456- atomic_inc(&mp->stats.xid_not_found);
44457+ atomic_inc_unchecked(&mp->stats.xid_not_found);
44458 else
44459- atomic_inc(&mp->stats.non_bls_resp);
44460+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
44461
44462 fc_frame_free(fp);
44463 }
44464@@ -2185,13 +2185,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
44465
44466 list_for_each_entry(ema, &lport->ema_list, ema_list) {
44467 mp = ema->mp;
44468- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
44469+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
44470 st->fc_no_free_exch_xid +=
44471- atomic_read(&mp->stats.no_free_exch_xid);
44472- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
44473- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
44474- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
44475- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
44476+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
44477+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
44478+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
44479+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
44480+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
44481 }
44482 }
44483 EXPORT_SYMBOL(fc_exch_update_stats);
44484diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
44485index 161c98e..6d563b3 100644
44486--- a/drivers/scsi/libsas/sas_ata.c
44487+++ b/drivers/scsi/libsas/sas_ata.c
44488@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
44489 .postreset = ata_std_postreset,
44490 .error_handler = ata_std_error_handler,
44491 .post_internal_cmd = sas_ata_post_internal,
44492- .qc_defer = ata_std_qc_defer,
44493+ .qc_defer = ata_std_qc_defer,
44494 .qc_prep = ata_noop_qc_prep,
44495 .qc_issue = sas_ata_qc_issue,
44496 .qc_fill_rtf = sas_ata_qc_fill_rtf,
44497diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
44498index bcc56ca..6f4174a 100644
44499--- a/drivers/scsi/lpfc/lpfc.h
44500+++ b/drivers/scsi/lpfc/lpfc.h
44501@@ -431,7 +431,7 @@ struct lpfc_vport {
44502 struct dentry *debug_nodelist;
44503 struct dentry *vport_debugfs_root;
44504 struct lpfc_debugfs_trc *disc_trc;
44505- atomic_t disc_trc_cnt;
44506+ atomic_unchecked_t disc_trc_cnt;
44507 #endif
44508 uint8_t stat_data_enabled;
44509 uint8_t stat_data_blocked;
44510@@ -865,8 +865,8 @@ struct lpfc_hba {
44511 struct timer_list fabric_block_timer;
44512 unsigned long bit_flags;
44513 #define FABRIC_COMANDS_BLOCKED 0
44514- atomic_t num_rsrc_err;
44515- atomic_t num_cmd_success;
44516+ atomic_unchecked_t num_rsrc_err;
44517+ atomic_unchecked_t num_cmd_success;
44518 unsigned long last_rsrc_error_time;
44519 unsigned long last_ramp_down_time;
44520 unsigned long last_ramp_up_time;
44521@@ -902,7 +902,7 @@ struct lpfc_hba {
44522
44523 struct dentry *debug_slow_ring_trc;
44524 struct lpfc_debugfs_trc *slow_ring_trc;
44525- atomic_t slow_ring_trc_cnt;
44526+ atomic_unchecked_t slow_ring_trc_cnt;
44527 /* iDiag debugfs sub-directory */
44528 struct dentry *idiag_root;
44529 struct dentry *idiag_pci_cfg;
44530diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
44531index f525ecb..32549a4 100644
44532--- a/drivers/scsi/lpfc/lpfc_debugfs.c
44533+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
44534@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
44535
44536 #include <linux/debugfs.h>
44537
44538-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
44539+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
44540 static unsigned long lpfc_debugfs_start_time = 0L;
44541
44542 /* iDiag */
44543@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
44544 lpfc_debugfs_enable = 0;
44545
44546 len = 0;
44547- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
44548+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
44549 (lpfc_debugfs_max_disc_trc - 1);
44550 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
44551 dtp = vport->disc_trc + i;
44552@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
44553 lpfc_debugfs_enable = 0;
44554
44555 len = 0;
44556- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
44557+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
44558 (lpfc_debugfs_max_slow_ring_trc - 1);
44559 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
44560 dtp = phba->slow_ring_trc + i;
44561@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
44562 !vport || !vport->disc_trc)
44563 return;
44564
44565- index = atomic_inc_return(&vport->disc_trc_cnt) &
44566+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
44567 (lpfc_debugfs_max_disc_trc - 1);
44568 dtp = vport->disc_trc + index;
44569 dtp->fmt = fmt;
44570 dtp->data1 = data1;
44571 dtp->data2 = data2;
44572 dtp->data3 = data3;
44573- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
44574+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
44575 dtp->jif = jiffies;
44576 #endif
44577 return;
44578@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
44579 !phba || !phba->slow_ring_trc)
44580 return;
44581
44582- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
44583+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
44584 (lpfc_debugfs_max_slow_ring_trc - 1);
44585 dtp = phba->slow_ring_trc + index;
44586 dtp->fmt = fmt;
44587 dtp->data1 = data1;
44588 dtp->data2 = data2;
44589 dtp->data3 = data3;
44590- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
44591+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
44592 dtp->jif = jiffies;
44593 #endif
44594 return;
44595@@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
44596 "slow_ring buffer\n");
44597 goto debug_failed;
44598 }
44599- atomic_set(&phba->slow_ring_trc_cnt, 0);
44600+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
44601 memset(phba->slow_ring_trc, 0,
44602 (sizeof(struct lpfc_debugfs_trc) *
44603 lpfc_debugfs_max_slow_ring_trc));
44604@@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
44605 "buffer\n");
44606 goto debug_failed;
44607 }
44608- atomic_set(&vport->disc_trc_cnt, 0);
44609+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
44610
44611 snprintf(name, sizeof(name), "discovery_trace");
44612 vport->debug_disc_trc =
44613diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
44614index cb465b2..2e7b25f 100644
44615--- a/drivers/scsi/lpfc/lpfc_init.c
44616+++ b/drivers/scsi/lpfc/lpfc_init.c
44617@@ -10950,8 +10950,10 @@ lpfc_init(void)
44618 "misc_register returned with status %d", error);
44619
44620 if (lpfc_enable_npiv) {
44621- lpfc_transport_functions.vport_create = lpfc_vport_create;
44622- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
44623+ pax_open_kernel();
44624+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
44625+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
44626+ pax_close_kernel();
44627 }
44628 lpfc_transport_template =
44629 fc_attach_transport(&lpfc_transport_functions);
44630diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
44631index 8523b278e..ce1d812 100644
44632--- a/drivers/scsi/lpfc/lpfc_scsi.c
44633+++ b/drivers/scsi/lpfc/lpfc_scsi.c
44634@@ -331,7 +331,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
44635 uint32_t evt_posted;
44636
44637 spin_lock_irqsave(&phba->hbalock, flags);
44638- atomic_inc(&phba->num_rsrc_err);
44639+ atomic_inc_unchecked(&phba->num_rsrc_err);
44640 phba->last_rsrc_error_time = jiffies;
44641
44642 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
44643@@ -372,7 +372,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
44644 unsigned long flags;
44645 struct lpfc_hba *phba = vport->phba;
44646 uint32_t evt_posted;
44647- atomic_inc(&phba->num_cmd_success);
44648+ atomic_inc_unchecked(&phba->num_cmd_success);
44649
44650 if (vport->cfg_lun_queue_depth <= queue_depth)
44651 return;
44652@@ -416,8 +416,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
44653 unsigned long num_rsrc_err, num_cmd_success;
44654 int i;
44655
44656- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
44657- num_cmd_success = atomic_read(&phba->num_cmd_success);
44658+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
44659+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
44660
44661 /*
44662 * The error and success command counters are global per
44663@@ -445,8 +445,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
44664 }
44665 }
44666 lpfc_destroy_vport_work_array(phba, vports);
44667- atomic_set(&phba->num_rsrc_err, 0);
44668- atomic_set(&phba->num_cmd_success, 0);
44669+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
44670+ atomic_set_unchecked(&phba->num_cmd_success, 0);
44671 }
44672
44673 /**
44674@@ -480,8 +480,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
44675 }
44676 }
44677 lpfc_destroy_vport_work_array(phba, vports);
44678- atomic_set(&phba->num_rsrc_err, 0);
44679- atomic_set(&phba->num_cmd_success, 0);
44680+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
44681+ atomic_set_unchecked(&phba->num_cmd_success, 0);
44682 }
44683
44684 /**
44685diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
44686index 8e1b737..50ff510 100644
44687--- a/drivers/scsi/pmcraid.c
44688+++ b/drivers/scsi/pmcraid.c
44689@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
44690 res->scsi_dev = scsi_dev;
44691 scsi_dev->hostdata = res;
44692 res->change_detected = 0;
44693- atomic_set(&res->read_failures, 0);
44694- atomic_set(&res->write_failures, 0);
44695+ atomic_set_unchecked(&res->read_failures, 0);
44696+ atomic_set_unchecked(&res->write_failures, 0);
44697 rc = 0;
44698 }
44699 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
44700@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
44701
44702 /* If this was a SCSI read/write command keep count of errors */
44703 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
44704- atomic_inc(&res->read_failures);
44705+ atomic_inc_unchecked(&res->read_failures);
44706 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
44707- atomic_inc(&res->write_failures);
44708+ atomic_inc_unchecked(&res->write_failures);
44709
44710 if (!RES_IS_GSCSI(res->cfg_entry) &&
44711 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
44712@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
44713 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
44714 * hrrq_id assigned here in queuecommand
44715 */
44716- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
44717+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
44718 pinstance->num_hrrq;
44719 cmd->cmd_done = pmcraid_io_done;
44720
44721@@ -3846,7 +3846,7 @@ static long pmcraid_ioctl_passthrough(
44722 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
44723 * hrrq_id assigned here in queuecommand
44724 */
44725- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
44726+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
44727 pinstance->num_hrrq;
44728
44729 if (request_size) {
44730@@ -4483,7 +4483,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
44731
44732 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
44733 /* add resources only after host is added into system */
44734- if (!atomic_read(&pinstance->expose_resources))
44735+ if (!atomic_read_unchecked(&pinstance->expose_resources))
44736 return;
44737
44738 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
44739@@ -5310,8 +5310,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
44740 init_waitqueue_head(&pinstance->reset_wait_q);
44741
44742 atomic_set(&pinstance->outstanding_cmds, 0);
44743- atomic_set(&pinstance->last_message_id, 0);
44744- atomic_set(&pinstance->expose_resources, 0);
44745+ atomic_set_unchecked(&pinstance->last_message_id, 0);
44746+ atomic_set_unchecked(&pinstance->expose_resources, 0);
44747
44748 INIT_LIST_HEAD(&pinstance->free_res_q);
44749 INIT_LIST_HEAD(&pinstance->used_res_q);
44750@@ -6024,7 +6024,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
44751 /* Schedule worker thread to handle CCN and take care of adding and
44752 * removing devices to OS
44753 */
44754- atomic_set(&pinstance->expose_resources, 1);
44755+ atomic_set_unchecked(&pinstance->expose_resources, 1);
44756 schedule_work(&pinstance->worker_q);
44757 return rc;
44758
44759diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
44760index e1d150f..6c6df44 100644
44761--- a/drivers/scsi/pmcraid.h
44762+++ b/drivers/scsi/pmcraid.h
44763@@ -748,7 +748,7 @@ struct pmcraid_instance {
44764 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
44765
44766 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
44767- atomic_t last_message_id;
44768+ atomic_unchecked_t last_message_id;
44769
44770 /* configuration table */
44771 struct pmcraid_config_table *cfg_table;
44772@@ -777,7 +777,7 @@ struct pmcraid_instance {
44773 atomic_t outstanding_cmds;
44774
44775 /* should add/delete resources to mid-layer now ?*/
44776- atomic_t expose_resources;
44777+ atomic_unchecked_t expose_resources;
44778
44779
44780
44781@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
44782 struct pmcraid_config_table_entry_ext cfg_entry_ext;
44783 };
44784 struct scsi_device *scsi_dev; /* Link scsi_device structure */
44785- atomic_t read_failures; /* count of failed READ commands */
44786- atomic_t write_failures; /* count of failed WRITE commands */
44787+ atomic_unchecked_t read_failures; /* count of failed READ commands */
44788+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
44789
44790 /* To indicate add/delete/modify during CCN */
44791 u8 change_detected;
44792diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
44793index bf60c63..74d4dce 100644
44794--- a/drivers/scsi/qla2xxx/qla_attr.c
44795+++ b/drivers/scsi/qla2xxx/qla_attr.c
44796@@ -2001,7 +2001,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
44797 return 0;
44798 }
44799
44800-struct fc_function_template qla2xxx_transport_functions = {
44801+fc_function_template_no_const qla2xxx_transport_functions = {
44802
44803 .show_host_node_name = 1,
44804 .show_host_port_name = 1,
44805@@ -2048,7 +2048,7 @@ struct fc_function_template qla2xxx_transport_functions = {
44806 .bsg_timeout = qla24xx_bsg_timeout,
44807 };
44808
44809-struct fc_function_template qla2xxx_transport_vport_functions = {
44810+fc_function_template_no_const qla2xxx_transport_vport_functions = {
44811
44812 .show_host_node_name = 1,
44813 .show_host_port_name = 1,
44814diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
44815index 026bfde..90c4018 100644
44816--- a/drivers/scsi/qla2xxx/qla_gbl.h
44817+++ b/drivers/scsi/qla2xxx/qla_gbl.h
44818@@ -528,8 +528,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
44819 struct device_attribute;
44820 extern struct device_attribute *qla2x00_host_attrs[];
44821 struct fc_function_template;
44822-extern struct fc_function_template qla2xxx_transport_functions;
44823-extern struct fc_function_template qla2xxx_transport_vport_functions;
44824+extern fc_function_template_no_const qla2xxx_transport_functions;
44825+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
44826 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
44827 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
44828 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
44829diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
44830index ad72c1d..afc9a98 100644
44831--- a/drivers/scsi/qla2xxx/qla_os.c
44832+++ b/drivers/scsi/qla2xxx/qla_os.c
44833@@ -1571,8 +1571,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
44834 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
44835 /* Ok, a 64bit DMA mask is applicable. */
44836 ha->flags.enable_64bit_addressing = 1;
44837- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
44838- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
44839+ pax_open_kernel();
44840+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
44841+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
44842+ pax_close_kernel();
44843 return;
44844 }
44845 }
44846diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
44847index ddf16a8..80f4dd0 100644
44848--- a/drivers/scsi/qla4xxx/ql4_def.h
44849+++ b/drivers/scsi/qla4xxx/ql4_def.h
44850@@ -291,7 +291,7 @@ struct ddb_entry {
44851 * (4000 only) */
44852 atomic_t relogin_timer; /* Max Time to wait for
44853 * relogin to complete */
44854- atomic_t relogin_retry_count; /* Num of times relogin has been
44855+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
44856 * retried */
44857 uint32_t default_time2wait; /* Default Min time between
44858 * relogins (+aens) */
44859diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
44860index 4d231c1..2892c37 100644
44861--- a/drivers/scsi/qla4xxx/ql4_os.c
44862+++ b/drivers/scsi/qla4xxx/ql4_os.c
44863@@ -2971,12 +2971,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
44864 */
44865 if (!iscsi_is_session_online(cls_sess)) {
44866 /* Reset retry relogin timer */
44867- atomic_inc(&ddb_entry->relogin_retry_count);
44868+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
44869 DEBUG2(ql4_printk(KERN_INFO, ha,
44870 "%s: index[%d] relogin timed out-retrying"
44871 " relogin (%d), retry (%d)\n", __func__,
44872 ddb_entry->fw_ddb_index,
44873- atomic_read(&ddb_entry->relogin_retry_count),
44874+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
44875 ddb_entry->default_time2wait + 4));
44876 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
44877 atomic_set(&ddb_entry->retry_relogin_timer,
44878@@ -5081,7 +5081,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
44879
44880 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
44881 atomic_set(&ddb_entry->relogin_timer, 0);
44882- atomic_set(&ddb_entry->relogin_retry_count, 0);
44883+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
44884 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
44885 ddb_entry->default_relogin_timeout =
44886 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
44887diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
44888index eaa808e..95f8841 100644
44889--- a/drivers/scsi/scsi.c
44890+++ b/drivers/scsi/scsi.c
44891@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
44892 unsigned long timeout;
44893 int rtn = 0;
44894
44895- atomic_inc(&cmd->device->iorequest_cnt);
44896+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
44897
44898 /* check if the device is still usable */
44899 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
44900diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
44901index 86d5220..f22c51a 100644
44902--- a/drivers/scsi/scsi_lib.c
44903+++ b/drivers/scsi/scsi_lib.c
44904@@ -1458,7 +1458,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
44905 shost = sdev->host;
44906 scsi_init_cmd_errh(cmd);
44907 cmd->result = DID_NO_CONNECT << 16;
44908- atomic_inc(&cmd->device->iorequest_cnt);
44909+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
44910
44911 /*
44912 * SCSI request completion path will do scsi_device_unbusy(),
44913@@ -1484,9 +1484,9 @@ static void scsi_softirq_done(struct request *rq)
44914
44915 INIT_LIST_HEAD(&cmd->eh_entry);
44916
44917- atomic_inc(&cmd->device->iodone_cnt);
44918+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
44919 if (cmd->result)
44920- atomic_inc(&cmd->device->ioerr_cnt);
44921+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
44922
44923 disposition = scsi_decide_disposition(cmd);
44924 if (disposition != SUCCESS &&
44925diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
44926index 931a7d9..0c2a754 100644
44927--- a/drivers/scsi/scsi_sysfs.c
44928+++ b/drivers/scsi/scsi_sysfs.c
44929@@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
44930 char *buf) \
44931 { \
44932 struct scsi_device *sdev = to_scsi_device(dev); \
44933- unsigned long long count = atomic_read(&sdev->field); \
44934+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
44935 return snprintf(buf, 20, "0x%llx\n", count); \
44936 } \
44937 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
44938diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
44939index 84a1fdf..693b0d6 100644
44940--- a/drivers/scsi/scsi_tgt_lib.c
44941+++ b/drivers/scsi/scsi_tgt_lib.c
44942@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
44943 int err;
44944
44945 dprintk("%lx %u\n", uaddr, len);
44946- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
44947+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
44948 if (err) {
44949 /*
44950 * TODO: need to fixup sg_tablesize, max_segment_size,
44951diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
44952index e106c27..11a380e 100644
44953--- a/drivers/scsi/scsi_transport_fc.c
44954+++ b/drivers/scsi/scsi_transport_fc.c
44955@@ -497,7 +497,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
44956 * Netlink Infrastructure
44957 */
44958
44959-static atomic_t fc_event_seq;
44960+static atomic_unchecked_t fc_event_seq;
44961
44962 /**
44963 * fc_get_event_number - Obtain the next sequential FC event number
44964@@ -510,7 +510,7 @@ static atomic_t fc_event_seq;
44965 u32
44966 fc_get_event_number(void)
44967 {
44968- return atomic_add_return(1, &fc_event_seq);
44969+ return atomic_add_return_unchecked(1, &fc_event_seq);
44970 }
44971 EXPORT_SYMBOL(fc_get_event_number);
44972
44973@@ -654,7 +654,7 @@ static __init int fc_transport_init(void)
44974 {
44975 int error;
44976
44977- atomic_set(&fc_event_seq, 0);
44978+ atomic_set_unchecked(&fc_event_seq, 0);
44979
44980 error = transport_class_register(&fc_host_class);
44981 if (error)
44982@@ -844,7 +844,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
44983 char *cp;
44984
44985 *val = simple_strtoul(buf, &cp, 0);
44986- if ((*cp && (*cp != '\n')) || (*val < 0))
44987+ if (*cp && (*cp != '\n'))
44988 return -EINVAL;
44989 /*
44990 * Check for overflow; dev_loss_tmo is u32
44991diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
44992index 133926b..903000d 100644
44993--- a/drivers/scsi/scsi_transport_iscsi.c
44994+++ b/drivers/scsi/scsi_transport_iscsi.c
44995@@ -80,7 +80,7 @@ struct iscsi_internal {
44996 struct transport_container session_cont;
44997 };
44998
44999-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
45000+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
45001 static struct workqueue_struct *iscsi_eh_timer_workq;
45002
45003 static DEFINE_IDA(iscsi_sess_ida);
45004@@ -1738,7 +1738,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
45005 int err;
45006
45007 ihost = shost->shost_data;
45008- session->sid = atomic_add_return(1, &iscsi_session_nr);
45009+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
45010
45011 if (target_id == ISCSI_MAX_TARGET) {
45012 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
45013@@ -3944,7 +3944,7 @@ static __init int iscsi_transport_init(void)
45014 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
45015 ISCSI_TRANSPORT_VERSION);
45016
45017- atomic_set(&iscsi_session_nr, 0);
45018+ atomic_set_unchecked(&iscsi_session_nr, 0);
45019
45020 err = class_register(&iscsi_transport_class);
45021 if (err)
45022diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
45023index f379c7f..e8fc69c 100644
45024--- a/drivers/scsi/scsi_transport_srp.c
45025+++ b/drivers/scsi/scsi_transport_srp.c
45026@@ -33,7 +33,7 @@
45027 #include "scsi_transport_srp_internal.h"
45028
45029 struct srp_host_attrs {
45030- atomic_t next_port_id;
45031+ atomic_unchecked_t next_port_id;
45032 };
45033 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
45034
45035@@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
45036 struct Scsi_Host *shost = dev_to_shost(dev);
45037 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
45038
45039- atomic_set(&srp_host->next_port_id, 0);
45040+ atomic_set_unchecked(&srp_host->next_port_id, 0);
45041 return 0;
45042 }
45043
45044@@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
45045 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
45046 rport->roles = ids->roles;
45047
45048- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
45049+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
45050 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
45051
45052 transport_setup_device(&rport->dev);
45053diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
45054index 610417e..1544fa9 100644
45055--- a/drivers/scsi/sd.c
45056+++ b/drivers/scsi/sd.c
45057@@ -2928,7 +2928,7 @@ static int sd_probe(struct device *dev)
45058 sdkp->disk = gd;
45059 sdkp->index = index;
45060 atomic_set(&sdkp->openers, 0);
45061- atomic_set(&sdkp->device->ioerr_cnt, 0);
45062+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
45063
45064 if (!sdp->request_queue->rq_timeout) {
45065 if (sdp->type != TYPE_MOD)
45066diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
45067index df5e961..df6b97f 100644
45068--- a/drivers/scsi/sg.c
45069+++ b/drivers/scsi/sg.c
45070@@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
45071 sdp->disk->disk_name,
45072 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
45073 NULL,
45074- (char *)arg);
45075+ (char __user *)arg);
45076 case BLKTRACESTART:
45077 return blk_trace_startstop(sdp->device->request_queue, 1);
45078 case BLKTRACESTOP:
45079diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
45080index 32b7bb1..2f1c4bd 100644
45081--- a/drivers/spi/spi.c
45082+++ b/drivers/spi/spi.c
45083@@ -1631,7 +1631,7 @@ int spi_bus_unlock(struct spi_master *master)
45084 EXPORT_SYMBOL_GPL(spi_bus_unlock);
45085
45086 /* portable code must never pass more than 32 bytes */
45087-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
45088+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
45089
45090 static u8 *buf;
45091
45092diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
45093index 3675020..e80d92c 100644
45094--- a/drivers/staging/media/solo6x10/solo6x10-core.c
45095+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
45096@@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev)
45097
45098 static int solo_sysfs_init(struct solo_dev *solo_dev)
45099 {
45100- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
45101+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
45102 struct device *dev = &solo_dev->dev;
45103 const char *driver;
45104 int i;
45105diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
45106index 34afc16..ffe44dd 100644
45107--- a/drivers/staging/octeon/ethernet-rx.c
45108+++ b/drivers/staging/octeon/ethernet-rx.c
45109@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
45110 /* Increment RX stats for virtual ports */
45111 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
45112 #ifdef CONFIG_64BIT
45113- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
45114- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
45115+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
45116+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
45117 #else
45118- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
45119- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
45120+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
45121+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
45122 #endif
45123 }
45124 netif_receive_skb(skb);
45125@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
45126 dev->name);
45127 */
45128 #ifdef CONFIG_64BIT
45129- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
45130+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
45131 #else
45132- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
45133+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
45134 #endif
45135 dev_kfree_skb_irq(skb);
45136 }
45137diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
45138index c3a90e7..023619a 100644
45139--- a/drivers/staging/octeon/ethernet.c
45140+++ b/drivers/staging/octeon/ethernet.c
45141@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
45142 * since the RX tasklet also increments it.
45143 */
45144 #ifdef CONFIG_64BIT
45145- atomic64_add(rx_status.dropped_packets,
45146- (atomic64_t *)&priv->stats.rx_dropped);
45147+ atomic64_add_unchecked(rx_status.dropped_packets,
45148+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
45149 #else
45150- atomic_add(rx_status.dropped_packets,
45151- (atomic_t *)&priv->stats.rx_dropped);
45152+ atomic_add_unchecked(rx_status.dropped_packets,
45153+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
45154 #endif
45155 }
45156
45157diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
45158index dc23395..cf7e9b1 100644
45159--- a/drivers/staging/rtl8712/rtl871x_io.h
45160+++ b/drivers/staging/rtl8712/rtl871x_io.h
45161@@ -108,7 +108,7 @@ struct _io_ops {
45162 u8 *pmem);
45163 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
45164 u8 *pmem);
45165-};
45166+} __no_const;
45167
45168 struct io_req {
45169 struct list_head list;
45170diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
45171index 1f5088b..0e59820 100644
45172--- a/drivers/staging/sbe-2t3e3/netdev.c
45173+++ b/drivers/staging/sbe-2t3e3/netdev.c
45174@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
45175 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
45176
45177 if (rlen)
45178- if (copy_to_user(data, &resp, rlen))
45179+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
45180 return -EFAULT;
45181
45182 return 0;
45183diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
45184index a863a98..d272795 100644
45185--- a/drivers/staging/usbip/vhci.h
45186+++ b/drivers/staging/usbip/vhci.h
45187@@ -83,7 +83,7 @@ struct vhci_hcd {
45188 unsigned resuming:1;
45189 unsigned long re_timeout;
45190
45191- atomic_t seqnum;
45192+ atomic_unchecked_t seqnum;
45193
45194 /*
45195 * NOTE:
45196diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
45197index d7974cb..d78076b 100644
45198--- a/drivers/staging/usbip/vhci_hcd.c
45199+++ b/drivers/staging/usbip/vhci_hcd.c
45200@@ -441,7 +441,7 @@ static void vhci_tx_urb(struct urb *urb)
45201
45202 spin_lock(&vdev->priv_lock);
45203
45204- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
45205+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
45206 if (priv->seqnum == 0xffff)
45207 dev_info(&urb->dev->dev, "seqnum max\n");
45208
45209@@ -687,7 +687,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
45210 return -ENOMEM;
45211 }
45212
45213- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
45214+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
45215 if (unlink->seqnum == 0xffff)
45216 pr_info("seqnum max\n");
45217
45218@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
45219 vdev->rhport = rhport;
45220 }
45221
45222- atomic_set(&vhci->seqnum, 0);
45223+ atomic_set_unchecked(&vhci->seqnum, 0);
45224 spin_lock_init(&vhci->lock);
45225
45226 hcd->power_budget = 0; /* no limit */
45227diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
45228index d07fcb5..358e1e1 100644
45229--- a/drivers/staging/usbip/vhci_rx.c
45230+++ b/drivers/staging/usbip/vhci_rx.c
45231@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
45232 if (!urb) {
45233 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
45234 pr_info("max seqnum %d\n",
45235- atomic_read(&the_controller->seqnum));
45236+ atomic_read_unchecked(&the_controller->seqnum));
45237 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
45238 return;
45239 }
45240diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
45241index 8417c2f..ef5ebd6 100644
45242--- a/drivers/staging/vt6655/hostap.c
45243+++ b/drivers/staging/vt6655/hostap.c
45244@@ -69,14 +69,13 @@ static int msglevel = MSG_LEVEL_INFO;
45245 *
45246 */
45247
45248+static net_device_ops_no_const apdev_netdev_ops;
45249+
45250 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
45251 {
45252 PSDevice apdev_priv;
45253 struct net_device *dev = pDevice->dev;
45254 int ret;
45255- const struct net_device_ops apdev_netdev_ops = {
45256- .ndo_start_xmit = pDevice->tx_80211,
45257- };
45258
45259 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
45260
45261@@ -88,6 +87,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
45262 *apdev_priv = *pDevice;
45263 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
45264
45265+ /* only half broken now */
45266+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
45267 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
45268
45269 pDevice->apdev->type = ARPHRD_IEEE80211;
45270diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
45271index c699a30..b90a5fd 100644
45272--- a/drivers/staging/vt6656/hostap.c
45273+++ b/drivers/staging/vt6656/hostap.c
45274@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
45275 *
45276 */
45277
45278+static net_device_ops_no_const apdev_netdev_ops;
45279+
45280 static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
45281 {
45282 struct vnt_private *apdev_priv;
45283 struct net_device *dev = pDevice->dev;
45284 int ret;
45285- const struct net_device_ops apdev_netdev_ops = {
45286- .ndo_start_xmit = pDevice->tx_80211,
45287- };
45288
45289 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
45290
45291@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
45292 *apdev_priv = *pDevice;
45293 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
45294
45295+ /* only half broken now */
45296+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
45297 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
45298
45299 pDevice->apdev->type = ARPHRD_IEEE80211;
45300diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
45301index d7e51e4..d07eaab 100644
45302--- a/drivers/staging/zcache/tmem.c
45303+++ b/drivers/staging/zcache/tmem.c
45304@@ -51,7 +51,7 @@
45305 * A tmem host implementation must use this function to register callbacks
45306 * for memory allocation.
45307 */
45308-static struct tmem_hostops tmem_hostops;
45309+static tmem_hostops_no_const tmem_hostops;
45310
45311 static void tmem_objnode_tree_init(void);
45312
45313@@ -65,7 +65,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
45314 * A tmem host implementation must use this function to register
45315 * callbacks for a page-accessible memory (PAM) implementation.
45316 */
45317-static struct tmem_pamops tmem_pamops;
45318+static tmem_pamops_no_const tmem_pamops;
45319
45320 void tmem_register_pamops(struct tmem_pamops *m)
45321 {
45322diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
45323index d128ce2..a43980c 100644
45324--- a/drivers/staging/zcache/tmem.h
45325+++ b/drivers/staging/zcache/tmem.h
45326@@ -226,6 +226,7 @@ struct tmem_pamops {
45327 int (*replace_in_obj)(void *, struct tmem_obj *);
45328 #endif
45329 };
45330+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
45331 extern void tmem_register_pamops(struct tmem_pamops *m);
45332
45333 /* memory allocation methods provided by the host implementation */
45334@@ -235,6 +236,7 @@ struct tmem_hostops {
45335 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
45336 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
45337 };
45338+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
45339 extern void tmem_register_hostops(struct tmem_hostops *m);
45340
45341 /* core tmem accessor functions */
45342diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
45343index 4630481..c26782a 100644
45344--- a/drivers/target/target_core_device.c
45345+++ b/drivers/target/target_core_device.c
45346@@ -1400,7 +1400,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
45347 spin_lock_init(&dev->se_port_lock);
45348 spin_lock_init(&dev->se_tmr_lock);
45349 spin_lock_init(&dev->qf_cmd_lock);
45350- atomic_set(&dev->dev_ordered_id, 0);
45351+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
45352 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
45353 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
45354 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
45355diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
45356index 21e3158..43c6004 100644
45357--- a/drivers/target/target_core_transport.c
45358+++ b/drivers/target/target_core_transport.c
45359@@ -1080,7 +1080,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
45360 * Used to determine when ORDERED commands should go from
45361 * Dormant to Active status.
45362 */
45363- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
45364+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
45365 smp_mb__after_atomic_inc();
45366 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
45367 cmd->se_ordered_id, cmd->sam_task_attr,
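dev_ordered_id is an ID generator, not a reference count, so wraparound is harmless; under PaX's REFCOUNT hardening a plain atomic_t would trap on overflow, hence the switch to the *_unchecked variants here and in many hunks below. A userspace sketch of the intended semantics, using C11 atomics as a stand-in (atomic_unchecked_t itself is a grsecurity-only type):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint dev_ordered_id;    /* stand-in for atomic_unchecked_t */

    static unsigned int next_ordered_id(void)
    {
        /* atomic_inc_return_unchecked(): increment and return the new value,
         * with wraparound explicitly allowed */
        return atomic_fetch_add(&dev_ordered_id, 1) + 1;
    }

    int main(void)
    {
        atomic_store(&dev_ordered_id, ~0u - 1);   /* park near the wrap point */
        for (int i = 0; i < 3; i++)
            printf("%u\n", next_ordered_id());    /* 4294967295, 0, 1 */
        return 0;
    }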
45368diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
45369index 33f83fe..d80f8e1 100644
45370--- a/drivers/tty/cyclades.c
45371+++ b/drivers/tty/cyclades.c
45372@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
45373 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
45374 info->port.count);
45375 #endif
45376- info->port.count++;
45377+ atomic_inc(&info->port.count);
45378 #ifdef CY_DEBUG_COUNT
45379 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
45380- current->pid, info->port.count);
45381+ current->pid, atomic_read(&info->port.count));
45382 #endif
45383
45384 /*
45385@@ -3972,7 +3972,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
45386 for (j = 0; j < cy_card[i].nports; j++) {
45387 info = &cy_card[i].ports[j];
45388
45389- if (info->port.count) {
45390+ if (atomic_read(&info->port.count)) {
45391 /* XXX is the ldisc num worth this? */
45392 struct tty_struct *tty;
45393 struct tty_ldisc *ld;
45394diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
45395index eb255e8..f637a57 100644
45396--- a/drivers/tty/hvc/hvc_console.c
45397+++ b/drivers/tty/hvc/hvc_console.c
45398@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
45399
45400 spin_lock_irqsave(&hp->port.lock, flags);
45401 /* Check and then increment for fast path open. */
45402- if (hp->port.count++ > 0) {
45403+ if (atomic_inc_return(&hp->port.count) > 1) {
45404 spin_unlock_irqrestore(&hp->port.lock, flags);
45405 hvc_kick();
45406 return 0;
45407@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
45408
45409 spin_lock_irqsave(&hp->port.lock, flags);
45410
45411- if (--hp->port.count == 0) {
45412+ if (atomic_dec_return(&hp->port.count) == 0) {
45413 spin_unlock_irqrestore(&hp->port.lock, flags);
45414 /* We are done with the tty pointer now. */
45415 tty_port_tty_set(&hp->port, NULL);
45416@@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
45417 */
45418 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
45419 } else {
45420- if (hp->port.count < 0)
45421+ if (atomic_read(&hp->port.count) < 0)
45422 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
45423- hp->vtermno, hp->port.count);
45424+ hp->vtermno, atomic_read(&hp->port.count));
45425 spin_unlock_irqrestore(&hp->port.lock, flags);
45426 }
45427 }
45428@@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
45429 * open->hangup case this can be called after the final close so prevent
45430 * that from happening for now.
45431 */
45432- if (hp->port.count <= 0) {
45433+ if (atomic_read(&hp->port.count) <= 0) {
45434 spin_unlock_irqrestore(&hp->port.lock, flags);
45435 return;
45436 }
45437
45438- hp->port.count = 0;
45439+ atomic_set(&hp->port.count, 0);
45440 spin_unlock_irqrestore(&hp->port.lock, flags);
45441 tty_port_tty_set(&hp->port, NULL);
45442
45443@@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
45444 return -EPIPE;
45445
45446 /* FIXME what's this (unprotected) check for? */
45447- if (hp->port.count <= 0)
45448+ if (atomic_read(&hp->port.count) <= 0)
45449 return -EIO;
45450
45451 spin_lock_irqsave(&hp->lock, flags);
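The tty_port.count conversions above (and in the tty hunks that follow) preserve the old tests exactly: a post-increment check such as hp->port.count++ > 0 becomes atomic_inc_return(&hp->port.count) > 1 because *_return yields the new value, and --count == 0 becomes atomic_dec_return(...) == 0 for the same reason. A small self-checking sketch of the equivalence in userspace C11 atomics:

    #include <stdatomic.h>
    #include <assert.h>

    int main(void)
    {
        int plain = 0;
        atomic_int count = 0;

        /* open fast path: count++ > 0  <=>  inc_return > 1 */
        int was_open_plain  = (plain++ > 0);
        int was_open_atomic = (atomic_fetch_add(&count, 1) + 1 > 1);
        assert(was_open_plain == was_open_atomic);

        /* close path: --count == 0  <=>  dec_return == 0 */
        int last_close_plain  = (--plain == 0);
        int last_close_atomic = (atomic_fetch_sub(&count, 1) - 1 == 0);
        assert(last_close_plain == last_close_atomic);
        return 0;
    }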
45452diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
45453index 81e939e..95ead10 100644
45454--- a/drivers/tty/hvc/hvcs.c
45455+++ b/drivers/tty/hvc/hvcs.c
45456@@ -83,6 +83,7 @@
45457 #include <asm/hvcserver.h>
45458 #include <asm/uaccess.h>
45459 #include <asm/vio.h>
45460+#include <asm/local.h>
45461
45462 /*
45463 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
45464@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
45465
45466 spin_lock_irqsave(&hvcsd->lock, flags);
45467
45468- if (hvcsd->port.count > 0) {
45469+ if (atomic_read(&hvcsd->port.count) > 0) {
45470 spin_unlock_irqrestore(&hvcsd->lock, flags);
45471 printk(KERN_INFO "HVCS: vterm state unchanged. "
45472 "The hvcs device node is still in use.\n");
45473@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
45474 }
45475 }
45476
45477- hvcsd->port.count = 0;
45478+ atomic_set(&hvcsd->port.count, 0);
45479 hvcsd->port.tty = tty;
45480 tty->driver_data = hvcsd;
45481
45482@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
45483 unsigned long flags;
45484
45485 spin_lock_irqsave(&hvcsd->lock, flags);
45486- hvcsd->port.count++;
45487+ atomic_inc(&hvcsd->port.count);
45488 hvcsd->todo_mask |= HVCS_SCHED_READ;
45489 spin_unlock_irqrestore(&hvcsd->lock, flags);
45490
45491@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
45492 hvcsd = tty->driver_data;
45493
45494 spin_lock_irqsave(&hvcsd->lock, flags);
45495- if (--hvcsd->port.count == 0) {
45496+ if (atomic_dec_and_test(&hvcsd->port.count)) {
45497
45498 vio_disable_interrupts(hvcsd->vdev);
45499
45500@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
45501
45502 free_irq(irq, hvcsd);
45503 return;
45504- } else if (hvcsd->port.count < 0) {
45505+ } else if (atomic_read(&hvcsd->port.count) < 0) {
45506 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
45507 " is missmanaged.\n",
45508- hvcsd->vdev->unit_address, hvcsd->port.count);
45509+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
45510 }
45511
45512 spin_unlock_irqrestore(&hvcsd->lock, flags);
45513@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
45514
45515 spin_lock_irqsave(&hvcsd->lock, flags);
45516 /* Preserve this so that we know how many kref refs to put */
45517- temp_open_count = hvcsd->port.count;
45518+ temp_open_count = atomic_read(&hvcsd->port.count);
45519
45520 /*
45521 * Don't kref put inside the spinlock because the destruction
45522@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
45523 tty->driver_data = NULL;
45524 hvcsd->port.tty = NULL;
45525
45526- hvcsd->port.count = 0;
45527+ atomic_set(&hvcsd->port.count, 0);
45528
45529 /* This will drop any buffered data on the floor which is OK in a hangup
45530 * scenario. */
45531@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
45532 * the middle of a write operation? This is a crummy place to do this
45533 * but we want to keep it all in the spinlock.
45534 */
45535- if (hvcsd->port.count <= 0) {
45536+ if (atomic_read(&hvcsd->port.count) <= 0) {
45537 spin_unlock_irqrestore(&hvcsd->lock, flags);
45538 return -ENODEV;
45539 }
45540@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
45541 {
45542 struct hvcs_struct *hvcsd = tty->driver_data;
45543
45544- if (!hvcsd || hvcsd->port.count <= 0)
45545+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
45546 return 0;
45547
45548 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
45549diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
45550index 8fd72ff..34a0bed 100644
45551--- a/drivers/tty/ipwireless/tty.c
45552+++ b/drivers/tty/ipwireless/tty.c
45553@@ -29,6 +29,7 @@
45554 #include <linux/tty_driver.h>
45555 #include <linux/tty_flip.h>
45556 #include <linux/uaccess.h>
45557+#include <asm/local.h>
45558
45559 #include "tty.h"
45560 #include "network.h"
45561@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
45562 mutex_unlock(&tty->ipw_tty_mutex);
45563 return -ENODEV;
45564 }
45565- if (tty->port.count == 0)
45566+ if (atomic_read(&tty->port.count) == 0)
45567 tty->tx_bytes_queued = 0;
45568
45569- tty->port.count++;
45570+ atomic_inc(&tty->port.count);
45571
45572 tty->port.tty = linux_tty;
45573 linux_tty->driver_data = tty;
45574@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
45575
45576 static void do_ipw_close(struct ipw_tty *tty)
45577 {
45578- tty->port.count--;
45579-
45580- if (tty->port.count == 0) {
45581+ if (atomic_dec_return(&tty->port.count) == 0) {
45582 struct tty_struct *linux_tty = tty->port.tty;
45583
45584 if (linux_tty != NULL) {
45585@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
45586 return;
45587
45588 mutex_lock(&tty->ipw_tty_mutex);
45589- if (tty->port.count == 0) {
45590+ if (atomic_read(&tty->port.count) == 0) {
45591 mutex_unlock(&tty->ipw_tty_mutex);
45592 return;
45593 }
45594@@ -164,7 +163,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
45595
45596 mutex_lock(&tty->ipw_tty_mutex);
45597
45598- if (!tty->port.count) {
45599+ if (!atomic_read(&tty->port.count)) {
45600 mutex_unlock(&tty->ipw_tty_mutex);
45601 return;
45602 }
45603@@ -206,7 +205,7 @@ static int ipw_write(struct tty_struct *linux_tty,
45604 return -ENODEV;
45605
45606 mutex_lock(&tty->ipw_tty_mutex);
45607- if (!tty->port.count) {
45608+ if (!atomic_read(&tty->port.count)) {
45609 mutex_unlock(&tty->ipw_tty_mutex);
45610 return -EINVAL;
45611 }
45612@@ -246,7 +245,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
45613 if (!tty)
45614 return -ENODEV;
45615
45616- if (!tty->port.count)
45617+ if (!atomic_read(&tty->port.count))
45618 return -EINVAL;
45619
45620 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
45621@@ -288,7 +287,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
45622 if (!tty)
45623 return 0;
45624
45625- if (!tty->port.count)
45626+ if (!atomic_read(&tty->port.count))
45627 return 0;
45628
45629 return tty->tx_bytes_queued;
45630@@ -369,7 +368,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
45631 if (!tty)
45632 return -ENODEV;
45633
45634- if (!tty->port.count)
45635+ if (!atomic_read(&tty->port.count))
45636 return -EINVAL;
45637
45638 return get_control_lines(tty);
45639@@ -385,7 +384,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
45640 if (!tty)
45641 return -ENODEV;
45642
45643- if (!tty->port.count)
45644+ if (!atomic_read(&tty->port.count))
45645 return -EINVAL;
45646
45647 return set_control_lines(tty, set, clear);
45648@@ -399,7 +398,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
45649 if (!tty)
45650 return -ENODEV;
45651
45652- if (!tty->port.count)
45653+ if (!atomic_read(&tty->port.count))
45654 return -EINVAL;
45655
45656 /* FIXME: Exactly how is the tty object locked here .. */
45657@@ -555,7 +554,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
45658 * are gone */
45659 mutex_lock(&ttyj->ipw_tty_mutex);
45660 }
45661- while (ttyj->port.count)
45662+ while (atomic_read(&ttyj->port.count))
45663 do_ipw_close(ttyj);
45664 ipwireless_disassociate_network_ttys(network,
45665 ttyj->channel_idx);
45666diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
45667index 1deaca4..c8582d4 100644
45668--- a/drivers/tty/moxa.c
45669+++ b/drivers/tty/moxa.c
45670@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
45671 }
45672
45673 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
45674- ch->port.count++;
45675+ atomic_inc(&ch->port.count);
45676 tty->driver_data = ch;
45677 tty_port_tty_set(&ch->port, tty);
45678 mutex_lock(&ch->port.mutex);
45679diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
45680index 6422390..49003ac8 100644
45681--- a/drivers/tty/n_gsm.c
45682+++ b/drivers/tty/n_gsm.c
45683@@ -1632,7 +1632,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
45684 spin_lock_init(&dlci->lock);
45685 mutex_init(&dlci->mutex);
45686 dlci->fifo = &dlci->_fifo;
45687- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
45688+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
45689 kfree(dlci);
45690 return NULL;
45691 }
45692@@ -2932,7 +2932,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
45693 struct gsm_dlci *dlci = tty->driver_data;
45694 struct tty_port *port = &dlci->port;
45695
45696- port->count++;
45697+ atomic_inc(&port->count);
45698 dlci_get(dlci);
45699 dlci_get(dlci->gsm->dlci[0]);
45700 mux_get(dlci->gsm);
45701diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
45702index 6c7fe90..9241dab 100644
45703--- a/drivers/tty/n_tty.c
45704+++ b/drivers/tty/n_tty.c
45705@@ -2203,6 +2203,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
45706 {
45707 *ops = tty_ldisc_N_TTY;
45708 ops->owner = NULL;
45709- ops->refcount = ops->flags = 0;
45710+ atomic_set(&ops->refcount, 0);
45711+ ops->flags = 0;
45712 }
45713 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
45714diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
45715index abfd990..5ab5da9 100644
45716--- a/drivers/tty/pty.c
45717+++ b/drivers/tty/pty.c
45718@@ -796,8 +796,10 @@ static void __init unix98_pty_init(void)
45719 panic("Couldn't register Unix98 pts driver");
45720
45721 /* Now create the /dev/ptmx special device */
45722+ pax_open_kernel();
45723 tty_default_fops(&ptmx_fops);
45724- ptmx_fops.open = ptmx_open;
45725+ *(void **)&ptmx_fops.open = ptmx_open;
45726+ pax_close_kernel();
45727
45728 cdev_init(&ptmx_cdev, &ptmx_fops);
45729 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
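ptmx_fops lives in data that grsecurity keeps read-only after init, so the one-time assignment of ptmx_open has to happen inside a pax_open_kernel()/pax_close_kernel() window, with the *(void **)& cast silencing the const-qualified member type. A rough userspace analogue of that window, using mprotect() on an mmap'd page in place of the kernel's write-protection toggle (a 4096-byte page size is assumed; names are illustrative):

    #include <sys/mman.h>
    #include <stdio.h>

    static void greet(void) { puts("patched in"); }

    int main(void)
    {
        /* one page standing in for the kernel's read-only ops section */
        void (**handler)(void) = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (handler == MAP_FAILED)
            return 1;

        mprotect(handler, 4096, PROT_READ);              /* "constified" state */
        mprotect(handler, 4096, PROT_READ | PROT_WRITE); /* pax_open_kernel()  */
        *handler = greet;                                /* the one-time write */
        mprotect(handler, 4096, PROT_READ);              /* pax_close_kernel() */

        (*handler)();
        return 0;
    }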
45730diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
45731index 354564e..fe50d9a 100644
45732--- a/drivers/tty/rocket.c
45733+++ b/drivers/tty/rocket.c
45734@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
45735 tty->driver_data = info;
45736 tty_port_tty_set(port, tty);
45737
45738- if (port->count++ == 0) {
45739+ if (atomic_inc_return(&port->count) == 1) {
45740 atomic_inc(&rp_num_ports_open);
45741
45742 #ifdef ROCKET_DEBUG_OPEN
45743@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
45744 #endif
45745 }
45746 #ifdef ROCKET_DEBUG_OPEN
45747- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
45748+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
45749 #endif
45750
45751 /*
45752@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
45753 spin_unlock_irqrestore(&info->port.lock, flags);
45754 return;
45755 }
45756- if (info->port.count)
45757+ if (atomic_read(&info->port.count))
45758 atomic_dec(&rp_num_ports_open);
45759 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
45760 spin_unlock_irqrestore(&info->port.lock, flags);
45761diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
45762index 1002054..dd644a8 100644
45763--- a/drivers/tty/serial/kgdboc.c
45764+++ b/drivers/tty/serial/kgdboc.c
45765@@ -24,8 +24,9 @@
45766 #define MAX_CONFIG_LEN 40
45767
45768 static struct kgdb_io kgdboc_io_ops;
45769+static struct kgdb_io kgdboc_io_ops_console;
45770
45771-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
45772+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
45773 static int configured = -1;
45774
45775 static char config[MAX_CONFIG_LEN];
45776@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
45777 kgdboc_unregister_kbd();
45778 if (configured == 1)
45779 kgdb_unregister_io_module(&kgdboc_io_ops);
45780+ else if (configured == 2)
45781+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
45782 }
45783
45784 static int configure_kgdboc(void)
45785@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
45786 int err;
45787 char *cptr = config;
45788 struct console *cons;
45789+ int is_console = 0;
45790
45791 err = kgdboc_option_setup(config);
45792 if (err || !strlen(config) || isspace(config[0]))
45793 goto noconfig;
45794
45795 err = -ENODEV;
45796- kgdboc_io_ops.is_console = 0;
45797 kgdb_tty_driver = NULL;
45798
45799 kgdboc_use_kms = 0;
45800@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
45801 int idx;
45802 if (cons->device && cons->device(cons, &idx) == p &&
45803 idx == tty_line) {
45804- kgdboc_io_ops.is_console = 1;
45805+ is_console = 1;
45806 break;
45807 }
45808 cons = cons->next;
45809@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
45810 kgdb_tty_line = tty_line;
45811
45812 do_register:
45813- err = kgdb_register_io_module(&kgdboc_io_ops);
45814+ if (is_console) {
45815+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
45816+ configured = 2;
45817+ } else {
45818+ err = kgdb_register_io_module(&kgdboc_io_ops);
45819+ configured = 1;
45820+ }
45821 if (err)
45822 goto noconfig;
45823
45824@@ -205,8 +214,6 @@ do_register:
45825 if (err)
45826 goto nmi_con_failed;
45827
45828- configured = 1;
45829-
45830 return 0;
45831
45832 nmi_con_failed:
45833@@ -223,7 +230,7 @@ noconfig:
45834 static int __init init_kgdboc(void)
45835 {
45836 /* Already configured? */
45837- if (configured == 1)
45838+ if (configured >= 1)
45839 return 0;
45840
45841 return configure_kgdboc();
45842@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
45843 if (config[len - 1] == '\n')
45844 config[len - 1] = '\0';
45845
45846- if (configured == 1)
45847+ if (configured >= 1)
45848 cleanup_kgdboc();
45849
45850 /* Go and configure with the new params. */
45851@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
45852 .post_exception = kgdboc_post_exp_handler,
45853 };
45854
45855+static struct kgdb_io kgdboc_io_ops_console = {
45856+ .name = "kgdboc",
45857+ .read_char = kgdboc_get_char,
45858+ .write_char = kgdboc_put_char,
45859+ .pre_exception = kgdboc_pre_exp_handler,
45860+ .post_exception = kgdboc_post_exp_handler,
45861+ .is_console = 1
45862+};
45863+
45864 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
45865 /* This is only available if kgdboc is a built in for early debugging */
45866 static int __init kgdboc_early_init(char *opt)
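Since kgdb_io structures are constified, the old trick of flipping kgdboc_io_ops.is_console at configure time no longer works; the patch instead keeps two fully initialized static instances and records which one was registered in `configured` (1 vs 2), which is why the cleanup and re-configure paths now test `configured >= 1`. The selection pattern, sketched in plain C with illustrative names:

    #include <stdio.h>

    struct io_ops {
        const char *name;
        int is_console;
    };

    static const struct io_ops io_ops         = { "kgdboc", 0 };
    static const struct io_ops io_ops_console = { "kgdboc", 1 };

    static const struct io_ops *registered;
    static int configured;   /* 0 = none, 1 = plain, 2 = console */

    static void do_register(int is_console)
    {
        registered = is_console ? &io_ops_console : &io_ops;
        configured = is_console ? 2 : 1;
    }

    int main(void)
    {
        do_register(1);
        printf("%s is_console=%d configured=%d\n",
               registered->name, registered->is_console, configured);
        return 0;
    }

The ehci-dbgp hunks further down apply the identical two-instance split to kgdbdbgp_io_ops.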
45867diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
45868index 0c8a9fa..234a95f 100644
45869--- a/drivers/tty/serial/samsung.c
45870+++ b/drivers/tty/serial/samsung.c
45871@@ -453,11 +453,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
45872 }
45873 }
45874
45875+static int s3c64xx_serial_startup(struct uart_port *port);
45876 static int s3c24xx_serial_startup(struct uart_port *port)
45877 {
45878 struct s3c24xx_uart_port *ourport = to_ourport(port);
45879 int ret;
45880
45881+ /* Startup sequence is different for s3c64xx and higher SoCs */
45882+ if (s3c24xx_serial_has_interrupt_mask(port))
45883+ return s3c64xx_serial_startup(port);
45884+
45885 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
45886 port->mapbase, port->membase);
45887
45888@@ -1124,10 +1129,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
45889 /* setup info for port */
45890 port->dev = &platdev->dev;
45891
45892- /* Startup sequence is different for s3c64xx and higher SoC's */
45893- if (s3c24xx_serial_has_interrupt_mask(port))
45894- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
45895-
45896 port->uartclk = 1;
45897
45898 if (cfg->uart_flags & UPF_CONS_FLOW) {
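Same motivation in the samsung driver: the original code patched s3c24xx_serial_ops.startup at probe time, a write into what is now a const ops table. The replacement hoists the check into s3c24xx_serial_startup itself, which forwards to s3c64xx_serial_startup when the port has an interrupt mask (hence the new forward declaration, since that function is defined later in the file). A sketch of the dispatch-at-call-time shape, with invented names:

    #include <stdio.h>

    struct port { int has_irq_mask; };

    static int newer_soc_startup(struct port *p)
    {
        (void)p;
        return puts("s3c64xx-style startup");
    }

    static int generic_startup(struct port *p)
    {
        if (p->has_irq_mask)              /* s3c64xx and later SoCs */
            return newer_soc_startup(p);
        return puts("s3c24xx-style startup");
    }

    int main(void)
    {
        struct port older = { 0 }, newer = { 1 };
        generic_startup(&older);
        generic_startup(&newer);
        return 0;
    }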
45899diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
45900index f87dbfd..42ad4b1 100644
45901--- a/drivers/tty/serial/serial_core.c
45902+++ b/drivers/tty/serial/serial_core.c
45903@@ -1454,7 +1454,7 @@ static void uart_hangup(struct tty_struct *tty)
45904 uart_flush_buffer(tty);
45905 uart_shutdown(tty, state);
45906 spin_lock_irqsave(&port->lock, flags);
45907- port->count = 0;
45908+ atomic_set(&port->count, 0);
45909 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
45910 spin_unlock_irqrestore(&port->lock, flags);
45911 tty_port_tty_set(port, NULL);
45912@@ -1550,7 +1550,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
45913 goto end;
45914 }
45915
45916- port->count++;
45917+ atomic_inc(&port->count);
45918 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
45919 retval = -ENXIO;
45920 goto err_dec_count;
45921@@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
45922 /*
45923 * Make sure the device is in D0 state.
45924 */
45925- if (port->count == 1)
45926+ if (atomic_read(&port->count) == 1)
45927 uart_change_pm(state, UART_PM_STATE_ON);
45928
45929 /*
45930@@ -1596,7 +1596,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
45931 end:
45932 return retval;
45933 err_dec_count:
45934- port->count--;
45935+ atomic_dec(&port->count);
45936 mutex_unlock(&port->mutex);
45937 goto end;
45938 }
45939diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
45940index 8eaf1ab..85c030d 100644
45941--- a/drivers/tty/synclink.c
45942+++ b/drivers/tty/synclink.c
45943@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
45944
45945 if (debug_level >= DEBUG_LEVEL_INFO)
45946 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
45947- __FILE__,__LINE__, info->device_name, info->port.count);
45948+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
45949
45950 if (tty_port_close_start(&info->port, tty, filp) == 0)
45951 goto cleanup;
45952@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
45953 cleanup:
45954 if (debug_level >= DEBUG_LEVEL_INFO)
45955 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
45956- tty->driver->name, info->port.count);
45957+ tty->driver->name, atomic_read(&info->port.count));
45958
45959 } /* end of mgsl_close() */
45960
45961@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
45962
45963 mgsl_flush_buffer(tty);
45964 shutdown(info);
45965-
45966- info->port.count = 0;
45967+
45968+ atomic_set(&info->port.count, 0);
45969 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
45970 info->port.tty = NULL;
45971
45972@@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
45973
45974 if (debug_level >= DEBUG_LEVEL_INFO)
45975 printk("%s(%d):block_til_ready before block on %s count=%d\n",
45976- __FILE__,__LINE__, tty->driver->name, port->count );
45977+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
45978
45979 spin_lock_irqsave(&info->irq_spinlock, flags);
45980 if (!tty_hung_up_p(filp)) {
45981 extra_count = true;
45982- port->count--;
45983+ atomic_dec(&port->count);
45984 }
45985 spin_unlock_irqrestore(&info->irq_spinlock, flags);
45986 port->blocked_open++;
45987@@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
45988
45989 if (debug_level >= DEBUG_LEVEL_INFO)
45990 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
45991- __FILE__,__LINE__, tty->driver->name, port->count );
45992+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
45993
45994 tty_unlock(tty);
45995 schedule();
45996@@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
45997
45998 /* FIXME: Racy on hangup during close wait */
45999 if (extra_count)
46000- port->count++;
46001+ atomic_inc(&port->count);
46002 port->blocked_open--;
46003
46004 if (debug_level >= DEBUG_LEVEL_INFO)
46005 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
46006- __FILE__,__LINE__, tty->driver->name, port->count );
46007+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
46008
46009 if (!retval)
46010 port->flags |= ASYNC_NORMAL_ACTIVE;
46011@@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
46012
46013 if (debug_level >= DEBUG_LEVEL_INFO)
46014 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
46015- __FILE__,__LINE__,tty->driver->name, info->port.count);
46016+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
46017
46018 /* If port is closing, signal caller to try again */
46019 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
46020@@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
46021 spin_unlock_irqrestore(&info->netlock, flags);
46022 goto cleanup;
46023 }
46024- info->port.count++;
46025+ atomic_inc(&info->port.count);
46026 spin_unlock_irqrestore(&info->netlock, flags);
46027
46028- if (info->port.count == 1) {
46029+ if (atomic_read(&info->port.count) == 1) {
46030 /* 1st open on this device, init hardware */
46031 retval = startup(info);
46032 if (retval < 0)
46033@@ -3446,8 +3446,8 @@ cleanup:
46034 if (retval) {
46035 if (tty->count == 1)
46036 info->port.tty = NULL; /* tty layer will release tty struct */
46037- if(info->port.count)
46038- info->port.count--;
46039+ if (atomic_read(&info->port.count))
46040+ atomic_dec(&info->port.count);
46041 }
46042
46043 return retval;
46044@@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
46045 unsigned short new_crctype;
46046
46047 /* return error if TTY interface open */
46048- if (info->port.count)
46049+ if (atomic_read(&info->port.count))
46050 return -EBUSY;
46051
46052 switch (encoding)
46053@@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev)
46054
46055 /* arbitrate between network and tty opens */
46056 spin_lock_irqsave(&info->netlock, flags);
46057- if (info->port.count != 0 || info->netcount != 0) {
46058+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
46059 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
46060 spin_unlock_irqrestore(&info->netlock, flags);
46061 return -EBUSY;
46062@@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
46063 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
46064
46065 /* return error if TTY interface open */
46066- if (info->port.count)
46067+ if (atomic_read(&info->port.count))
46068 return -EBUSY;
46069
46070 if (cmd != SIOCWANDEV)
46071diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
46072index 1abf946..1ee34fc 100644
46073--- a/drivers/tty/synclink_gt.c
46074+++ b/drivers/tty/synclink_gt.c
46075@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
46076 tty->driver_data = info;
46077 info->port.tty = tty;
46078
46079- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
46080+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
46081
46082 /* If port is closing, signal caller to try again */
46083 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
46084@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
46085 mutex_unlock(&info->port.mutex);
46086 goto cleanup;
46087 }
46088- info->port.count++;
46089+ atomic_inc(&info->port.count);
46090 spin_unlock_irqrestore(&info->netlock, flags);
46091
46092- if (info->port.count == 1) {
46093+ if (atomic_read(&info->port.count) == 1) {
46094 /* 1st open on this device, init hardware */
46095 retval = startup(info);
46096 if (retval < 0) {
46097@@ -715,8 +715,8 @@ cleanup:
46098 if (retval) {
46099 if (tty->count == 1)
46100 info->port.tty = NULL; /* tty layer will release tty struct */
46101- if(info->port.count)
46102- info->port.count--;
46103+ if (atomic_read(&info->port.count))
46104+ atomic_dec(&info->port.count);
46105 }
46106
46107 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
46108@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
46109
46110 if (sanity_check(info, tty->name, "close"))
46111 return;
46112- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
46113+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
46114
46115 if (tty_port_close_start(&info->port, tty, filp) == 0)
46116 goto cleanup;
46117@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
46118 tty_port_close_end(&info->port, tty);
46119 info->port.tty = NULL;
46120 cleanup:
46121- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
46122+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
46123 }
46124
46125 static void hangup(struct tty_struct *tty)
46126@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
46127 shutdown(info);
46128
46129 spin_lock_irqsave(&info->port.lock, flags);
46130- info->port.count = 0;
46131+ atomic_set(&info->port.count, 0);
46132 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
46133 info->port.tty = NULL;
46134 spin_unlock_irqrestore(&info->port.lock, flags);
46135@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
46136 unsigned short new_crctype;
46137
46138 /* return error if TTY interface open */
46139- if (info->port.count)
46140+ if (atomic_read(&info->port.count))
46141 return -EBUSY;
46142
46143 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
46144@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
46145
46146 /* arbitrate between network and tty opens */
46147 spin_lock_irqsave(&info->netlock, flags);
46148- if (info->port.count != 0 || info->netcount != 0) {
46149+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
46150 DBGINFO(("%s hdlc_open busy\n", dev->name));
46151 spin_unlock_irqrestore(&info->netlock, flags);
46152 return -EBUSY;
46153@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
46154 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
46155
46156 /* return error if TTY interface open */
46157- if (info->port.count)
46158+ if (atomic_read(&info->port.count))
46159 return -EBUSY;
46160
46161 if (cmd != SIOCWANDEV)
46162@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
46163 if (port == NULL)
46164 continue;
46165 spin_lock(&port->lock);
46166- if ((port->port.count || port->netcount) &&
46167+ if ((atomic_read(&port->port.count) || port->netcount) &&
46168 port->pending_bh && !port->bh_running &&
46169 !port->bh_requested) {
46170 DBGISR(("%s bh queued\n", port->device_name));
46171@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
46172 spin_lock_irqsave(&info->lock, flags);
46173 if (!tty_hung_up_p(filp)) {
46174 extra_count = true;
46175- port->count--;
46176+ atomic_dec(&port->count);
46177 }
46178 spin_unlock_irqrestore(&info->lock, flags);
46179 port->blocked_open++;
46180@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
46181 remove_wait_queue(&port->open_wait, &wait);
46182
46183 if (extra_count)
46184- port->count++;
46185+ atomic_inc(&port->count);
46186 port->blocked_open--;
46187
46188 if (!retval)
46189diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
46190index ff17138..e38b41e 100644
46191--- a/drivers/tty/synclinkmp.c
46192+++ b/drivers/tty/synclinkmp.c
46193@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
46194
46195 if (debug_level >= DEBUG_LEVEL_INFO)
46196 printk("%s(%d):%s open(), old ref count = %d\n",
46197- __FILE__,__LINE__,tty->driver->name, info->port.count);
46198+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
46199
46200 /* If port is closing, signal caller to try again */
46201 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
46202@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
46203 spin_unlock_irqrestore(&info->netlock, flags);
46204 goto cleanup;
46205 }
46206- info->port.count++;
46207+ atomic_inc(&info->port.count);
46208 spin_unlock_irqrestore(&info->netlock, flags);
46209
46210- if (info->port.count == 1) {
46211+ if (atomic_read(&info->port.count) == 1) {
46212 /* 1st open on this device, init hardware */
46213 retval = startup(info);
46214 if (retval < 0)
46215@@ -796,8 +796,8 @@ cleanup:
46216 if (retval) {
46217 if (tty->count == 1)
46218 info->port.tty = NULL; /* tty layer will release tty struct */
46219- if(info->port.count)
46220- info->port.count--;
46221+ if (atomic_read(&info->port.count))
46222+ atomic_dec(&info->port.count);
46223 }
46224
46225 return retval;
46226@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
46227
46228 if (debug_level >= DEBUG_LEVEL_INFO)
46229 printk("%s(%d):%s close() entry, count=%d\n",
46230- __FILE__,__LINE__, info->device_name, info->port.count);
46231+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
46232
46233 if (tty_port_close_start(&info->port, tty, filp) == 0)
46234 goto cleanup;
46235@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
46236 cleanup:
46237 if (debug_level >= DEBUG_LEVEL_INFO)
46238 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
46239- tty->driver->name, info->port.count);
46240+ tty->driver->name, atomic_read(&info->port.count));
46241 }
46242
46243 /* Called by tty_hangup() when a hangup is signaled.
46244@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
46245 shutdown(info);
46246
46247 spin_lock_irqsave(&info->port.lock, flags);
46248- info->port.count = 0;
46249+ atomic_set(&info->port.count, 0);
46250 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
46251 info->port.tty = NULL;
46252 spin_unlock_irqrestore(&info->port.lock, flags);
46253@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
46254 unsigned short new_crctype;
46255
46256 /* return error if TTY interface open */
46257- if (info->port.count)
46258+ if (atomic_read(&info->port.count))
46259 return -EBUSY;
46260
46261 switch (encoding)
46262@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
46263
46264 /* arbitrate between network and tty opens */
46265 spin_lock_irqsave(&info->netlock, flags);
46266- if (info->port.count != 0 || info->netcount != 0) {
46267+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
46268 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
46269 spin_unlock_irqrestore(&info->netlock, flags);
46270 return -EBUSY;
46271@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
46272 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
46273
46274 /* return error if TTY interface open */
46275- if (info->port.count)
46276+ if (atomic_read(&info->port.count))
46277 return -EBUSY;
46278
46279 if (cmd != SIOCWANDEV)
46280@@ -2620,7 +2620,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
46281 * do not request bottom half processing if the
46282 * device is not open in a normal mode.
46283 */
46284- if ( port && (port->port.count || port->netcount) &&
46285+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
46286 port->pending_bh && !port->bh_running &&
46287 !port->bh_requested ) {
46288 if ( debug_level >= DEBUG_LEVEL_ISR )
46289@@ -3318,12 +3318,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
46290
46291 if (debug_level >= DEBUG_LEVEL_INFO)
46292 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
46293- __FILE__,__LINE__, tty->driver->name, port->count );
46294+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
46295
46296 spin_lock_irqsave(&info->lock, flags);
46297 if (!tty_hung_up_p(filp)) {
46298 extra_count = true;
46299- port->count--;
46300+ atomic_dec(&port->count);
46301 }
46302 spin_unlock_irqrestore(&info->lock, flags);
46303 port->blocked_open++;
46304@@ -3352,7 +3352,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
46305
46306 if (debug_level >= DEBUG_LEVEL_INFO)
46307 printk("%s(%d):%s block_til_ready() count=%d\n",
46308- __FILE__,__LINE__, tty->driver->name, port->count );
46309+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
46310
46311 tty_unlock(tty);
46312 schedule();
46313@@ -3363,12 +3363,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
46314 remove_wait_queue(&port->open_wait, &wait);
46315
46316 if (extra_count)
46317- port->count++;
46318+ atomic_inc(&port->count);
46319 port->blocked_open--;
46320
46321 if (debug_level >= DEBUG_LEVEL_INFO)
46322 printk("%s(%d):%s block_til_ready() after, count=%d\n",
46323- __FILE__,__LINE__, tty->driver->name, port->count );
46324+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
46325
46326 if (!retval)
46327 port->flags |= ASYNC_NORMAL_ACTIVE;
46328diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
46329index b51c154..17d55d1 100644
46330--- a/drivers/tty/sysrq.c
46331+++ b/drivers/tty/sysrq.c
46332@@ -1022,7 +1022,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
46333 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
46334 size_t count, loff_t *ppos)
46335 {
46336- if (count) {
46337+ if (count && capable(CAP_SYS_ADMIN)) {
46338 char c;
46339
46340 if (get_user(c, buf))
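The sysrq hunk gates /proc/sysrq-trigger writes on capable(CAP_SYS_ADMIN), so a process that inherits or is passed a writable fd to the file cannot fire sysrq handlers without the capability. Shape of the guarded handler, sketched in userspace with geteuid() standing in for the capability check:

    #include <unistd.h>
    #include <stddef.h>

    static ssize_t sysrq_trigger_write(const char *buf, size_t count)
    {
        if (count && geteuid() == 0) {   /* kernel: count && capable(CAP_SYS_ADMIN) */
            char c = buf[0];             /* kernel: get_user(c, buf) */
            (void)c;                     /* ... dispatch the handler for 'c' ... */
        }
        return count;                    /* unprivileged writes are silently ignored */
    }

    int main(void)
    {
        return sysrq_trigger_write("h", 1) == 1 ? 0 : 1;
    }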
46341diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
46342index 4476682..d77e748 100644
46343--- a/drivers/tty/tty_io.c
46344+++ b/drivers/tty/tty_io.c
46345@@ -3466,7 +3466,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
46346
46347 void tty_default_fops(struct file_operations *fops)
46348 {
46349- *fops = tty_fops;
46350+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
46351 }
46352
46353 /*
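tty_default_fops() now copies with memcpy through a void * cast because, with file_operations constified, a plain structure assignment through the pointer no longer type-checks; the caller (see the pty.c hunk above) wraps the call in a pax_open_kernel window so the store actually lands. Minimal sketch of the const bypass (names are illustrative; writing through the cast is well-defined here because the destination object itself is not const):

    #include <string.h>
    #include <stdio.h>

    struct file_operations { int (*open)(void); };

    static int stub_open(void) { return 0; }
    static const struct file_operations tty_fops = { .open = stub_open };

    static void tty_default_fops(const struct file_operations *fops)
    {
        /* *fops = tty_fops;  -- rejected: fops points to const */
        memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
    }

    int main(void)
    {
        struct file_operations ptmx_fops = { 0 };
        tty_default_fops(&ptmx_fops);
        printf("%d\n", ptmx_fops.open());
        return 0;
    }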
46354diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
46355index 1afe192..73d2c20 100644
46356--- a/drivers/tty/tty_ldisc.c
46357+++ b/drivers/tty/tty_ldisc.c
46358@@ -66,7 +66,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
46359 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
46360 tty_ldiscs[disc] = new_ldisc;
46361 new_ldisc->num = disc;
46362- new_ldisc->refcount = 0;
46363+ atomic_set(&new_ldisc->refcount, 0);
46364 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
46365
46366 return ret;
46367@@ -94,7 +94,7 @@ int tty_unregister_ldisc(int disc)
46368 return -EINVAL;
46369
46370 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
46371- if (tty_ldiscs[disc]->refcount)
46372+ if (atomic_read(&tty_ldiscs[disc]->refcount))
46373 ret = -EBUSY;
46374 else
46375 tty_ldiscs[disc] = NULL;
46376@@ -115,7 +115,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
46377 if (ldops) {
46378 ret = ERR_PTR(-EAGAIN);
46379 if (try_module_get(ldops->owner)) {
46380- ldops->refcount++;
46381+ atomic_inc(&ldops->refcount);
46382 ret = ldops;
46383 }
46384 }
46385@@ -128,7 +128,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
46386 unsigned long flags;
46387
46388 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
46389- ldops->refcount--;
46390+ atomic_dec(&ldops->refcount);
46391 module_put(ldops->owner);
46392 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
46393 }
46394@@ -196,7 +196,7 @@ static inline void tty_ldisc_put(struct tty_ldisc *ld)
46395 /* unreleased reader reference(s) will cause this WARN */
46396 WARN_ON(!atomic_dec_and_test(&ld->users));
46397
46398- ld->ops->refcount--;
46399+ atomic_dec(&ld->ops->refcount);
46400 module_put(ld->ops->owner);
46401 kfree(ld);
46402 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
46403diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
46404index f597e88..b7f68ed 100644
46405--- a/drivers/tty/tty_port.c
46406+++ b/drivers/tty/tty_port.c
46407@@ -232,7 +232,7 @@ void tty_port_hangup(struct tty_port *port)
46408 unsigned long flags;
46409
46410 spin_lock_irqsave(&port->lock, flags);
46411- port->count = 0;
46412+ atomic_set(&port->count, 0);
46413 port->flags &= ~ASYNC_NORMAL_ACTIVE;
46414 tty = port->tty;
46415 if (tty)
46416@@ -390,7 +390,7 @@ int tty_port_block_til_ready(struct tty_port *port,
46417 /* The port lock protects the port counts */
46418 spin_lock_irqsave(&port->lock, flags);
46419 if (!tty_hung_up_p(filp))
46420- port->count--;
46421+ atomic_dec(&port->count);
46422 port->blocked_open++;
46423 spin_unlock_irqrestore(&port->lock, flags);
46424
46425@@ -432,7 +432,7 @@ int tty_port_block_til_ready(struct tty_port *port,
46426 we must not mess that up further */
46427 spin_lock_irqsave(&port->lock, flags);
46428 if (!tty_hung_up_p(filp))
46429- port->count++;
46430+ atomic_inc(&port->count);
46431 port->blocked_open--;
46432 if (retval == 0)
46433 port->flags |= ASYNC_NORMAL_ACTIVE;
46434@@ -466,19 +466,19 @@ int tty_port_close_start(struct tty_port *port,
46435 return 0;
46436 }
46437
46438- if (tty->count == 1 && port->count != 1) {
46439+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
46440 printk(KERN_WARNING
46441 "tty_port_close_start: tty->count = 1 port count = %d.\n",
46442- port->count);
46443- port->count = 1;
46444+ atomic_read(&port->count));
46445+ atomic_set(&port->count, 1);
46446 }
46447- if (--port->count < 0) {
46448+ if (atomic_dec_return(&port->count) < 0) {
46449 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
46450- port->count);
46451- port->count = 0;
46452+ atomic_read(&port->count));
46453+ atomic_set(&port->count, 0);
46454 }
46455
46456- if (port->count) {
46457+ if (atomic_read(&port->count)) {
46458 spin_unlock_irqrestore(&port->lock, flags);
46459 if (port->ops->drop)
46460 port->ops->drop(port);
46461@@ -564,7 +564,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
46462 {
46463 spin_lock_irq(&port->lock);
46464 if (!tty_hung_up_p(filp))
46465- ++port->count;
46466+ atomic_inc(&port->count);
46467 spin_unlock_irq(&port->lock);
46468 tty_port_tty_set(port, tty);
46469
46470diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
46471index a9af1b9a..1e08e7f 100644
46472--- a/drivers/tty/vt/keyboard.c
46473+++ b/drivers/tty/vt/keyboard.c
46474@@ -647,6 +647,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
46475 kbd->kbdmode == VC_OFF) &&
46476 value != KVAL(K_SAK))
46477 return; /* SAK is allowed even in raw mode */
46478+
46479+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46480+ {
46481+ void *func = fn_handler[value];
46482+ if (func == fn_show_state || func == fn_show_ptregs ||
46483+ func == fn_show_mem)
46484+ return;
46485+ }
46486+#endif
46487+
46488 fn_handler[value](vc);
46489 }
46490
46491@@ -1795,9 +1805,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
46492 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
46493 return -EFAULT;
46494
46495- if (!capable(CAP_SYS_TTY_CONFIG))
46496- perm = 0;
46497-
46498 switch (cmd) {
46499 case KDGKBENT:
46500 /* Ensure another thread doesn't free it under us */
46501@@ -1812,6 +1819,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
46502 spin_unlock_irqrestore(&kbd_event_lock, flags);
46503 return put_user(val, &user_kbe->kb_value);
46504 case KDSKBENT:
46505+ if (!capable(CAP_SYS_TTY_CONFIG))
46506+ perm = 0;
46507+
46508 if (!perm)
46509 return -EPERM;
46510 if (!i && v == K_NOSUCHMAP) {
46511@@ -1902,9 +1912,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
46512 int i, j, k;
46513 int ret;
46514
46515- if (!capable(CAP_SYS_TTY_CONFIG))
46516- perm = 0;
46517-
46518 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
46519 if (!kbs) {
46520 ret = -ENOMEM;
46521@@ -1938,6 +1945,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
46522 kfree(kbs);
46523 return ((p && *p) ? -EOVERFLOW : 0);
46524 case KDSKBSENT:
46525+ if (!capable(CAP_SYS_TTY_CONFIG))
46526+ perm = 0;
46527+
46528 if (!perm) {
46529 ret = -EPERM;
46530 goto reterr;
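Two distinct hardenings in the keyboard hunk: the fn_show_state/fn_show_ptregs/fn_show_mem console keys are filtered out under the GRKERNSEC_PROC options so a user at the console cannot dump kernel state with a keystroke, and the CAP_SYS_TTY_CONFIG demotion of `perm` moves from the top of each ioctl into the set cases (KDSKBENT/KDSKBSENT) only, so reading keymap entries no longer depends on the capability. The reordered check, sketched with stand-in names:

    #include <stdio.h>

    enum { KDGKBENT, KDSKBENT };

    static int has_cap_sys_tty_config;   /* stand-in for capable() */

    static int kdsk_ioctl(int cmd, int perm)
    {
        switch (cmd) {
        case KDGKBENT:
            return 0;                    /* read path: no capability consulted */
        case KDSKBENT:
            if (!has_cap_sys_tty_config) /* demote perm only when setting */
                perm = 0;
            if (!perm)
                return -1;               /* -EPERM */
            return 0;
        }
        return -1;
    }

    int main(void)
    {
        printf("get:%d set:%d\n", kdsk_ioctl(KDGKBENT, 1), kdsk_ioctl(KDSKBENT, 1));
        return 0;
    }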
46531diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
46532index b645c47..a55c182 100644
46533--- a/drivers/uio/uio.c
46534+++ b/drivers/uio/uio.c
46535@@ -25,6 +25,7 @@
46536 #include <linux/kobject.h>
46537 #include <linux/cdev.h>
46538 #include <linux/uio_driver.h>
46539+#include <asm/local.h>
46540
46541 #define UIO_MAX_DEVICES (1U << MINORBITS)
46542
46543@@ -32,10 +33,10 @@ struct uio_device {
46544 struct module *owner;
46545 struct device *dev;
46546 int minor;
46547- atomic_t event;
46548+ atomic_unchecked_t event;
46549 struct fasync_struct *async_queue;
46550 wait_queue_head_t wait;
46551- int vma_count;
46552+ local_t vma_count;
46553 struct uio_info *info;
46554 struct kobject *map_dir;
46555 struct kobject *portio_dir;
46556@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
46557 struct device_attribute *attr, char *buf)
46558 {
46559 struct uio_device *idev = dev_get_drvdata(dev);
46560- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
46561+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
46562 }
46563
46564 static struct device_attribute uio_class_attributes[] = {
46565@@ -398,7 +399,7 @@ void uio_event_notify(struct uio_info *info)
46566 {
46567 struct uio_device *idev = info->uio_dev;
46568
46569- atomic_inc(&idev->event);
46570+ atomic_inc_unchecked(&idev->event);
46571 wake_up_interruptible(&idev->wait);
46572 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
46573 }
46574@@ -451,7 +452,7 @@ static int uio_open(struct inode *inode, struct file *filep)
46575 }
46576
46577 listener->dev = idev;
46578- listener->event_count = atomic_read(&idev->event);
46579+ listener->event_count = atomic_read_unchecked(&idev->event);
46580 filep->private_data = listener;
46581
46582 if (idev->info->open) {
46583@@ -502,7 +503,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
46584 return -EIO;
46585
46586 poll_wait(filep, &idev->wait, wait);
46587- if (listener->event_count != atomic_read(&idev->event))
46588+ if (listener->event_count != atomic_read_unchecked(&idev->event))
46589 return POLLIN | POLLRDNORM;
46590 return 0;
46591 }
46592@@ -527,7 +528,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
46593 do {
46594 set_current_state(TASK_INTERRUPTIBLE);
46595
46596- event_count = atomic_read(&idev->event);
46597+ event_count = atomic_read_unchecked(&idev->event);
46598 if (event_count != listener->event_count) {
46599 if (copy_to_user(buf, &event_count, count))
46600 retval = -EFAULT;
46601@@ -596,13 +597,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
46602 static void uio_vma_open(struct vm_area_struct *vma)
46603 {
46604 struct uio_device *idev = vma->vm_private_data;
46605- idev->vma_count++;
46606+ local_inc(&idev->vma_count);
46607 }
46608
46609 static void uio_vma_close(struct vm_area_struct *vma)
46610 {
46611 struct uio_device *idev = vma->vm_private_data;
46612- idev->vma_count--;
46613+ local_dec(&idev->vma_count);
46614 }
46615
46616 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
46617@@ -809,7 +810,7 @@ int __uio_register_device(struct module *owner,
46618 idev->owner = owner;
46619 idev->info = info;
46620 init_waitqueue_head(&idev->wait);
46621- atomic_set(&idev->event, 0);
46622+ atomic_set_unchecked(&idev->event, 0);
46623
46624 ret = uio_get_minor(idev);
46625 if (ret)
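In uio the two counters get different treatments: `event` is a wrap-tolerant sequence number that listeners only compare for inequality, so it becomes atomic_unchecked_t, while `vma_count` is bumped from the vma open/close callbacks and becomes a local_t so the ++/-- are single atomic operations rather than racy plain arithmetic. A userspace sketch of the split, using C11 atomics for both (local_t has no direct userspace equivalent):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint event;       /* atomic_unchecked_t: wraparound is fine */
    static atomic_int  vma_count;   /* local_t in the patch: atomic inc/dec   */

    static void uio_event_notify(void)  { atomic_fetch_add(&event, 1); }
    static void uio_vma_open(void)      { atomic_fetch_add(&vma_count, 1); }
    static void uio_vma_close(void)     { atomic_fetch_sub(&vma_count, 1); }

    int main(void)
    {
        unsigned int seen = atomic_load(&event);   /* listener snapshot */
        uio_event_notify();
        uio_vma_open();
        uio_vma_close();
        printf("new events: %d, vmas: %d\n",
               atomic_load(&event) != seen, atomic_load(&vma_count));
        return 0;
    }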
46626diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
46627index 8a7eb77..c00402f 100644
46628--- a/drivers/usb/atm/cxacru.c
46629+++ b/drivers/usb/atm/cxacru.c
46630@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
46631 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
46632 if (ret < 2)
46633 return -EINVAL;
46634- if (index < 0 || index > 0x7f)
46635+ if (index > 0x7f)
46636 return -EINVAL;
46637 pos += tmp;
46638
46639diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
46640index d3527dd..26effa2 100644
46641--- a/drivers/usb/atm/usbatm.c
46642+++ b/drivers/usb/atm/usbatm.c
46643@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
46644 if (printk_ratelimit())
46645 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
46646 __func__, vpi, vci);
46647- atomic_inc(&vcc->stats->rx_err);
46648+ atomic_inc_unchecked(&vcc->stats->rx_err);
46649 return;
46650 }
46651
46652@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
46653 if (length > ATM_MAX_AAL5_PDU) {
46654 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
46655 __func__, length, vcc);
46656- atomic_inc(&vcc->stats->rx_err);
46657+ atomic_inc_unchecked(&vcc->stats->rx_err);
46658 goto out;
46659 }
46660
46661@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
46662 if (sarb->len < pdu_length) {
46663 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
46664 __func__, pdu_length, sarb->len, vcc);
46665- atomic_inc(&vcc->stats->rx_err);
46666+ atomic_inc_unchecked(&vcc->stats->rx_err);
46667 goto out;
46668 }
46669
46670 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
46671 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
46672 __func__, vcc);
46673- atomic_inc(&vcc->stats->rx_err);
46674+ atomic_inc_unchecked(&vcc->stats->rx_err);
46675 goto out;
46676 }
46677
46678@@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
46679 if (printk_ratelimit())
46680 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
46681 __func__, length);
46682- atomic_inc(&vcc->stats->rx_drop);
46683+ atomic_inc_unchecked(&vcc->stats->rx_drop);
46684 goto out;
46685 }
46686
46687@@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
46688
46689 vcc->push(vcc, skb);
46690
46691- atomic_inc(&vcc->stats->rx);
46692+ atomic_inc_unchecked(&vcc->stats->rx);
46693 out:
46694 skb_trim(sarb, 0);
46695 }
46696@@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
46697 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
46698
46699 usbatm_pop(vcc, skb);
46700- atomic_inc(&vcc->stats->tx);
46701+ atomic_inc_unchecked(&vcc->stats->tx);
46702
46703 skb = skb_dequeue(&instance->sndqueue);
46704 }
46705@@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
46706 if (!left--)
46707 return sprintf(page,
46708 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
46709- atomic_read(&atm_dev->stats.aal5.tx),
46710- atomic_read(&atm_dev->stats.aal5.tx_err),
46711- atomic_read(&atm_dev->stats.aal5.rx),
46712- atomic_read(&atm_dev->stats.aal5.rx_err),
46713- atomic_read(&atm_dev->stats.aal5.rx_drop));
46714+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
46715+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
46716+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
46717+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
46718+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
46719
46720 if (!left--) {
46721 if (instance->disconnected)
46722diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
46723index 2a3bbdf..91d72cf 100644
46724--- a/drivers/usb/core/devices.c
46725+++ b/drivers/usb/core/devices.c
46726@@ -126,7 +126,7 @@ static const char format_endpt[] =
46727 * time it gets called.
46728 */
46729 static struct device_connect_event {
46730- atomic_t count;
46731+ atomic_unchecked_t count;
46732 wait_queue_head_t wait;
46733 } device_event = {
46734 .count = ATOMIC_INIT(1),
46735@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
46736
46737 void usbfs_conn_disc_event(void)
46738 {
46739- atomic_add(2, &device_event.count);
46740+ atomic_add_unchecked(2, &device_event.count);
46741 wake_up(&device_event.wait);
46742 }
46743
46744@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
46745
46746 poll_wait(file, &device_event.wait, wait);
46747
46748- event_count = atomic_read(&device_event.count);
46749+ event_count = atomic_read_unchecked(&device_event.count);
46750 if (file->f_version != event_count) {
46751 file->f_version = event_count;
46752 return POLLIN | POLLRDNORM;
46753diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
46754index d53547d..6a22d02 100644
46755--- a/drivers/usb/core/hcd.c
46756+++ b/drivers/usb/core/hcd.c
46757@@ -1526,7 +1526,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
46758 */
46759 usb_get_urb(urb);
46760 atomic_inc(&urb->use_count);
46761- atomic_inc(&urb->dev->urbnum);
46762+ atomic_inc_unchecked(&urb->dev->urbnum);
46763 usbmon_urb_submit(&hcd->self, urb);
46764
46765 /* NOTE requirements on root-hub callers (usbfs and the hub
46766@@ -1553,7 +1553,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
46767 urb->hcpriv = NULL;
46768 INIT_LIST_HEAD(&urb->urb_list);
46769 atomic_dec(&urb->use_count);
46770- atomic_dec(&urb->dev->urbnum);
46771+ atomic_dec_unchecked(&urb->dev->urbnum);
46772 if (atomic_read(&urb->reject))
46773 wake_up(&usb_kill_urb_queue);
46774 usb_put_urb(urb);
46775diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
46776index 444d30e..f15c850 100644
46777--- a/drivers/usb/core/message.c
46778+++ b/drivers/usb/core/message.c
46779@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
46780 * method can wait for it to complete. Since you don't have a handle on the
46781 * URB used, you can't cancel the request.
46782 */
46783-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
46784+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
46785 __u8 requesttype, __u16 value, __u16 index, void *data,
46786 __u16 size, int timeout)
46787 {
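__intentional_overflow(-1) is an annotation consumed by the size_overflow gcc plugin: it marks usb_control_msg()'s return value as exempt from overflow instrumentation, since callers legitimately mix the returned byte count (or negative errno) into size arithmetic. On a stock compiler the attribute can be modeled as an empty macro, as in this sketch (function body and names are illustrative):

    #include <stdio.h>

    #define __intentional_overflow(...) /* plugin attribute; no-op here */

    static int __intentional_overflow(-1) control_msg(int size, int timeout)
    {
        /* returns bytes transferred or a negative error; the caller may
         * fold this into further size computations without instrumentation */
        (void)timeout;
        return size > 0 ? size : -1;
    }

    int main(void)
    {
        printf("%d\n", control_msg(8, 1000));
        return 0;
    }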
46788diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
46789index aa38db4..0a08682 100644
46790--- a/drivers/usb/core/sysfs.c
46791+++ b/drivers/usb/core/sysfs.c
46792@@ -239,7 +239,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
46793 struct usb_device *udev;
46794
46795 udev = to_usb_device(dev);
46796- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
46797+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
46798 }
46799 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
46800
46801diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
46802index b10da72..43aa0b2 100644
46803--- a/drivers/usb/core/usb.c
46804+++ b/drivers/usb/core/usb.c
46805@@ -389,7 +389,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
46806 set_dev_node(&dev->dev, dev_to_node(bus->controller));
46807 dev->state = USB_STATE_ATTACHED;
46808 dev->lpm_disable_count = 1;
46809- atomic_set(&dev->urbnum, 0);
46810+ atomic_set_unchecked(&dev->urbnum, 0);
46811
46812 INIT_LIST_HEAD(&dev->ep0.urb_list);
46813 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
46814diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
46815index 5e29dde..eca992f 100644
46816--- a/drivers/usb/early/ehci-dbgp.c
46817+++ b/drivers/usb/early/ehci-dbgp.c
46818@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
46819
46820 #ifdef CONFIG_KGDB
46821 static struct kgdb_io kgdbdbgp_io_ops;
46822-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
46823+static struct kgdb_io kgdbdbgp_io_ops_console;
46824+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
46825 #else
46826 #define dbgp_kgdb_mode (0)
46827 #endif
46828@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
46829 .write_char = kgdbdbgp_write_char,
46830 };
46831
46832+static struct kgdb_io kgdbdbgp_io_ops_console = {
46833+ .name = "kgdbdbgp",
46834+ .read_char = kgdbdbgp_read_char,
46835+ .write_char = kgdbdbgp_write_char,
46836+ .is_console = 1
46837+};
46838+
46839 static int kgdbdbgp_wait_time;
46840
46841 static int __init kgdbdbgp_parse_config(char *str)
46842@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
46843 ptr++;
46844 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
46845 }
46846- kgdb_register_io_module(&kgdbdbgp_io_ops);
46847- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
46848+ if (early_dbgp_console.index != -1)
46849+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
46850+ else
46851+ kgdb_register_io_module(&kgdbdbgp_io_ops);
46852
46853 return 0;
46854 }
46855diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
46856index b369292..9f3ba40 100644
46857--- a/drivers/usb/gadget/u_serial.c
46858+++ b/drivers/usb/gadget/u_serial.c
46859@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
46860 spin_lock_irq(&port->port_lock);
46861
46862 /* already open? Great. */
46863- if (port->port.count) {
46864+ if (atomic_read(&port->port.count)) {
46865 status = 0;
46866- port->port.count++;
46867+ atomic_inc(&port->port.count);
46868
46869 /* currently opening/closing? wait ... */
46870 } else if (port->openclose) {
46871@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
46872 tty->driver_data = port;
46873 port->port.tty = tty;
46874
46875- port->port.count = 1;
46876+ atomic_set(&port->port.count, 1);
46877 port->openclose = false;
46878
46879 /* if connected, start the I/O stream */
46880@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
46881
46882 spin_lock_irq(&port->port_lock);
46883
46884- if (port->port.count != 1) {
46885- if (port->port.count == 0)
46886+ if (atomic_read(&port->port.count) != 1) {
46887+ if (atomic_read(&port->port.count) == 0)
46888 WARN_ON(1);
46889 else
46890- --port->port.count;
46891+ atomic_dec(&port->port.count);
46892 goto exit;
46893 }
46894
46895@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
46896 * and sleep if necessary
46897 */
46898 port->openclose = true;
46899- port->port.count = 0;
46900+ atomic_set(&port->port.count, 0);
46901
46902 gser = port->port_usb;
46903 if (gser && gser->disconnect)
46904@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
46905 int cond;
46906
46907 spin_lock_irq(&port->port_lock);
46908- cond = (port->port.count == 0) && !port->openclose;
46909+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
46910 spin_unlock_irq(&port->port_lock);
46911 return cond;
46912 }
46913@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
46914 /* if it's already open, start I/O ... and notify the serial
46915 * protocol about open/close status (connect/disconnect).
46916 */
46917- if (port->port.count) {
46918+ if (atomic_read(&port->port.count)) {
46919 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
46920 gs_start_io(port);
46921 if (gser->connect)
46922@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
46923
46924 port->port_usb = NULL;
46925 gser->ioport = NULL;
46926- if (port->port.count > 0 || port->openclose) {
46927+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
46928 wake_up_interruptible(&port->drain_wait);
46929 if (port->port.tty)
46930 tty_hangup(port->port.tty);
46931@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
46932
46933 /* finally, free any unused/unusable I/O buffers */
46934 spin_lock_irqsave(&port->port_lock, flags);
46935- if (port->port.count == 0 && !port->openclose)
46936+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
46937 gs_buf_free(&port->port_write_buf);
46938 gs_free_requests(gser->out, &port->read_pool, NULL);
46939 gs_free_requests(gser->out, &port->read_queue, NULL);
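Every access to port->port.count in u_serial.c moves to the atomic_t accessors; in this patch the tty_port open count is evidently an atomic_t so that it receives the same PAX_REFCOUNT overflow checking as other kernel reference counts (the same conversion repeats in serial/console.c below). The resulting open/close bookkeeping, as a simplified C11 sketch rather than the driver code:

#include <stdatomic.h>
#include <stdbool.h>

struct port { atomic_int count; bool openclose; };

static void port_open(struct port *p)
{
	if (atomic_load(&p->count)) {		/* already open: just share it */
		atomic_fetch_add(&p->count, 1);
		return;
	}
	atomic_store(&p->count, 1);		/* first opener sets up I/O */
	p->openclose = false;
}

static void port_close(struct port *p)
{
	if (atomic_load(&p->count) != 1) {	/* not the last closer */
		atomic_fetch_sub(&p->count, 1);
		return;
	}
	p->openclose = true;			/* last close: tear down I/O */
	atomic_store(&p->count, 0);
}
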
46940diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
46941index 5f3bcd3..bfca43f 100644
46942--- a/drivers/usb/serial/console.c
46943+++ b/drivers/usb/serial/console.c
46944@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
46945
46946 info->port = port;
46947
46948- ++port->port.count;
46949+ atomic_inc(&port->port.count);
46950 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
46951 if (serial->type->set_termios) {
46952 /*
46953@@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
46954 }
46955 /* Now that any required fake tty operations are completed restore
46956 * the tty port count */
46957- --port->port.count;
46958+ atomic_dec(&port->port.count);
46959 /* The console is special in terms of closing the device so
46960 * indicate this port is now acting as a system console. */
46961 port->port.console = 1;
46962@@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
46963 free_tty:
46964 kfree(tty);
46965 reset_open_count:
46966- port->port.count = 0;
46967+ atomic_set(&port->port.count, 0);
46968 usb_autopm_put_interface(serial->interface);
46969 error_get_interface:
46970 usb_serial_put(serial);
46971diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
46972index 75f70f0..d467e1a 100644
46973--- a/drivers/usb/storage/usb.h
46974+++ b/drivers/usb/storage/usb.h
46975@@ -63,7 +63,7 @@ struct us_unusual_dev {
46976 __u8 useProtocol;
46977 __u8 useTransport;
46978 int (*initFunction)(struct us_data *);
46979-};
46980+} __do_const;
46981
46982
46983 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
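__do_const is consumed by the PaX constify GCC plugin: a structure that is essentially a table of function pointers, such as us_unusual_dev with its initFunction member, is forced into read-only memory so a kernel write primitive cannot be used to redirect the pointer. A hedged sketch of the mechanism; the fallback definition is an assumption, and only a plugin-enabled build actually enforces the placement:

/* With the constify plugin, __do_const makes every instance of the
 * tagged type read-only; without it, the marker expands to nothing. */
#ifdef CONSTIFY_PLUGIN
# define __do_const __attribute__((do_const))
#else
# define __do_const
#endif

struct handler_ops {
	int (*init)(void *ctx);
} __do_const;

/* Under the plugin, instances land in .rodata, so an attempted
 * runtime retarget such as `ops->init = evil_init;` faults instead
 * of silently succeeding. */
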
46984diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
46985index d6bea3e..60b250e 100644
46986--- a/drivers/usb/wusbcore/wa-hc.h
46987+++ b/drivers/usb/wusbcore/wa-hc.h
46988@@ -192,7 +192,7 @@ struct wahc {
46989 struct list_head xfer_delayed_list;
46990 spinlock_t xfer_list_lock;
46991 struct work_struct xfer_work;
46992- atomic_t xfer_id_count;
46993+ atomic_unchecked_t xfer_id_count;
46994 };
46995
46996
46997@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
46998 INIT_LIST_HEAD(&wa->xfer_delayed_list);
46999 spin_lock_init(&wa->xfer_list_lock);
47000 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
47001- atomic_set(&wa->xfer_id_count, 1);
47002+ atomic_set_unchecked(&wa->xfer_id_count, 1);
47003 }
47004
47005 /**
47006diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
47007index 6ef94bc..1b41265 100644
47008--- a/drivers/usb/wusbcore/wa-xfer.c
47009+++ b/drivers/usb/wusbcore/wa-xfer.c
47010@@ -296,7 +296,7 @@ out:
47011 */
47012 static void wa_xfer_id_init(struct wa_xfer *xfer)
47013 {
47014- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
47015+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
47016 }
47017
47018 /*
47019diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
47020index 5174eba..86e764a 100644
47021--- a/drivers/vhost/vringh.c
47022+++ b/drivers/vhost/vringh.c
47023@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
47024
47025 static inline int putu16_kern(u16 *p, u16 val)
47026 {
47027- ACCESS_ONCE(*p) = val;
47028+ ACCESS_ONCE_RW(*p) = val;
47029 return 0;
47030 }
47031
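In this patch ACCESS_ONCE() is built on a const-qualified access so that the constification work also catches stray writes at compile time; putu16_kern() legitimately stores through the pointer, hence the switch to the writable ACCESS_ONCE_RW() variant. Assuming definitions along these lines (illustrative, close to but not guaranteed to match the patched headers):

/* Read-only flavor: the const qualifier turns an accidental write
 * through ACCESS_ONCE() into a compile-time error. */
#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))

/* Writable flavor for the few sites that really must store. */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
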
47032diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
47033index 8c55011..eed4ae1a 100644
47034--- a/drivers/video/aty/aty128fb.c
47035+++ b/drivers/video/aty/aty128fb.c
47036@@ -149,7 +149,7 @@ enum {
47037 };
47038
47039 /* Must match above enum */
47040-static char * const r128_family[] = {
47041+static const char * const r128_family[] = {
47042 "AGP",
47043 "PCI",
47044 "PRO AGP",
47045diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
47046index 4f27fdc..d3537e6 100644
47047--- a/drivers/video/aty/atyfb_base.c
47048+++ b/drivers/video/aty/atyfb_base.c
47049@@ -1325,10 +1325,14 @@ static int atyfb_set_par(struct fb_info *info)
47050 par->accel_flags = var->accel_flags; /* hack */
47051
47052 if (var->accel_flags) {
47053- info->fbops->fb_sync = atyfb_sync;
47054+ pax_open_kernel();
47055+ *(void **)&info->fbops->fb_sync = atyfb_sync;
47056+ pax_close_kernel();
47057 info->flags &= ~FBINFO_HWACCEL_DISABLED;
47058 } else {
47059- info->fbops->fb_sync = NULL;
47060+ pax_open_kernel();
47061+ *(void **)&info->fbops->fb_sync = NULL;
47062+ pax_close_kernel();
47063 info->flags |= FBINFO_HWACCEL_DISABLED;
47064 }
47065
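Because this patch constifies fb_ops, the few legitimate runtime updates of members such as fb_sync must briefly lift kernel write protection: pax_open_kernel()/pax_close_kernel() bracket the store (on x86 by toggling CR0.WP), and the *(void **) cast discards the const for that single assignment. The same bracketing recurs in mach64_cursor.c and fb_defio.c below. Schematically, with stand-in no-op bodies for the pax_* helpers (the helpers are real grsecurity interfaces, their bodies here are not):

struct fb_ops_like { int (*fb_sync)(void *info); };

static inline void pax_open_kernel(void)  { /* e.g. clear CR0.WP, pin the CPU */ }
static inline void pax_close_kernel(void) { /* restore CR0.WP */ }

static void set_sync(const struct fb_ops_like *ops, int (*fn)(void *))
{
	pax_open_kernel();
	*(void **)&ops->fb_sync = fn;	/* cast away const for this one store */
	pax_close_kernel();
}
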
47066diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
47067index 95ec042..e6affdd 100644
47068--- a/drivers/video/aty/mach64_cursor.c
47069+++ b/drivers/video/aty/mach64_cursor.c
47070@@ -7,6 +7,7 @@
47071 #include <linux/string.h>
47072
47073 #include <asm/io.h>
47074+#include <asm/pgtable.h>
47075
47076 #ifdef __sparc__
47077 #include <asm/fbio.h>
47078@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
47079 info->sprite.buf_align = 16; /* and 64 lines tall. */
47080 info->sprite.flags = FB_PIXMAP_IO;
47081
47082- info->fbops->fb_cursor = atyfb_cursor;
47083+ pax_open_kernel();
47084+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
47085+ pax_close_kernel();
47086
47087 return 0;
47088 }
47089diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
47090index c74e7aa..e3c2790 100644
47091--- a/drivers/video/backlight/backlight.c
47092+++ b/drivers/video/backlight/backlight.c
47093@@ -304,7 +304,7 @@ struct backlight_device *backlight_device_register(const char *name,
47094 new_bd->dev.class = backlight_class;
47095 new_bd->dev.parent = parent;
47096 new_bd->dev.release = bl_device_release;
47097- dev_set_name(&new_bd->dev, name);
47098+ dev_set_name(&new_bd->dev, "%s", name);
47099 dev_set_drvdata(&new_bd->dev, devdata);
47100
47101 /* Set default properties */
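dev_set_name() takes a printf-style format, so handing it a caller-controlled name directly is a format-string bug: a name containing "%s" or "%n" would be interpreted rather than stored. Routing the name through a literal "%s", as this backlight hunk and the lcd.c hunk below do, makes it inert data. The same rule in plain C:

#include <stdio.h>

static void label(const char *user_supplied)
{
	printf(user_supplied);		/* bug: '%' directives in the name are interpreted */
	printf("%s", user_supplied);	/* safe: the name is an argument, never parsed */
}
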
47102diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
47103index bca6ccc..252107e 100644
47104--- a/drivers/video/backlight/kb3886_bl.c
47105+++ b/drivers/video/backlight/kb3886_bl.c
47106@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
47107 static unsigned long kb3886bl_flags;
47108 #define KB3886BL_SUSPENDED 0x01
47109
47110-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
47111+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
47112 {
47113 .ident = "Sahara Touch-iT",
47114 .matches = {
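The DMI table is only consulted during init and never written, so it can carry both qualifiers: once const is added, __initdata must become __initconst so the object is emitted into the read-only init section (a const object forced into the writable .init.data section can fail to link on some toolchains). A reduced sketch, with simplified stand-ins for the kernel's section macros and match structure:

#define __initdata  __attribute__((section(".init.data")))
#define __initconst __attribute__((section(".init.rodata")))

struct match { const char *ident; };

/* before: writable init data */
static struct match __initdata tbl_rw[] = { { "Sahara Touch-iT" }, { 0 } };
/* after: read-only init data, requires the const qualifier */
static const struct match __initconst tbl_ro[] = { { "Sahara Touch-iT" }, { 0 } };
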
47115diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
47116index 34fb6bd..3649fd9 100644
47117--- a/drivers/video/backlight/lcd.c
47118+++ b/drivers/video/backlight/lcd.c
47119@@ -219,7 +219,7 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
47120 new_ld->dev.class = lcd_class;
47121 new_ld->dev.parent = parent;
47122 new_ld->dev.release = lcd_device_release;
47123- dev_set_name(&new_ld->dev, name);
47124+ dev_set_name(&new_ld->dev, "%s", name);
47125 dev_set_drvdata(&new_ld->dev, devdata);
47126
47127 rc = device_register(&new_ld->dev);
47128diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
47129index 900aa4e..6d49418 100644
47130--- a/drivers/video/fb_defio.c
47131+++ b/drivers/video/fb_defio.c
47132@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
47133
47134 BUG_ON(!fbdefio);
47135 mutex_init(&fbdefio->lock);
47136- info->fbops->fb_mmap = fb_deferred_io_mmap;
47137+ pax_open_kernel();
47138+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
47139+ pax_close_kernel();
47140 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
47141 INIT_LIST_HEAD(&fbdefio->pagelist);
47142 if (fbdefio->delay == 0) /* set a default of 1 s */
47143@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
47144 page->mapping = NULL;
47145 }
47146
47147- info->fbops->fb_mmap = NULL;
47148+ *(void **)&info->fbops->fb_mmap = NULL;
47149 mutex_destroy(&fbdefio->lock);
47150 }
47151 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
47152diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
47153index 5c3960d..15cf8fc 100644
47154--- a/drivers/video/fbcmap.c
47155+++ b/drivers/video/fbcmap.c
47156@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
47157 rc = -ENODEV;
47158 goto out;
47159 }
47160- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
47161- !info->fbops->fb_setcmap)) {
47162+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
47163 rc = -EINVAL;
47164 goto out1;
47165 }
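cmap->start is unsigned, so the `cmap->start < 0` test deleted above could never be true; it is dead code that -Wtype-limits flags and that only suggests a validation which never happens. What remains is the real check, that at least one color-setting hook exists. The hazard in isolation:

static int validate(unsigned int start)
{
	if (start < 0)		/* always false: start is unsigned */
		return -1;	/* unreachable; gcc -Wtype-limits warns here */
	return 0;
}
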
47166diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
47167index 098bfc6..796841d 100644
47168--- a/drivers/video/fbmem.c
47169+++ b/drivers/video/fbmem.c
47170@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
47171 image->dx += image->width + 8;
47172 }
47173 } else if (rotate == FB_ROTATE_UD) {
47174- for (x = 0; x < num && image->dx >= 0; x++) {
47175+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
47176 info->fbops->fb_imageblit(info, image);
47177 image->dx -= image->width + 8;
47178 }
47179@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
47180 image->dy += image->height + 8;
47181 }
47182 } else if (rotate == FB_ROTATE_CCW) {
47183- for (x = 0; x < num && image->dy >= 0; x++) {
47184+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
47185 info->fbops->fb_imageblit(info, image);
47186 image->dy -= image->height + 8;
47187 }
47188@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
47189 return -EFAULT;
47190 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
47191 return -EINVAL;
47192- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
47193+ if (con2fb.framebuffer >= FB_MAX)
47194 return -EINVAL;
47195 if (!registered_fb[con2fb.framebuffer])
47196 request_module("fb%d", con2fb.framebuffer);
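The fbmem.c changes handle the complementary cases. image->dx and image->dy are unsigned, so `image->dx >= 0` was always true and the loops could not stop once the subtraction wrapped; casting to __s32 reads the wrapped value as negative and restores the intended bound. Conversely, con2fb.framebuffer cannot be below zero, so only the upper bound against FB_MAX carries any meaning. A minimal demonstration of the cast:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dx = 4;

	dx -= 10;				/* wraps to a huge positive value */
	printf("%d\n", dx >= 0);		/* 1: unsigned compare, always true */
	printf("%d\n", (int32_t)dx >= 0);	/* 0: reinterpreted as negative */
	return 0;
}
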
47197diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
47198index 7672d2e..b56437f 100644
47199--- a/drivers/video/i810/i810_accel.c
47200+++ b/drivers/video/i810/i810_accel.c
47201@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
47202 }
47203 }
47204 printk("ringbuffer lockup!!!\n");
47205+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
47206 i810_report_error(mmio);
47207 par->dev_flags |= LOCKUP;
47208 info->pixmap.scan_align = 1;
47209diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
47210index 3c14e43..eafa544 100644
47211--- a/drivers/video/logo/logo_linux_clut224.ppm
47212+++ b/drivers/video/logo/logo_linux_clut224.ppm
47213@@ -1,1604 +1,1123 @@
47214 P3
47215-# Standard 224-color Linux logo
47216 80 80
47217 255
 [raw PPM pixel rows omitted: per the hunk header, this hunk replaces the standard 224-color 80x80 logo's 1604-line pixel map with a 1123-line replacement; the span contains only color triplets, no code]
48459- 0 0 0 0 0 0 0 0 0 10 10 10
48460- 30 30 30 78 78 78 174 135 50 224 166 10
48461-239 182 13 246 190 14 246 190 14 246 190 14
48462-246 190 14 246 190 14 246 190 14 246 190 14
48463-246 190 14 246 190 14 246 190 14 246 190 14
48464-246 190 14 246 190 14 241 196 14 139 102 15
48465- 2 2 6 2 2 6 2 2 6 2 2 6
48466- 78 78 78 250 250 250 253 253 253 253 253 253
48467-253 253 253 253 253 253 253 253 253 253 253 253
48468-253 253 253 253 253 253 253 253 253 253 253 253
48469-253 253 253 253 253 253 253 253 253 253 253 253
48470-250 250 250 214 214 214 198 198 198 190 150 46
48471-219 162 10 236 178 12 234 174 13 224 166 10
48472-216 158 10 213 154 11 213 154 11 216 158 10
48473-226 170 11 239 182 13 246 190 14 246 190 14
48474-246 190 14 246 190 14 242 186 14 206 162 42
48475-101 101 101 58 58 58 30 30 30 14 14 14
48476- 6 6 6 0 0 0 0 0 0 0 0 0
48477- 0 0 0 0 0 0 0 0 0 0 0 0
48478- 0 0 0 0 0 0 0 0 0 0 0 0
48479- 0 0 0 0 0 0 0 0 0 10 10 10
48480- 30 30 30 74 74 74 174 135 50 216 158 10
48481-236 178 12 246 190 14 246 190 14 246 190 14
48482-246 190 14 246 190 14 246 190 14 246 190 14
48483-246 190 14 246 190 14 246 190 14 246 190 14
48484-246 190 14 246 190 14 241 196 14 226 184 13
48485- 61 42 6 2 2 6 2 2 6 2 2 6
48486- 22 22 22 238 238 238 253 253 253 253 253 253
48487-253 253 253 253 253 253 253 253 253 253 253 253
48488-253 253 253 253 253 253 253 253 253 253 253 253
48489-253 253 253 253 253 253 253 253 253 253 253 253
48490-253 253 253 226 226 226 187 187 187 180 133 36
48491-216 158 10 236 178 12 239 182 13 236 178 12
48492-230 174 11 226 170 11 226 170 11 230 174 11
48493-236 178 12 242 186 14 246 190 14 246 190 14
48494-246 190 14 246 190 14 246 186 14 239 182 13
48495-206 162 42 106 106 106 66 66 66 34 34 34
48496- 14 14 14 6 6 6 0 0 0 0 0 0
48497- 0 0 0 0 0 0 0 0 0 0 0 0
48498- 0 0 0 0 0 0 0 0 0 0 0 0
48499- 0 0 0 0 0 0 0 0 0 6 6 6
48500- 26 26 26 70 70 70 163 133 67 213 154 11
48501-236 178 12 246 190 14 246 190 14 246 190 14
48502-246 190 14 246 190 14 246 190 14 246 190 14
48503-246 190 14 246 190 14 246 190 14 246 190 14
48504-246 190 14 246 190 14 246 190 14 241 196 14
48505-190 146 13 18 14 6 2 2 6 2 2 6
48506- 46 46 46 246 246 246 253 253 253 253 253 253
48507-253 253 253 253 253 253 253 253 253 253 253 253
48508-253 253 253 253 253 253 253 253 253 253 253 253
48509-253 253 253 253 253 253 253 253 253 253 253 253
48510-253 253 253 221 221 221 86 86 86 156 107 11
48511-216 158 10 236 178 12 242 186 14 246 186 14
48512-242 186 14 239 182 13 239 182 13 242 186 14
48513-242 186 14 246 186 14 246 190 14 246 190 14
48514-246 190 14 246 190 14 246 190 14 246 190 14
48515-242 186 14 225 175 15 142 122 72 66 66 66
48516- 30 30 30 10 10 10 0 0 0 0 0 0
48517- 0 0 0 0 0 0 0 0 0 0 0 0
48518- 0 0 0 0 0 0 0 0 0 0 0 0
48519- 0 0 0 0 0 0 0 0 0 6 6 6
48520- 26 26 26 70 70 70 163 133 67 210 150 10
48521-236 178 12 246 190 14 246 190 14 246 190 14
48522-246 190 14 246 190 14 246 190 14 246 190 14
48523-246 190 14 246 190 14 246 190 14 246 190 14
48524-246 190 14 246 190 14 246 190 14 246 190 14
48525-232 195 16 121 92 8 34 34 34 106 106 106
48526-221 221 221 253 253 253 253 253 253 253 253 253
48527-253 253 253 253 253 253 253 253 253 253 253 253
48528-253 253 253 253 253 253 253 253 253 253 253 253
48529-253 253 253 253 253 253 253 253 253 253 253 253
48530-242 242 242 82 82 82 18 14 6 163 110 8
48531-216 158 10 236 178 12 242 186 14 246 190 14
48532-246 190 14 246 190 14 246 190 14 246 190 14
48533-246 190 14 246 190 14 246 190 14 246 190 14
48534-246 190 14 246 190 14 246 190 14 246 190 14
48535-246 190 14 246 190 14 242 186 14 163 133 67
48536- 46 46 46 18 18 18 6 6 6 0 0 0
48537- 0 0 0 0 0 0 0 0 0 0 0 0
48538- 0 0 0 0 0 0 0 0 0 0 0 0
48539- 0 0 0 0 0 0 0 0 0 10 10 10
48540- 30 30 30 78 78 78 163 133 67 210 150 10
48541-236 178 12 246 186 14 246 190 14 246 190 14
48542-246 190 14 246 190 14 246 190 14 246 190 14
48543-246 190 14 246 190 14 246 190 14 246 190 14
48544-246 190 14 246 190 14 246 190 14 246 190 14
48545-241 196 14 215 174 15 190 178 144 253 253 253
48546-253 253 253 253 253 253 253 253 253 253 253 253
48547-253 253 253 253 253 253 253 253 253 253 253 253
48548-253 253 253 253 253 253 253 253 253 253 253 253
48549-253 253 253 253 253 253 253 253 253 218 218 218
48550- 58 58 58 2 2 6 22 18 6 167 114 7
48551-216 158 10 236 178 12 246 186 14 246 190 14
48552-246 190 14 246 190 14 246 190 14 246 190 14
48553-246 190 14 246 190 14 246 190 14 246 190 14
48554-246 190 14 246 190 14 246 190 14 246 190 14
48555-246 190 14 246 186 14 242 186 14 190 150 46
48556- 54 54 54 22 22 22 6 6 6 0 0 0
48557- 0 0 0 0 0 0 0 0 0 0 0 0
48558- 0 0 0 0 0 0 0 0 0 0 0 0
48559- 0 0 0 0 0 0 0 0 0 14 14 14
48560- 38 38 38 86 86 86 180 133 36 213 154 11
48561-236 178 12 246 186 14 246 190 14 246 190 14
48562-246 190 14 246 190 14 246 190 14 246 190 14
48563-246 190 14 246 190 14 246 190 14 246 190 14
48564-246 190 14 246 190 14 246 190 14 246 190 14
48565-246 190 14 232 195 16 190 146 13 214 214 214
48566-253 253 253 253 253 253 253 253 253 253 253 253
48567-253 253 253 253 253 253 253 253 253 253 253 253
48568-253 253 253 253 253 253 253 253 253 253 253 253
48569-253 253 253 250 250 250 170 170 170 26 26 26
48570- 2 2 6 2 2 6 37 26 9 163 110 8
48571-219 162 10 239 182 13 246 186 14 246 190 14
48572-246 190 14 246 190 14 246 190 14 246 190 14
48573-246 190 14 246 190 14 246 190 14 246 190 14
48574-246 190 14 246 190 14 246 190 14 246 190 14
48575-246 186 14 236 178 12 224 166 10 142 122 72
48576- 46 46 46 18 18 18 6 6 6 0 0 0
48577- 0 0 0 0 0 0 0 0 0 0 0 0
48578- 0 0 0 0 0 0 0 0 0 0 0 0
48579- 0 0 0 0 0 0 6 6 6 18 18 18
48580- 50 50 50 109 106 95 192 133 9 224 166 10
48581-242 186 14 246 190 14 246 190 14 246 190 14
48582-246 190 14 246 190 14 246 190 14 246 190 14
48583-246 190 14 246 190 14 246 190 14 246 190 14
48584-246 190 14 246 190 14 246 190 14 246 190 14
48585-242 186 14 226 184 13 210 162 10 142 110 46
48586-226 226 226 253 253 253 253 253 253 253 253 253
48587-253 253 253 253 253 253 253 253 253 253 253 253
48588-253 253 253 253 253 253 253 253 253 253 253 253
48589-198 198 198 66 66 66 2 2 6 2 2 6
48590- 2 2 6 2 2 6 50 34 6 156 107 11
48591-219 162 10 239 182 13 246 186 14 246 190 14
48592-246 190 14 246 190 14 246 190 14 246 190 14
48593-246 190 14 246 190 14 246 190 14 246 190 14
48594-246 190 14 246 190 14 246 190 14 242 186 14
48595-234 174 13 213 154 11 154 122 46 66 66 66
48596- 30 30 30 10 10 10 0 0 0 0 0 0
48597- 0 0 0 0 0 0 0 0 0 0 0 0
48598- 0 0 0 0 0 0 0 0 0 0 0 0
48599- 0 0 0 0 0 0 6 6 6 22 22 22
48600- 58 58 58 154 121 60 206 145 10 234 174 13
48601-242 186 14 246 186 14 246 190 14 246 190 14
48602-246 190 14 246 190 14 246 190 14 246 190 14
48603-246 190 14 246 190 14 246 190 14 246 190 14
48604-246 190 14 246 190 14 246 190 14 246 190 14
48605-246 186 14 236 178 12 210 162 10 163 110 8
48606- 61 42 6 138 138 138 218 218 218 250 250 250
48607-253 253 253 253 253 253 253 253 253 250 250 250
48608-242 242 242 210 210 210 144 144 144 66 66 66
48609- 6 6 6 2 2 6 2 2 6 2 2 6
48610- 2 2 6 2 2 6 61 42 6 163 110 8
48611-216 158 10 236 178 12 246 190 14 246 190 14
48612-246 190 14 246 190 14 246 190 14 246 190 14
48613-246 190 14 246 190 14 246 190 14 246 190 14
48614-246 190 14 239 182 13 230 174 11 216 158 10
48615-190 142 34 124 112 88 70 70 70 38 38 38
48616- 18 18 18 6 6 6 0 0 0 0 0 0
48617- 0 0 0 0 0 0 0 0 0 0 0 0
48618- 0 0 0 0 0 0 0 0 0 0 0 0
48619- 0 0 0 0 0 0 6 6 6 22 22 22
48620- 62 62 62 168 124 44 206 145 10 224 166 10
48621-236 178 12 239 182 13 242 186 14 242 186 14
48622-246 186 14 246 190 14 246 190 14 246 190 14
48623-246 190 14 246 190 14 246 190 14 246 190 14
48624-246 190 14 246 190 14 246 190 14 246 190 14
48625-246 190 14 236 178 12 216 158 10 175 118 6
48626- 80 54 7 2 2 6 6 6 6 30 30 30
48627- 54 54 54 62 62 62 50 50 50 38 38 38
48628- 14 14 14 2 2 6 2 2 6 2 2 6
48629- 2 2 6 2 2 6 2 2 6 2 2 6
48630- 2 2 6 6 6 6 80 54 7 167 114 7
48631-213 154 11 236 178 12 246 190 14 246 190 14
48632-246 190 14 246 190 14 246 190 14 246 190 14
48633-246 190 14 242 186 14 239 182 13 239 182 13
48634-230 174 11 210 150 10 174 135 50 124 112 88
48635- 82 82 82 54 54 54 34 34 34 18 18 18
48636- 6 6 6 0 0 0 0 0 0 0 0 0
48637- 0 0 0 0 0 0 0 0 0 0 0 0
48638- 0 0 0 0 0 0 0 0 0 0 0 0
48639- 0 0 0 0 0 0 6 6 6 18 18 18
48640- 50 50 50 158 118 36 192 133 9 200 144 11
48641-216 158 10 219 162 10 224 166 10 226 170 11
48642-230 174 11 236 178 12 239 182 13 239 182 13
48643-242 186 14 246 186 14 246 190 14 246 190 14
48644-246 190 14 246 190 14 246 190 14 246 190 14
48645-246 186 14 230 174 11 210 150 10 163 110 8
48646-104 69 6 10 10 10 2 2 6 2 2 6
48647- 2 2 6 2 2 6 2 2 6 2 2 6
48648- 2 2 6 2 2 6 2 2 6 2 2 6
48649- 2 2 6 2 2 6 2 2 6 2 2 6
48650- 2 2 6 6 6 6 91 60 6 167 114 7
48651-206 145 10 230 174 11 242 186 14 246 190 14
48652-246 190 14 246 190 14 246 186 14 242 186 14
48653-239 182 13 230 174 11 224 166 10 213 154 11
48654-180 133 36 124 112 88 86 86 86 58 58 58
48655- 38 38 38 22 22 22 10 10 10 6 6 6
48656- 0 0 0 0 0 0 0 0 0 0 0 0
48657- 0 0 0 0 0 0 0 0 0 0 0 0
48658- 0 0 0 0 0 0 0 0 0 0 0 0
48659- 0 0 0 0 0 0 0 0 0 14 14 14
48660- 34 34 34 70 70 70 138 110 50 158 118 36
48661-167 114 7 180 123 7 192 133 9 197 138 11
48662-200 144 11 206 145 10 213 154 11 219 162 10
48663-224 166 10 230 174 11 239 182 13 242 186 14
48664-246 186 14 246 186 14 246 186 14 246 186 14
48665-239 182 13 216 158 10 185 133 11 152 99 6
48666-104 69 6 18 14 6 2 2 6 2 2 6
48667- 2 2 6 2 2 6 2 2 6 2 2 6
48668- 2 2 6 2 2 6 2 2 6 2 2 6
48669- 2 2 6 2 2 6 2 2 6 2 2 6
48670- 2 2 6 6 6 6 80 54 7 152 99 6
48671-192 133 9 219 162 10 236 178 12 239 182 13
48672-246 186 14 242 186 14 239 182 13 236 178 12
48673-224 166 10 206 145 10 192 133 9 154 121 60
48674- 94 94 94 62 62 62 42 42 42 22 22 22
48675- 14 14 14 6 6 6 0 0 0 0 0 0
48676- 0 0 0 0 0 0 0 0 0 0 0 0
48677- 0 0 0 0 0 0 0 0 0 0 0 0
48678- 0 0 0 0 0 0 0 0 0 0 0 0
48679- 0 0 0 0 0 0 0 0 0 6 6 6
48680- 18 18 18 34 34 34 58 58 58 78 78 78
48681-101 98 89 124 112 88 142 110 46 156 107 11
48682-163 110 8 167 114 7 175 118 6 180 123 7
48683-185 133 11 197 138 11 210 150 10 219 162 10
48684-226 170 11 236 178 12 236 178 12 234 174 13
48685-219 162 10 197 138 11 163 110 8 130 83 6
48686- 91 60 6 10 10 10 2 2 6 2 2 6
48687- 18 18 18 38 38 38 38 38 38 38 38 38
48688- 38 38 38 38 38 38 38 38 38 38 38 38
48689- 38 38 38 38 38 38 26 26 26 2 2 6
48690- 2 2 6 6 6 6 70 47 6 137 92 6
48691-175 118 6 200 144 11 219 162 10 230 174 11
48692-234 174 13 230 174 11 219 162 10 210 150 10
48693-192 133 9 163 110 8 124 112 88 82 82 82
48694- 50 50 50 30 30 30 14 14 14 6 6 6
48695- 0 0 0 0 0 0 0 0 0 0 0 0
48696- 0 0 0 0 0 0 0 0 0 0 0 0
48697- 0 0 0 0 0 0 0 0 0 0 0 0
48698- 0 0 0 0 0 0 0 0 0 0 0 0
48699- 0 0 0 0 0 0 0 0 0 0 0 0
48700- 6 6 6 14 14 14 22 22 22 34 34 34
48701- 42 42 42 58 58 58 74 74 74 86 86 86
48702-101 98 89 122 102 70 130 98 46 121 87 25
48703-137 92 6 152 99 6 163 110 8 180 123 7
48704-185 133 11 197 138 11 206 145 10 200 144 11
48705-180 123 7 156 107 11 130 83 6 104 69 6
48706- 50 34 6 54 54 54 110 110 110 101 98 89
48707- 86 86 86 82 82 82 78 78 78 78 78 78
48708- 78 78 78 78 78 78 78 78 78 78 78 78
48709- 78 78 78 82 82 82 86 86 86 94 94 94
48710-106 106 106 101 101 101 86 66 34 124 80 6
48711-156 107 11 180 123 7 192 133 9 200 144 11
48712-206 145 10 200 144 11 192 133 9 175 118 6
48713-139 102 15 109 106 95 70 70 70 42 42 42
48714- 22 22 22 10 10 10 0 0 0 0 0 0
48715- 0 0 0 0 0 0 0 0 0 0 0 0
48716- 0 0 0 0 0 0 0 0 0 0 0 0
48717- 0 0 0 0 0 0 0 0 0 0 0 0
48718- 0 0 0 0 0 0 0 0 0 0 0 0
48719- 0 0 0 0 0 0 0 0 0 0 0 0
48720- 0 0 0 0 0 0 6 6 6 10 10 10
48721- 14 14 14 22 22 22 30 30 30 38 38 38
48722- 50 50 50 62 62 62 74 74 74 90 90 90
48723-101 98 89 112 100 78 121 87 25 124 80 6
48724-137 92 6 152 99 6 152 99 6 152 99 6
48725-138 86 6 124 80 6 98 70 6 86 66 30
48726-101 98 89 82 82 82 58 58 58 46 46 46
48727- 38 38 38 34 34 34 34 34 34 34 34 34
48728- 34 34 34 34 34 34 34 34 34 34 34 34
48729- 34 34 34 34 34 34 38 38 38 42 42 42
48730- 54 54 54 82 82 82 94 86 76 91 60 6
48731-134 86 6 156 107 11 167 114 7 175 118 6
48732-175 118 6 167 114 7 152 99 6 121 87 25
48733-101 98 89 62 62 62 34 34 34 18 18 18
48734- 6 6 6 0 0 0 0 0 0 0 0 0
48735- 0 0 0 0 0 0 0 0 0 0 0 0
48736- 0 0 0 0 0 0 0 0 0 0 0 0
48737- 0 0 0 0 0 0 0 0 0 0 0 0
48738- 0 0 0 0 0 0 0 0 0 0 0 0
48739- 0 0 0 0 0 0 0 0 0 0 0 0
48740- 0 0 0 0 0 0 0 0 0 0 0 0
48741- 0 0 0 6 6 6 6 6 6 10 10 10
48742- 18 18 18 22 22 22 30 30 30 42 42 42
48743- 50 50 50 66 66 66 86 86 86 101 98 89
48744-106 86 58 98 70 6 104 69 6 104 69 6
48745-104 69 6 91 60 6 82 62 34 90 90 90
48746- 62 62 62 38 38 38 22 22 22 14 14 14
48747- 10 10 10 10 10 10 10 10 10 10 10 10
48748- 10 10 10 10 10 10 6 6 6 10 10 10
48749- 10 10 10 10 10 10 10 10 10 14 14 14
48750- 22 22 22 42 42 42 70 70 70 89 81 66
48751- 80 54 7 104 69 6 124 80 6 137 92 6
48752-134 86 6 116 81 8 100 82 52 86 86 86
48753- 58 58 58 30 30 30 14 14 14 6 6 6
48754- 0 0 0 0 0 0 0 0 0 0 0 0
48755- 0 0 0 0 0 0 0 0 0 0 0 0
48756- 0 0 0 0 0 0 0 0 0 0 0 0
48757- 0 0 0 0 0 0 0 0 0 0 0 0
48758- 0 0 0 0 0 0 0 0 0 0 0 0
48759- 0 0 0 0 0 0 0 0 0 0 0 0
48760- 0 0 0 0 0 0 0 0 0 0 0 0
48761- 0 0 0 0 0 0 0 0 0 0 0 0
48762- 0 0 0 6 6 6 10 10 10 14 14 14
48763- 18 18 18 26 26 26 38 38 38 54 54 54
48764- 70 70 70 86 86 86 94 86 76 89 81 66
48765- 89 81 66 86 86 86 74 74 74 50 50 50
48766- 30 30 30 14 14 14 6 6 6 0 0 0
48767- 0 0 0 0 0 0 0 0 0 0 0 0
48768- 0 0 0 0 0 0 0 0 0 0 0 0
48769- 0 0 0 0 0 0 0 0 0 0 0 0
48770- 6 6 6 18 18 18 34 34 34 58 58 58
48771- 82 82 82 89 81 66 89 81 66 89 81 66
48772- 94 86 66 94 86 76 74 74 74 50 50 50
48773- 26 26 26 14 14 14 6 6 6 0 0 0
48774- 0 0 0 0 0 0 0 0 0 0 0 0
48775- 0 0 0 0 0 0 0 0 0 0 0 0
48776- 0 0 0 0 0 0 0 0 0 0 0 0
48777- 0 0 0 0 0 0 0 0 0 0 0 0
48778- 0 0 0 0 0 0 0 0 0 0 0 0
48779- 0 0 0 0 0 0 0 0 0 0 0 0
48780- 0 0 0 0 0 0 0 0 0 0 0 0
48781- 0 0 0 0 0 0 0 0 0 0 0 0
48782- 0 0 0 0 0 0 0 0 0 0 0 0
48783- 6 6 6 6 6 6 14 14 14 18 18 18
48784- 30 30 30 38 38 38 46 46 46 54 54 54
48785- 50 50 50 42 42 42 30 30 30 18 18 18
48786- 10 10 10 0 0 0 0 0 0 0 0 0
48787- 0 0 0 0 0 0 0 0 0 0 0 0
48788- 0 0 0 0 0 0 0 0 0 0 0 0
48789- 0 0 0 0 0 0 0 0 0 0 0 0
48790- 0 0 0 6 6 6 14 14 14 26 26 26
48791- 38 38 38 50 50 50 58 58 58 58 58 58
48792- 54 54 54 42 42 42 30 30 30 18 18 18
48793- 10 10 10 0 0 0 0 0 0 0 0 0
48794- 0 0 0 0 0 0 0 0 0 0 0 0
48795- 0 0 0 0 0 0 0 0 0 0 0 0
48796- 0 0 0 0 0 0 0 0 0 0 0 0
48797- 0 0 0 0 0 0 0 0 0 0 0 0
48798- 0 0 0 0 0 0 0 0 0 0 0 0
48799- 0 0 0 0 0 0 0 0 0 0 0 0
48800- 0 0 0 0 0 0 0 0 0 0 0 0
48801- 0 0 0 0 0 0 0 0 0 0 0 0
48802- 0 0 0 0 0 0 0 0 0 0 0 0
48803- 0 0 0 0 0 0 0 0 0 6 6 6
48804- 6 6 6 10 10 10 14 14 14 18 18 18
48805- 18 18 18 14 14 14 10 10 10 6 6 6
48806- 0 0 0 0 0 0 0 0 0 0 0 0
48807- 0 0 0 0 0 0 0 0 0 0 0 0
48808- 0 0 0 0 0 0 0 0 0 0 0 0
48809- 0 0 0 0 0 0 0 0 0 0 0 0
48810- 0 0 0 0 0 0 0 0 0 6 6 6
48811- 14 14 14 18 18 18 22 22 22 22 22 22
48812- 18 18 18 14 14 14 10 10 10 6 6 6
48813- 0 0 0 0 0 0 0 0 0 0 0 0
48814- 0 0 0 0 0 0 0 0 0 0 0 0
48815- 0 0 0 0 0 0 0 0 0 0 0 0
48816- 0 0 0 0 0 0 0 0 0 0 0 0
48817- 0 0 0 0 0 0 0 0 0 0 0 0
48818+ [783 added lines of ASCII RGB pixel data, 6 pixels (18 values) per row: the image rows of the replacement PPM-format boot logo]
49601+4 4 4 4 4 4
49602+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
49603+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
49604+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
49605+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
49606+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
49607+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
49608+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49609+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
49610+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
49611+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
49612+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
49613+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49615+4 4 4 4 4 4
49616+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
49617+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
49618+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
49619+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
49620+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
49621+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
49622+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
49623+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
49624+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
49625+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
49626+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49627+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49629+4 4 4 4 4 4
49630+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
49631+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
49632+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
49633+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
49634+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
49635+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
49636+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
49637+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
49638+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
49639+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
49640+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49641+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49643+4 4 4 4 4 4
49644+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
49645+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
49646+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
49647+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
49648+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
49649+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
49650+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
49651+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
49652+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
49653+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
49654+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49655+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49657+4 4 4 4 4 4
49658+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
49659+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
49660+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
49661+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
49662+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
49663+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
49664+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
49665+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
49666+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
49667+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
49668+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49671+4 4 4 4 4 4
49672+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
49673+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
49674+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
49675+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
49676+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
49677+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
49678+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
49679+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
49680+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
49681+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
49682+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49685+4 4 4 4 4 4
49686+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
49687+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
49688+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
49689+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
49690+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
49691+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
49692+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
49693+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
49694+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
49695+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49696+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49699+4 4 4 4 4 4
49700+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
49701+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
49702+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
49703+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
49704+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
49705+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
49706+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
49707+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
49708+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
49709+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49710+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49713+4 4 4 4 4 4
49714+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
49715+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
49716+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
49717+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
49718+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
49719+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
49720+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
49721+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
49722+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49723+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49724+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49727+4 4 4 4 4 4
49728+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
49729+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
49730+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
49731+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
49732+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
49733+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
49734+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
49735+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
49736+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49737+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49741+4 4 4 4 4 4
49742+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
49743+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
49744+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
49745+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
49746+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
49747+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
49748+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
49749+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
49750+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49751+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49752+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49755+4 4 4 4 4 4
49756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
49757+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
49758+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
49759+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
49760+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
49761+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
49762+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
49763+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
49764+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49765+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49769+4 4 4 4 4 4
49770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49771+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
49772+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
49773+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
49774+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
49775+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
49776+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
49777+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
49778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49783+4 4 4 4 4 4
49784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49785+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
49786+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
49787+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
49788+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
49789+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
49790+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
49791+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
49792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49797+4 4 4 4 4 4
49798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49800+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
49801+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
49802+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
49803+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
49804+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
49805+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
49806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49811+4 4 4 4 4 4
49812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
49815+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
49816+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
49817+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
49818+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
49819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49825+4 4 4 4 4 4
49826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49829+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
49830+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
49831+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
49832+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
49833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49839+4 4 4 4 4 4
49840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49843+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
49844+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
49845+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
49846+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
49847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49848+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49853+4 4 4 4 4 4
49854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49857+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
49858+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
49859+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
49860+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
49861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49862+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49866+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49867+4 4 4 4 4 4
49868+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49869+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49871+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
49872+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
49873+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
49874+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
49875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49876+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49880+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49881+4 4 4 4 4 4
49882+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49883+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49884+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49885+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49886+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
49887+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
49888+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
49889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49890+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49891+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49894+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49895+4 4 4 4 4 4
49896+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49897+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49898+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49899+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49900+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
49901+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
49902+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49903+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49904+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49905+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49908+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49909+4 4 4 4 4 4
49910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49913+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49914+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
49915+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
49916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49918+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49919+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49923+4 4 4 4 4 4
49924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49925+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49927+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49928+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
49929+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
49930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49931+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49932+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49933+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49937+4 4 4 4 4 4
49938diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
49939index fe92eed..106e085 100644
49940--- a/drivers/video/mb862xx/mb862xxfb_accel.c
49941+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
49942@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
49943 struct mb862xxfb_par *par = info->par;
49944
49945 if (info->var.bits_per_pixel == 32) {
49946- info->fbops->fb_fillrect = cfb_fillrect;
49947- info->fbops->fb_copyarea = cfb_copyarea;
49948- info->fbops->fb_imageblit = cfb_imageblit;
49949+ pax_open_kernel();
49950+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
49951+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
49952+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
49953+ pax_close_kernel();
49954 } else {
49955 outreg(disp, GC_L0EM, 3);
49956- info->fbops->fb_fillrect = mb86290fb_fillrect;
49957- info->fbops->fb_copyarea = mb86290fb_copyarea;
49958- info->fbops->fb_imageblit = mb86290fb_imageblit;
49959+ pax_open_kernel();
49960+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
49961+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
49962+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
49963+ pax_close_kernel();
49964 }
49965 outreg(draw, GDC_REG_DRAW_BASE, 0);
49966 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
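The mb862xx hunk above, and the nvidia, s1d13xxx, smscufx, udlfb, uvesafb and vesafb hunks that follow, all apply the same PaX KERNEXEC idiom: the constify plugin places ops structures such as struct fb_ops in read-only memory, so any late rebinding of their function pointers has to be bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection, and the store itself goes through a *(void **)& cast to defeat the compile-time const qualifier. Below is a minimal userspace analogue of that discipline, assuming only POSIX mmap/mprotect; the names ops_page-style struct demo_ops, op_checked and op_fast are illustrative and not from the patch. In PaX itself the write window is opened differently (e.g. by toggling CR0.WP on x86) rather than by changing page permissions, but the pattern is the same.

/* Userspace sketch of the pax_open_kernel()/pax_close_kernel() idiom:
 * keep a function-pointer table on a read-only page and open a short
 * write window only when a pointer must be rebound. */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct demo_ops { int (*op)(int); };

static int op_checked(int x) { return x + 1; }
static int op_fast(int x)    { return x << 1; }

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	struct demo_ops *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
				    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ops == MAP_FAILED)
		return 1;

	ops->op = op_checked;
	mprotect(ops, pagesz, PROT_READ);	/* table is now read-only */

	/* ...a stray write to ops->op here would fault with SIGSEGV... */

	mprotect(ops, pagesz, PROT_READ | PROT_WRITE);	/* "pax_open_kernel()"  */
	ops->op = op_fast;				/* rebind while writable */
	mprotect(ops, pagesz, PROT_READ);		/* "pax_close_kernel()" */

	printf("%d\n", ops->op(21));			/* prints 42 */
	return 0;
}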
49967diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
49968index ff22871..b129bed 100644
49969--- a/drivers/video/nvidia/nvidia.c
49970+++ b/drivers/video/nvidia/nvidia.c
49971@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
49972 info->fix.line_length = (info->var.xres_virtual *
49973 info->var.bits_per_pixel) >> 3;
49974 if (info->var.accel_flags) {
49975- info->fbops->fb_imageblit = nvidiafb_imageblit;
49976- info->fbops->fb_fillrect = nvidiafb_fillrect;
49977- info->fbops->fb_copyarea = nvidiafb_copyarea;
49978- info->fbops->fb_sync = nvidiafb_sync;
49979+ pax_open_kernel();
49980+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
49981+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
49982+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
49983+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
49984+ pax_close_kernel();
49985 info->pixmap.scan_align = 4;
49986 info->flags &= ~FBINFO_HWACCEL_DISABLED;
49987 info->flags |= FBINFO_READS_FAST;
49988 NVResetGraphics(info);
49989 } else {
49990- info->fbops->fb_imageblit = cfb_imageblit;
49991- info->fbops->fb_fillrect = cfb_fillrect;
49992- info->fbops->fb_copyarea = cfb_copyarea;
49993- info->fbops->fb_sync = NULL;
49994+ pax_open_kernel();
49995+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
49996+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
49997+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
49998+ *(void **)&info->fbops->fb_sync = NULL;
49999+ pax_close_kernel();
50000 info->pixmap.scan_align = 1;
50001 info->flags |= FBINFO_HWACCEL_DISABLED;
50002 info->flags &= ~FBINFO_READS_FAST;
50003@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
50004 info->pixmap.size = 8 * 1024;
50005 info->pixmap.flags = FB_PIXMAP_SYSTEM;
50006
50007- if (!hwcur)
50008- info->fbops->fb_cursor = NULL;
50009+ if (!hwcur) {
50010+ pax_open_kernel();
50011+ *(void **)&info->fbops->fb_cursor = NULL;
50012+ pax_close_kernel();
50013+ }
50014
50015 info->var.accel_flags = (!noaccel);
50016
50017diff --git a/drivers/video/output.c b/drivers/video/output.c
50018index 0d6f2cd..6285b97 100644
50019--- a/drivers/video/output.c
50020+++ b/drivers/video/output.c
50021@@ -97,7 +97,7 @@ struct output_device *video_output_register(const char *name,
50022 new_dev->props = op;
50023 new_dev->dev.class = &video_output_class;
50024 new_dev->dev.parent = dev;
50025- dev_set_name(&new_dev->dev, name);
50026+ dev_set_name(&new_dev->dev, "%s", name);
50027 dev_set_drvdata(&new_dev->dev, devdata);
50028 ret_code = device_register(&new_dev->dev);
50029 if (ret_code) {
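The one-line output.c change is a classic format-string hardening fix: dev_set_name() takes a printf-style format, so passing the caller-supplied name directly lets any '%' sequence in the name be interpreted as a conversion specifier. A minimal userspace illustration with printf follows; the "eth%x" value is hypothetical. Compilers flag the unsafe form with -Wformat-security, which is how many of these kernel call sites were found.

/* Why dev_set_name(dev, name) is unsafe and dev_set_name(dev, "%s", name)
 * is not: the second argument is a format string. */
#include <stdio.h>

int main(void)
{
	const char *name = "eth%x";	/* attacker- or user-influenced name */

	printf("%s\n", name);	/* safe: prints the literal "eth%x"        */
	printf(name);		/* unsafe: %x consumes a nonexistent vararg */
	printf("\n");
	return 0;
}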
50030diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
50031index 05c2dc3..ea1f391 100644
50032--- a/drivers/video/s1d13xxxfb.c
50033+++ b/drivers/video/s1d13xxxfb.c
50034@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
50035
50036 switch(prod_id) {
50037 case S1D13506_PROD_ID: /* activate acceleration */
50038- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
50039- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
50040+ pax_open_kernel();
50041+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
50042+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
50043+ pax_close_kernel();
50044 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
50045 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
50046 break;
50047diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
50048index b2b33fc..f9f4658 100644
50049--- a/drivers/video/smscufx.c
50050+++ b/drivers/video/smscufx.c
50051@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
50052 fb_deferred_io_cleanup(info);
50053 kfree(info->fbdefio);
50054 info->fbdefio = NULL;
50055- info->fbops->fb_mmap = ufx_ops_mmap;
50056+ pax_open_kernel();
50057+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
50058+ pax_close_kernel();
50059 }
50060
50061 pr_debug("released /dev/fb%d user=%d count=%d",
50062diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
50063index ec03e72..f578436 100644
50064--- a/drivers/video/udlfb.c
50065+++ b/drivers/video/udlfb.c
50066@@ -623,11 +623,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
50067 dlfb_urb_completion(urb);
50068
50069 error:
50070- atomic_add(bytes_sent, &dev->bytes_sent);
50071- atomic_add(bytes_identical, &dev->bytes_identical);
50072- atomic_add(width*height*2, &dev->bytes_rendered);
50073+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
50074+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
50075+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
50076 end_cycles = get_cycles();
50077- atomic_add(((unsigned int) ((end_cycles - start_cycles)
50078+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
50079 >> 10)), /* Kcycles */
50080 &dev->cpu_kcycles_used);
50081
50082@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
50083 dlfb_urb_completion(urb);
50084
50085 error:
50086- atomic_add(bytes_sent, &dev->bytes_sent);
50087- atomic_add(bytes_identical, &dev->bytes_identical);
50088- atomic_add(bytes_rendered, &dev->bytes_rendered);
50089+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
50090+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
50091+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
50092 end_cycles = get_cycles();
50093- atomic_add(((unsigned int) ((end_cycles - start_cycles)
50094+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
50095 >> 10)), /* Kcycles */
50096 &dev->cpu_kcycles_used);
50097 }
50098@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
50099 fb_deferred_io_cleanup(info);
50100 kfree(info->fbdefio);
50101 info->fbdefio = NULL;
50102- info->fbops->fb_mmap = dlfb_ops_mmap;
50103+ pax_open_kernel();
50104+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
50105+ pax_close_kernel();
50106 }
50107
50108 pr_warn("released /dev/fb%d user=%d count=%d\n",
50109@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
50110 struct fb_info *fb_info = dev_get_drvdata(fbdev);
50111 struct dlfb_data *dev = fb_info->par;
50112 return snprintf(buf, PAGE_SIZE, "%u\n",
50113- atomic_read(&dev->bytes_rendered));
50114+ atomic_read_unchecked(&dev->bytes_rendered));
50115 }
50116
50117 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
50118@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
50119 struct fb_info *fb_info = dev_get_drvdata(fbdev);
50120 struct dlfb_data *dev = fb_info->par;
50121 return snprintf(buf, PAGE_SIZE, "%u\n",
50122- atomic_read(&dev->bytes_identical));
50123+ atomic_read_unchecked(&dev->bytes_identical));
50124 }
50125
50126 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
50127@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
50128 struct fb_info *fb_info = dev_get_drvdata(fbdev);
50129 struct dlfb_data *dev = fb_info->par;
50130 return snprintf(buf, PAGE_SIZE, "%u\n",
50131- atomic_read(&dev->bytes_sent));
50132+ atomic_read_unchecked(&dev->bytes_sent));
50133 }
50134
50135 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
50136@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
50137 struct fb_info *fb_info = dev_get_drvdata(fbdev);
50138 struct dlfb_data *dev = fb_info->par;
50139 return snprintf(buf, PAGE_SIZE, "%u\n",
50140- atomic_read(&dev->cpu_kcycles_used));
50141+ atomic_read_unchecked(&dev->cpu_kcycles_used));
50142 }
50143
50144 static ssize_t edid_show(
50145@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
50146 struct fb_info *fb_info = dev_get_drvdata(fbdev);
50147 struct dlfb_data *dev = fb_info->par;
50148
50149- atomic_set(&dev->bytes_rendered, 0);
50150- atomic_set(&dev->bytes_identical, 0);
50151- atomic_set(&dev->bytes_sent, 0);
50152- atomic_set(&dev->cpu_kcycles_used, 0);
50153+ atomic_set_unchecked(&dev->bytes_rendered, 0);
50154+ atomic_set_unchecked(&dev->bytes_identical, 0);
50155+ atomic_set_unchecked(&dev->bytes_sent, 0);
50156+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
50157
50158 return count;
50159 }
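The udlfb hunks above switch the driver's pure statistics counters from atomic_t to grsecurity's atomic_unchecked_t. Under the PaX REFCOUNT feature, plain atomic_add() is instrumented to detect overflow, since a wrapped reference count typically leads to use-after-free; counters such as bytes_sent are expected to wrap harmlessly, so they use the *_unchecked variants that keep the old wrapping semantics. Here is a userspace sketch of the distinction using GCC's __builtin_add_overflow; the function names are illustrative and the real kernel atomics are of course implemented differently.

/* Sketch: an overflow-trapping add for reference counts vs. a plain
 * wrapping add for statistics, mirroring atomic_add() under PaX
 * REFCOUNT vs. atomic_add_unchecked(). */
#include <stdio.h>
#include <stdlib.h>

static int add_checked(int *v, int inc)	/* refcount-style: trap on wrap */
{
	int ret;
	if (__builtin_add_overflow(*v, inc, &ret)) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();	/* PaX would kill the offending task */
	}
	return *v = ret;
}

static unsigned int add_unchecked(unsigned int *v, unsigned int inc)
{
	return *v += inc;	/* statistics: wrapping is harmless */
}

int main(void)
{
	unsigned int bytes_sent = 0xfffffff0u;
	int refs = 0x7ffffffe;

	printf("%u\n", add_unchecked(&bytes_sent, 0x20));	/* wraps quietly */
	add_checked(&refs, 1);		/* ok: reaches 0x7fffffff     */
	add_checked(&refs, 1);		/* overflows, aborts the demo */
	return 0;
}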
50160diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
50161index e328a61..1b08ecb 100644
50162--- a/drivers/video/uvesafb.c
50163+++ b/drivers/video/uvesafb.c
50164@@ -19,6 +19,7 @@
50165 #include <linux/io.h>
50166 #include <linux/mutex.h>
50167 #include <linux/slab.h>
50168+#include <linux/moduleloader.h>
50169 #include <video/edid.h>
50170 #include <video/uvesafb.h>
50171 #ifdef CONFIG_X86
50172@@ -569,10 +570,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
50173 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
50174 par->pmi_setpal = par->ypan = 0;
50175 } else {
50176+
50177+#ifdef CONFIG_PAX_KERNEXEC
50178+#ifdef CONFIG_MODULES
50179+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
50180+#endif
50181+ if (!par->pmi_code) {
50182+ par->pmi_setpal = par->ypan = 0;
50183+ return 0;
50184+ }
50185+#endif
50186+
50187 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
50188 + task->t.regs.edi);
50189+
50190+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
50191+ pax_open_kernel();
50192+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
50193+ pax_close_kernel();
50194+
50195+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
50196+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
50197+#else
50198 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
50199 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
50200+#endif
50201+
50202 printk(KERN_INFO "uvesafb: protected mode interface info at "
50203 "%04x:%04x\n",
50204 (u16)task->t.regs.es, (u16)task->t.regs.edi);
50205@@ -817,13 +840,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
50206 par->ypan = ypan;
50207
50208 if (par->pmi_setpal || par->ypan) {
50209+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
50210 if (__supported_pte_mask & _PAGE_NX) {
50211 par->pmi_setpal = par->ypan = 0;
50212 printk(KERN_WARNING "uvesafb: NX protection is actively."
50213 "We have better not to use the PMI.\n");
50214- } else {
50215+ } else
50216+#endif
50217 uvesafb_vbe_getpmi(task, par);
50218- }
50219 }
50220 #else
50221 /* The protected mode interface is not available on non-x86. */
50222@@ -1457,8 +1481,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
50223 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
50224
50225 /* Disable blanking if the user requested so. */
50226- if (!blank)
50227- info->fbops->fb_blank = NULL;
50228+ if (!blank) {
50229+ pax_open_kernel();
50230+ *(void **)&info->fbops->fb_blank = NULL;
50231+ pax_close_kernel();
50232+ }
50233
50234 /*
50235 * Find out how much IO memory is required for the mode with
50236@@ -1534,8 +1561,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
50237 info->flags = FBINFO_FLAG_DEFAULT |
50238 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
50239
50240- if (!par->ypan)
50241- info->fbops->fb_pan_display = NULL;
50242+ if (!par->ypan) {
50243+ pax_open_kernel();
50244+ *(void **)&info->fbops->fb_pan_display = NULL;
50245+ pax_close_kernel();
50246+ }
50247 }
50248
50249 static void uvesafb_init_mtrr(struct fb_info *info)
50250@@ -1836,6 +1866,11 @@ out:
50251 if (par->vbe_modes)
50252 kfree(par->vbe_modes);
50253
50254+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
50255+ if (par->pmi_code)
50256+ module_free_exec(NULL, par->pmi_code);
50257+#endif
50258+
50259 framebuffer_release(info);
50260 return err;
50261 }
50262@@ -1862,6 +1897,12 @@ static int uvesafb_remove(struct platform_device *dev)
50263 kfree(par->vbe_state_orig);
50264 if (par->vbe_state_saved)
50265 kfree(par->vbe_state_saved);
50266+
50267+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
50268+ if (par->pmi_code)
50269+ module_free_exec(NULL, par->pmi_code);
50270+#endif
50271+
50272 }
50273
50274 framebuffer_release(info);
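Under KERNEXEC the x86 BIOS protected-mode interface (PMI) code can no longer be executed in place out of writable low memory, so the uvesafb hunk above copies it into an allocation obtained from module_alloc_exec() and fixes up the pmi_start/pmi_pal entry offsets; module_alloc_exec() and ktva_ktla() are PaX-specific helpers (the latter translating between the kernel text alias and its writable linear mapping), and the vesafb hunk below repeats the same scheme. The underlying pattern, copy a code blob while the destination is writable and only then make it executable, can be shown in userspace; the sketch below assumes x86-64 Linux, where the blob bytes encode "mov eax, 42; ret".

/* Sketch of copying a code blob into fresh memory and executing it with
 * W^X discipline: write first, then flip the mapping to read+execute.
 * x86-64 only. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static const unsigned char blob[] = {
	0xb8, 0x2a, 0x00, 0x00, 0x00,	/* mov eax, 42 */
	0xc3				/* ret         */
};

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	void *code = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (code == MAP_FAILED)
		return 1;

	memcpy(code, blob, sizeof(blob));		/* the "pmi_code" copy */
	if (mprotect(code, pagesz, PROT_READ | PROT_EXEC) != 0)
		return 1;				/* never writable+exec */

	int (*pmi)(void) = (int (*)(void))code;
	printf("%d\n", pmi());				/* prints 42 */
	return 0;
}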
50275diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
50276index 501b340..d80aa17 100644
50277--- a/drivers/video/vesafb.c
50278+++ b/drivers/video/vesafb.c
50279@@ -9,6 +9,7 @@
50280 */
50281
50282 #include <linux/module.h>
50283+#include <linux/moduleloader.h>
50284 #include <linux/kernel.h>
50285 #include <linux/errno.h>
50286 #include <linux/string.h>
50287@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
50288 static int vram_total __initdata; /* Set total amount of memory */
50289 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
50290 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
50291-static void (*pmi_start)(void) __read_mostly;
50292-static void (*pmi_pal) (void) __read_mostly;
50293+static void (*pmi_start)(void) __read_only;
50294+static void (*pmi_pal) (void) __read_only;
50295 static int depth __read_mostly;
50296 static int vga_compat __read_mostly;
50297 /* --------------------------------------------------------------------- */
50298@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
50299 unsigned int size_vmode;
50300 unsigned int size_remap;
50301 unsigned int size_total;
50302+ void *pmi_code = NULL;
50303
50304 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
50305 return -ENODEV;
50306@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
50307 size_remap = size_total;
50308 vesafb_fix.smem_len = size_remap;
50309
50310-#ifndef __i386__
50311- screen_info.vesapm_seg = 0;
50312-#endif
50313-
50314 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
50315 printk(KERN_WARNING
50316 "vesafb: cannot reserve video memory at 0x%lx\n",
50317@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
50318 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
50319 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
50320
50321+#ifdef __i386__
50322+
50323+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
50324+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
50325+ if (!pmi_code)
50326+#elif !defined(CONFIG_PAX_KERNEXEC)
50327+ if (0)
50328+#endif
50329+
50330+#endif
50331+ screen_info.vesapm_seg = 0;
50332+
50333 if (screen_info.vesapm_seg) {
50334- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
50335- screen_info.vesapm_seg,screen_info.vesapm_off);
50336+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
50337+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
50338 }
50339
50340 if (screen_info.vesapm_seg < 0xc000)
50341@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
50342
50343 if (ypan || pmi_setpal) {
50344 unsigned short *pmi_base;
50345+
50346 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
50347- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
50348- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
50349+
50350+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
50351+ pax_open_kernel();
50352+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
50353+#else
50354+ pmi_code = pmi_base;
50355+#endif
50356+
50357+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
50358+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
50359+
50360+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
50361+ pmi_start = ktva_ktla(pmi_start);
50362+ pmi_pal = ktva_ktla(pmi_pal);
50363+ pax_close_kernel();
50364+#endif
50365+
50366 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
50367 if (pmi_base[3]) {
50368 printk(KERN_INFO "vesafb: pmi: ports = ");
50369@@ -472,8 +498,11 @@ static int __init vesafb_probe(struct platform_device *dev)
50370 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
50371 (ypan ? FBINFO_HWACCEL_YPAN : 0);
50372
50373- if (!ypan)
50374- info->fbops->fb_pan_display = NULL;
50375+ if (!ypan) {
50376+ pax_open_kernel();
50377+ *(void **)&info->fbops->fb_pan_display = NULL;
50378+ pax_close_kernel();
50379+ }
50380
50381 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
50382 err = -ENOMEM;
50383@@ -488,6 +517,11 @@ static int __init vesafb_probe(struct platform_device *dev)
50384 info->node, info->fix.id);
50385 return 0;
50386 err:
50387+
50388+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
50389+ module_free_exec(NULL, pmi_code);
50390+#endif
50391+
50392 if (info->screen_base)
50393 iounmap(info->screen_base);
50394 framebuffer_release(info);
50395diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
50396index 88714ae..16c2e11 100644
50397--- a/drivers/video/via/via_clock.h
50398+++ b/drivers/video/via/via_clock.h
50399@@ -56,7 +56,7 @@ struct via_clock {
50400
50401 void (*set_engine_pll_state)(u8 state);
50402 void (*set_engine_pll)(struct via_pll_config config);
50403-};
50404+} __no_const;
50405
50406
50407 static inline u32 get_pll_internal_frequency(u32 ref_freq,
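The via_clock.h change goes in the opposite direction: struct via_clock is all function pointers, which the PaX constify plugin would normally force into read-only memory, but via_clock instances are filled in at probe time on ordinary writable storage, so the patch tags the type __no_const to exempt it. The following compile-and-run sketch shows the difference constification makes; the __no_const attribute itself is provided by the GCC plugin and has no portable spelling, so the "probed" case below simply stands in for it.

/* Sketch: why an all-function-pointer struct sometimes needs __no_const.
 * A constified type can only be initialized statically; an instance
 * populated at probe time must stay writable. */
#include <stdio.h>

struct clk_ops { void (*set_state)(int); };

static void set_state_impl(int s) { printf("state=%d\n", s); }

/* Constified flavor: fully initialized at definition, read-only after. */
static const struct clk_ops fixed_ops = { .set_state = set_state_impl };

int main(void)
{
	struct clk_ops probed_ops;		/* the "__no_const" case  */

	probed_ops.set_state = set_state_impl;	/* legal: filled at probe */
	/* fixed_ops.set_state = set_state_impl;   would not compile:
	 *				assignment of read-only member */
	fixed_ops.set_state(1);
	probed_ops.set_state(2);
	return 0;
}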
50408diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
50409index fef20db..d28b1ab 100644
50410--- a/drivers/xen/xenfs/xenstored.c
50411+++ b/drivers/xen/xenfs/xenstored.c
50412@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
50413 static int xsd_kva_open(struct inode *inode, struct file *file)
50414 {
50415 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
50416+#ifdef CONFIG_GRKERNSEC_HIDESYM
50417+ NULL);
50418+#else
50419 xen_store_interface);
50420+#endif
50421+
50422 if (!file->private_data)
50423 return -ENOMEM;
50424 return 0;
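The xenstored change is a GRKERNSEC_HIDESYM information-leak fix: the xenfs kva file printed the kernel virtual address of xen_store_interface with "%p", handing any reader a pointer useful for defeating address-space randomization, so under HIDESYM it formats NULL instead. (The autofs4 hunk further down replaces a "%p"-derived dummy mount name with a plain atomic counter for the same reason.) Mainline later addressed this whole class of leak by hashing "%p" output and reserving "%px" for raw addresses. A userspace illustration of leaking versus masking:

/* Sketch: printing a raw pointer leaks layout information; printing an
 * opaque per-object token identifies the object without revealing
 * where it lives. */
#include <stdio.h>

static int secret_object;
static unsigned long dummy_id;

int main(void)
{
	printf("leaky : %p\n", (void *)&secret_object);	/* real address */
	printf("hidden: %#lx\n", ++dummy_id);		/* opaque token */
	return 0;
}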
50425diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
50426index 055562c..fdfb10d 100644
50427--- a/fs/9p/vfs_addr.c
50428+++ b/fs/9p/vfs_addr.c
50429@@ -186,7 +186,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
50430
50431 retval = v9fs_file_write_internal(inode,
50432 v9inode->writeback_fid,
50433- (__force const char __user *)buffer,
50434+ (const char __force_user *)buffer,
50435 len, &offset, 0);
50436 if (retval > 0)
50437 retval = 0;
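The 9p change only adjusts sparse address-space annotations: the buffer is a kernel pointer being passed to a function that expects a __user pointer, and grsecurity spells that cast with __force_user, which in effect combines sparse's __force and __user qualifiers so its stricter checker accepts the deliberate crossing. The macros compile away outside sparse; the sketch below mirrors how such annotations are defined and used, with copy_out standing in for a copy-to-user-style helper (an assumption, not the kernel's API).

/* Sketch: sparse address-space annotations.  Under sparse, __user tags
 * a pointer as userspace and mixing spaces is an error unless __force
 * is used; under a normal compiler they expand to nothing. */
#include <stdio.h>
#include <string.h>

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif

/* A copyout-style helper that only accepts user pointers. */
static long copy_out(char __user *dst, const char *src, size_t n)
{
	/* A real implementation would fault in user pages; this sketch
	 * just copies, since dst is actually a host buffer. */
	memcpy((char __force *)dst, src, n);
	return 0;
}

int main(void)
{
	char buf[8];

	/* A kernel buffer passed where a __user pointer is expected:
	 * sparse complains unless the cast carries __force. */
	copy_out((char __force __user *)buf, "hi", 3);
	puts(buf);
	return 0;
}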
50438diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
50439index d86edc8..40ff2fb 100644
50440--- a/fs/9p/vfs_inode.c
50441+++ b/fs/9p/vfs_inode.c
50442@@ -1314,7 +1314,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
50443 void
50444 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
50445 {
50446- char *s = nd_get_link(nd);
50447+ const char *s = nd_get_link(nd);
50448
50449 p9_debug(P9_DEBUG_VFS, " %s %s\n",
50450 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
50451diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
50452index 370b24c..ff0be7b 100644
50453--- a/fs/Kconfig.binfmt
50454+++ b/fs/Kconfig.binfmt
50455@@ -103,7 +103,7 @@ config HAVE_AOUT
50456
50457 config BINFMT_AOUT
50458 tristate "Kernel support for a.out and ECOFF binaries"
50459- depends on HAVE_AOUT
50460+ depends on HAVE_AOUT && BROKEN
50461 ---help---
50462 A.out (Assembler.OUTput) is a set of formats for libraries and
50463 executables used in the earliest versions of UNIX. Linux used
50464diff --git a/fs/aio.c b/fs/aio.c
50465index 2bbcacf..8614116 100644
50466--- a/fs/aio.c
50467+++ b/fs/aio.c
50468@@ -160,7 +160,7 @@ static int aio_setup_ring(struct kioctx *ctx)
50469 size += sizeof(struct io_event) * nr_events;
50470 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
50471
50472- if (nr_pages < 0)
50473+ if (nr_pages <= 0)
50474 return -EINVAL;
50475
50476 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
50477@@ -950,6 +950,7 @@ static ssize_t aio_rw_vect_retry(struct kiocb *iocb, int rw, aio_rw_op *rw_op)
50478 static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
50479 {
50480 ssize_t ret;
50481+ struct iovec iovstack;
50482
50483 kiocb->ki_nr_segs = kiocb->ki_nbytes;
50484
50485@@ -957,17 +958,22 @@ static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
50486 if (compat)
50487 ret = compat_rw_copy_check_uvector(rw,
50488 (struct compat_iovec __user *)kiocb->ki_buf,
50489- kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
50490+ kiocb->ki_nr_segs, 1, &iovstack,
50491 &kiocb->ki_iovec);
50492 else
50493 #endif
50494 ret = rw_copy_check_uvector(rw,
50495 (struct iovec __user *)kiocb->ki_buf,
50496- kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
50497+ kiocb->ki_nr_segs, 1, &iovstack,
50498 &kiocb->ki_iovec);
50499 if (ret < 0)
50500 return ret;
50501
50502+ if (kiocb->ki_iovec == &iovstack) {
50503+ kiocb->ki_inline_vec = iovstack;
50504+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
50505+ }
50506+
50507 /* ki_nbytes now reflect bytes instead of segs */
50508 kiocb->ki_nbytes = ret;
50509 return 0;
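Besides rejecting nr_pages == 0 rather than only negative values, the aio diff reworks aio_setup_vectored_rw(): rw_copy_check_uvector() may fill the caller-supplied "fast" iovec instead of allocating one, and the original code passed &kiocb->ki_inline_vec directly, so a failed or partial validation could leave a half-written vector inside the long-lived request. The patch validates into a stack iovec and commits it into the kiocb only after success. A sketch of that pattern, with illustrative names:

/* Sketch: validate into a scratch stack buffer first, and only commit
 * to the long-lived object once validation has fully succeeded. */
#include <stdio.h>

struct vec { void *base; size_t len; };
struct request { struct vec inline_vec; struct vec *vec; };

/* May use the caller's fast slot, or fail before touching *out. */
static int check_vector(struct vec *fast, struct vec **out,
			void *base, size_t len)
{
	if (len == 0)
		return -1;
	fast->base = base;
	fast->len  = len;
	*out = fast;
	return 0;
}

static int setup_request(struct request *req, void *base, size_t len)
{
	struct vec stackvec;		/* scratch, dies with this frame */

	if (check_vector(&stackvec, &req->vec, base, len) < 0)
		return -1;		/* req->inline_vec never dirtied */

	if (req->vec == &stackvec) {	/* commit only on full success */
		req->inline_vec = stackvec;
		req->vec = &req->inline_vec;
	}
	return 0;
}

int main(void)
{
	struct request req = { { NULL, 0 }, NULL };
	char buf[4];

	printf("bad : %d\n", setup_request(&req, buf, 0));
	printf("good: %d, len=%zu\n",
	       setup_request(&req, buf, sizeof(buf)), req.vec->len);
	return 0;
}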
50510diff --git a/fs/attr.c b/fs/attr.c
50511index 1449adb..a2038c2 100644
50512--- a/fs/attr.c
50513+++ b/fs/attr.c
50514@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
50515 unsigned long limit;
50516
50517 limit = rlimit(RLIMIT_FSIZE);
50518+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
50519 if (limit != RLIM_INFINITY && offset > limit)
50520 goto out_sig;
50521 if (offset > inode->i_sb->s_maxbytes)
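fs/attr.c gains a gr_learn_resource() call: grsecurity's RBAC learning mode records that the task needed RLIMIT_FSIZE of at least offset before the normal limit check runs, so an auto-generated policy can later grant exactly that much. The enforcement half of the pattern is just the rlimit comparison that inode_newsize_ok() already performs; a userspace equivalent using getrlimit follows, with a hypothetical 1 GiB target size.

/* Sketch: the enforcement side of the RLIMIT_FSIZE check; the grsec
 * hook merely records the requested value for policy learning before
 * this comparison runs. */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;
	unsigned long long offset = 1ULL << 30;	/* proposed new file size */

	if (getrlimit(RLIMIT_FSIZE, &rl) != 0)
		return 1;

	if (rl.rlim_cur != RLIM_INFINITY && offset > rl.rlim_cur)
		printf("would raise SIGXFSZ: %llu > %llu\n",
		       offset, (unsigned long long)rl.rlim_cur);
	else
		printf("within RLIMIT_FSIZE\n");
	return 0;
}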
50522diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
50523index 3db70da..7aeec5b 100644
50524--- a/fs/autofs4/waitq.c
50525+++ b/fs/autofs4/waitq.c
50526@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
50527 {
50528 unsigned long sigpipe, flags;
50529 mm_segment_t fs;
50530- const char *data = (const char *)addr;
50531+ const char __user *data = (const char __force_user *)addr;
50532 ssize_t wr = 0;
50533
50534 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
50535@@ -346,6 +346,10 @@ static int validate_request(struct autofs_wait_queue **wait,
50536 return 1;
50537 }
50538
50539+#ifdef CONFIG_GRKERNSEC_HIDESYM
50540+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
50541+#endif
50542+
50543 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
50544 enum autofs_notify notify)
50545 {
50546@@ -379,7 +383,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
50547
50548 /* If this is a direct mount request create a dummy name */
50549 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
50550+#ifdef CONFIG_GRKERNSEC_HIDESYM
50551+ /* this name does get written to userland via autofs4_write() */
50552+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
50553+#else
50554 qstr.len = sprintf(name, "%p", dentry);
50555+#endif
50556 else {
50557 qstr.len = autofs4_getpath(sbi, dentry, &name);
50558 if (!qstr.len) {
50559diff --git a/fs/befs/endian.h b/fs/befs/endian.h
50560index 2722387..c8dd2a7 100644
50561--- a/fs/befs/endian.h
50562+++ b/fs/befs/endian.h
50563@@ -11,7 +11,7 @@
50564
50565 #include <asm/byteorder.h>
50566
50567-static inline u64
50568+static inline u64 __intentional_overflow(-1)
50569 fs64_to_cpu(const struct super_block *sb, fs64 n)
50570 {
50571 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
50572@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
50573 return (__force fs64)cpu_to_be64(n);
50574 }
50575
50576-static inline u32
50577+static inline u32 __intentional_overflow(-1)
50578 fs32_to_cpu(const struct super_block *sb, fs32 n)
50579 {
50580 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
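The befs annotations serve the size_overflow GCC plugin: fs64_to_cpu() and fs32_to_cpu() legitimately reinterpret on-disk byte patterns, so __intentional_overflow(-1) marks their results as exempt from the plugin's arithmetic-overflow instrumentation. The conversions themselves are ordinary endian swaps keyed off the superblock byte order; a host-side equivalent is sketched below using glibc's <endian.h> helpers, so it is not strictly portable.

/* Sketch: byte-order-dependent 64-bit conversion, the operation that
 * __intentional_overflow(-1) exempts from size_overflow checking. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

enum byteorder { FS_LE, FS_BE };

static uint64_t fs64_to_host(enum byteorder order, uint64_t raw)
{
	return order == FS_LE ? le64toh(raw) : be64toh(raw);
}

int main(void)
{
	/* A sample 64-bit pattern whose meaning depends on fs byte order. */
	uint64_t on_disk = 0x0100000000000000ULL;

	printf("as LE: %llu\n",
	       (unsigned long long)fs64_to_host(FS_LE, on_disk));
	printf("as BE: %llu\n",
	       (unsigned long long)fs64_to_host(FS_BE, on_disk));
	return 0;
}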
50581diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
50582index f95dddc..b1e2c1c 100644
50583--- a/fs/befs/linuxvfs.c
50584+++ b/fs/befs/linuxvfs.c
50585@@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
50586 {
50587 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
50588 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
50589- char *link = nd_get_link(nd);
50590+ const char *link = nd_get_link(nd);
50591 if (!IS_ERR(link))
50592 kfree(link);
50593 }
50594diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
50595index bce8769..7fc7544 100644
50596--- a/fs/binfmt_aout.c
50597+++ b/fs/binfmt_aout.c
50598@@ -16,6 +16,7 @@
50599 #include <linux/string.h>
50600 #include <linux/fs.h>
50601 #include <linux/file.h>
50602+#include <linux/security.h>
50603 #include <linux/stat.h>
50604 #include <linux/fcntl.h>
50605 #include <linux/ptrace.h>
50606@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
50607 #endif
50608 # define START_STACK(u) ((void __user *)u.start_stack)
50609
50610+ memset(&dump, 0, sizeof(dump));
50611+
50612 fs = get_fs();
50613 set_fs(KERNEL_DS);
50614 has_dumped = 1;
50615@@ -69,10 +72,12 @@ static int aout_core_dump(struct coredump_params *cprm)
50616
50617 /* If the size of the dump file exceeds the rlimit, then see what would happen
50618 if we wrote the stack, but not the data area. */
50619+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
50620 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
50621 dump.u_dsize = 0;
50622
50623 /* Make sure we have enough room to write the stack and data areas. */
50624+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
50625 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
50626 dump.u_ssize = 0;
50627
50628@@ -233,6 +238,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
50629 rlim = rlimit(RLIMIT_DATA);
50630 if (rlim >= RLIM_INFINITY)
50631 rlim = ~0;
50632+
50633+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
50634 if (ex.a_data + ex.a_bss > rlim)
50635 return -ENOMEM;
50636
50637@@ -267,6 +274,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
50638
50639 install_exec_creds(bprm);
50640
50641+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
50642+ current->mm->pax_flags = 0UL;
50643+#endif
50644+
50645+#ifdef CONFIG_PAX_PAGEEXEC
50646+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
50647+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
50648+
50649+#ifdef CONFIG_PAX_EMUTRAMP
50650+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
50651+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
50652+#endif
50653+
50654+#ifdef CONFIG_PAX_MPROTECT
50655+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
50656+ current->mm->pax_flags |= MF_PAX_MPROTECT;
50657+#endif
50658+
50659+ }
50660+#endif
50661+
50662 if (N_MAGIC(ex) == OMAGIC) {
50663 unsigned long text_addr, map_size;
50664 loff_t pos;
50665@@ -324,7 +352,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
50666 }
50667
50668 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
50669- PROT_READ | PROT_WRITE | PROT_EXEC,
50670+ PROT_READ | PROT_WRITE,
50671 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
50672 fd_offset + ex.a_text);
50673 if (error != N_DATADDR(ex)) {
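binfmt_aout now derives per-process PaX flags from the a.out header: PAGEEXEC defaults on, individual N_FLAGS bits (F_PAX_PAGEEXEC and friends) opt features out or in, and the data segment mapping loses PROT_EXEC to match. The binfmt_elf hunks that follow do the same dance for ELF across three flag sources (the EI_PAX ident bytes, the PT_PAX_FLAGS program header, and the user.pax xattr). The translation itself is plain bit mapping; the sketch below uses illustrative flag values, not the real ones.

/* Sketch: translating opt-out feature bits from a binary's header into
 * a process flag mask, in the style of the a.out/ELF PaX flag parsing.
 * A set header bit means the feature is disabled for this binary. */
#include <stdio.h>

#define HDR_NOPAGEEXEC	0x01	/* illustrative values */
#define HDR_NOMPROTECT	0x02

#define MF_PAGEEXEC	0x01
#define MF_MPROTECT	0x02

static unsigned long parse_pax_flags(unsigned long hdr_flags)
{
	unsigned long pax_flags = 0;

	if (!(hdr_flags & HDR_NOPAGEEXEC))
		pax_flags |= MF_PAGEEXEC;	/* on unless opted out */
	if (!(hdr_flags & HDR_NOMPROTECT))
		pax_flags |= MF_MPROTECT;
	return pax_flags;
}

int main(void)
{
	printf("default : %#lx\n", parse_pax_flags(0));
	printf("no-mprot: %#lx\n", parse_pax_flags(HDR_NOMPROTECT));
	return 0;
}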
50674diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
50675index f8a0b0e..6f036ed 100644
50676--- a/fs/binfmt_elf.c
50677+++ b/fs/binfmt_elf.c
50678@@ -34,6 +34,7 @@
50679 #include <linux/utsname.h>
50680 #include <linux/coredump.h>
50681 #include <linux/sched.h>
50682+#include <linux/xattr.h>
50683 #include <asm/uaccess.h>
50684 #include <asm/param.h>
50685 #include <asm/page.h>
50686@@ -60,6 +61,14 @@ static int elf_core_dump(struct coredump_params *cprm);
50687 #define elf_core_dump NULL
50688 #endif
50689
50690+#ifdef CONFIG_PAX_MPROTECT
50691+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
50692+#endif
50693+
50694+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
50695+static void elf_handle_mmap(struct file *file);
50696+#endif
50697+
50698 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
50699 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
50700 #else
50701@@ -79,6 +88,15 @@ static struct linux_binfmt elf_format = {
50702 .load_binary = load_elf_binary,
50703 .load_shlib = load_elf_library,
50704 .core_dump = elf_core_dump,
50705+
50706+#ifdef CONFIG_PAX_MPROTECT
50707+ .handle_mprotect= elf_handle_mprotect,
50708+#endif
50709+
50710+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
50711+ .handle_mmap = elf_handle_mmap,
50712+#endif
50713+
50714 .min_coredump = ELF_EXEC_PAGESIZE,
50715 };
50716
50717@@ -86,6 +104,8 @@ static struct linux_binfmt elf_format = {
50718
50719 static int set_brk(unsigned long start, unsigned long end)
50720 {
50721+ unsigned long e = end;
50722+
50723 start = ELF_PAGEALIGN(start);
50724 end = ELF_PAGEALIGN(end);
50725 if (end > start) {
50726@@ -94,7 +114,7 @@ static int set_brk(unsigned long start, unsigned long end)
50727 if (BAD_ADDR(addr))
50728 return addr;
50729 }
50730- current->mm->start_brk = current->mm->brk = end;
50731+ current->mm->start_brk = current->mm->brk = e;
50732 return 0;
50733 }
50734
50735@@ -155,12 +175,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
50736 elf_addr_t __user *u_rand_bytes;
50737 const char *k_platform = ELF_PLATFORM;
50738 const char *k_base_platform = ELF_BASE_PLATFORM;
50739- unsigned char k_rand_bytes[16];
50740+ u32 k_rand_bytes[4];
50741 int items;
50742 elf_addr_t *elf_info;
50743 int ei_index = 0;
50744 const struct cred *cred = current_cred();
50745 struct vm_area_struct *vma;
50746+ unsigned long saved_auxv[AT_VECTOR_SIZE];
50747
50748 /*
50749 * In some cases (e.g. Hyper-Threading), we want to avoid L1
50750@@ -202,8 +223,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
50751 * Generate 16 random bytes for userspace PRNG seeding.
50752 */
50753 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
50754- u_rand_bytes = (elf_addr_t __user *)
50755- STACK_ALLOC(p, sizeof(k_rand_bytes));
50756+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
50757+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
50758+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
50759+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
50760+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
50761+ u_rand_bytes = (elf_addr_t __user *) p;
50762 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
50763 return -EFAULT;
50764
50765@@ -318,9 +343,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
50766 return -EFAULT;
50767 current->mm->env_end = p;
50768
50769+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
50770+
50771 /* Put the elf_info on the stack in the right place. */
50772 sp = (elf_addr_t __user *)envp + 1;
50773- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
50774+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
50775 return -EFAULT;
50776 return 0;
50777 }
50778@@ -388,15 +415,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
50779 an ELF header */
50780
50781 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
50782- struct file *interpreter, unsigned long *interp_map_addr,
50783- unsigned long no_base)
50784+ struct file *interpreter, unsigned long no_base)
50785 {
50786 struct elf_phdr *elf_phdata;
50787 struct elf_phdr *eppnt;
50788- unsigned long load_addr = 0;
50789+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
50790 int load_addr_set = 0;
50791 unsigned long last_bss = 0, elf_bss = 0;
50792- unsigned long error = ~0UL;
50793+ unsigned long error = -EINVAL;
50794 unsigned long total_size;
50795 int retval, i, size;
50796
50797@@ -442,6 +468,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
50798 goto out_close;
50799 }
50800
50801+#ifdef CONFIG_PAX_SEGMEXEC
50802+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
50803+ pax_task_size = SEGMEXEC_TASK_SIZE;
50804+#endif
50805+
50806 eppnt = elf_phdata;
50807 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
50808 if (eppnt->p_type == PT_LOAD) {
50809@@ -465,8 +496,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
50810 map_addr = elf_map(interpreter, load_addr + vaddr,
50811 eppnt, elf_prot, elf_type, total_size);
50812 total_size = 0;
50813- if (!*interp_map_addr)
50814- *interp_map_addr = map_addr;
50815 error = map_addr;
50816 if (BAD_ADDR(map_addr))
50817 goto out_close;
50818@@ -485,8 +514,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
50819 k = load_addr + eppnt->p_vaddr;
50820 if (BAD_ADDR(k) ||
50821 eppnt->p_filesz > eppnt->p_memsz ||
50822- eppnt->p_memsz > TASK_SIZE ||
50823- TASK_SIZE - eppnt->p_memsz < k) {
50824+ eppnt->p_memsz > pax_task_size ||
50825+ pax_task_size - eppnt->p_memsz < k) {
50826 error = -ENOMEM;
50827 goto out_close;
50828 }
50829@@ -538,6 +567,315 @@ out:
50830 return error;
50831 }
50832
50833+#ifdef CONFIG_PAX_PT_PAX_FLAGS
50834+#ifdef CONFIG_PAX_SOFTMODE
50835+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
50836+{
50837+ unsigned long pax_flags = 0UL;
50838+
50839+#ifdef CONFIG_PAX_PAGEEXEC
50840+ if (elf_phdata->p_flags & PF_PAGEEXEC)
50841+ pax_flags |= MF_PAX_PAGEEXEC;
50842+#endif
50843+
50844+#ifdef CONFIG_PAX_SEGMEXEC
50845+ if (elf_phdata->p_flags & PF_SEGMEXEC)
50846+ pax_flags |= MF_PAX_SEGMEXEC;
50847+#endif
50848+
50849+#ifdef CONFIG_PAX_EMUTRAMP
50850+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
50851+ pax_flags |= MF_PAX_EMUTRAMP;
50852+#endif
50853+
50854+#ifdef CONFIG_PAX_MPROTECT
50855+ if (elf_phdata->p_flags & PF_MPROTECT)
50856+ pax_flags |= MF_PAX_MPROTECT;
50857+#endif
50858+
50859+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
50860+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
50861+ pax_flags |= MF_PAX_RANDMMAP;
50862+#endif
50863+
50864+ return pax_flags;
50865+}
50866+#endif
50867+
50868+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
50869+{
50870+ unsigned long pax_flags = 0UL;
50871+
50872+#ifdef CONFIG_PAX_PAGEEXEC
50873+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
50874+ pax_flags |= MF_PAX_PAGEEXEC;
50875+#endif
50876+
50877+#ifdef CONFIG_PAX_SEGMEXEC
50878+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
50879+ pax_flags |= MF_PAX_SEGMEXEC;
50880+#endif
50881+
50882+#ifdef CONFIG_PAX_EMUTRAMP
50883+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
50884+ pax_flags |= MF_PAX_EMUTRAMP;
50885+#endif
50886+
50887+#ifdef CONFIG_PAX_MPROTECT
50888+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
50889+ pax_flags |= MF_PAX_MPROTECT;
50890+#endif
50891+
50892+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
50893+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
50894+ pax_flags |= MF_PAX_RANDMMAP;
50895+#endif
50896+
50897+ return pax_flags;
50898+}
50899+#endif
50900+
50901+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
50902+#ifdef CONFIG_PAX_SOFTMODE
50903+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
50904+{
50905+ unsigned long pax_flags = 0UL;
50906+
50907+#ifdef CONFIG_PAX_PAGEEXEC
50908+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
50909+ pax_flags |= MF_PAX_PAGEEXEC;
50910+#endif
50911+
50912+#ifdef CONFIG_PAX_SEGMEXEC
50913+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
50914+ pax_flags |= MF_PAX_SEGMEXEC;
50915+#endif
50916+
50917+#ifdef CONFIG_PAX_EMUTRAMP
50918+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
50919+ pax_flags |= MF_PAX_EMUTRAMP;
50920+#endif
50921+
50922+#ifdef CONFIG_PAX_MPROTECT
50923+ if (pax_flags_softmode & MF_PAX_MPROTECT)
50924+ pax_flags |= MF_PAX_MPROTECT;
50925+#endif
50926+
50927+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
50928+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
50929+ pax_flags |= MF_PAX_RANDMMAP;
50930+#endif
50931+
50932+ return pax_flags;
50933+}
50934+#endif
50935+
50936+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
50937+{
50938+ unsigned long pax_flags = 0UL;
50939+
50940+#ifdef CONFIG_PAX_PAGEEXEC
50941+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
50942+ pax_flags |= MF_PAX_PAGEEXEC;
50943+#endif
50944+
50945+#ifdef CONFIG_PAX_SEGMEXEC
50946+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
50947+ pax_flags |= MF_PAX_SEGMEXEC;
50948+#endif
50949+
50950+#ifdef CONFIG_PAX_EMUTRAMP
50951+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
50952+ pax_flags |= MF_PAX_EMUTRAMP;
50953+#endif
50954+
50955+#ifdef CONFIG_PAX_MPROTECT
50956+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
50957+ pax_flags |= MF_PAX_MPROTECT;
50958+#endif
50959+
50960+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
50961+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
50962+ pax_flags |= MF_PAX_RANDMMAP;
50963+#endif
50964+
50965+ return pax_flags;
50966+}
50967+#endif
50968+
50969+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
50970+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
50971+{
50972+ unsigned long pax_flags = 0UL;
50973+
50974+#ifdef CONFIG_PAX_EI_PAX
50975+
50976+#ifdef CONFIG_PAX_PAGEEXEC
50977+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
50978+ pax_flags |= MF_PAX_PAGEEXEC;
50979+#endif
50980+
50981+#ifdef CONFIG_PAX_SEGMEXEC
50982+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
50983+ pax_flags |= MF_PAX_SEGMEXEC;
50984+#endif
50985+
50986+#ifdef CONFIG_PAX_EMUTRAMP
50987+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
50988+ pax_flags |= MF_PAX_EMUTRAMP;
50989+#endif
50990+
50991+#ifdef CONFIG_PAX_MPROTECT
50992+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
50993+ pax_flags |= MF_PAX_MPROTECT;
50994+#endif
50995+
50996+#ifdef CONFIG_PAX_ASLR
50997+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
50998+ pax_flags |= MF_PAX_RANDMMAP;
50999+#endif
51000+
51001+#else
51002+
51003+#ifdef CONFIG_PAX_PAGEEXEC
51004+ pax_flags |= MF_PAX_PAGEEXEC;
51005+#endif
51006+
51007+#ifdef CONFIG_PAX_SEGMEXEC
51008+ pax_flags |= MF_PAX_SEGMEXEC;
51009+#endif
51010+
51011+#ifdef CONFIG_PAX_MPROTECT
51012+ pax_flags |= MF_PAX_MPROTECT;
51013+#endif
51014+
51015+#ifdef CONFIG_PAX_RANDMMAP
51016+ if (randomize_va_space)
51017+ pax_flags |= MF_PAX_RANDMMAP;
51018+#endif
51019+
51020+#endif
51021+
51022+ return pax_flags;
51023+}
51024+
51025+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
51026+{
51027+
51028+#ifdef CONFIG_PAX_PT_PAX_FLAGS
51029+ unsigned long i;
51030+
51031+ for (i = 0UL; i < elf_ex->e_phnum; i++)
51032+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
51033+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
51034+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
51035+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
51036+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
51037+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
51038+ return ~0UL;
51039+
51040+#ifdef CONFIG_PAX_SOFTMODE
51041+ if (pax_softmode)
51042+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
51043+ else
51044+#endif
51045+
51046+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
51047+ break;
51048+ }
51049+#endif
51050+
51051+ return ~0UL;
51052+}
51053+
51054+static unsigned long pax_parse_xattr_pax(struct file * const file)
51055+{
51056+
51057+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
51058+ ssize_t xattr_size, i;
51059+ unsigned char xattr_value[sizeof("pemrs") - 1];
51060+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
51061+
51062+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
51063+ if (xattr_size <= 0 || xattr_size > sizeof xattr_value)
51064+ return ~0UL;
51065+
51066+ for (i = 0; i < xattr_size; i++)
51067+ switch (xattr_value[i]) {
51068+ default:
51069+ return ~0UL;
51070+
51071+#define parse_flag(option1, option2, flag) \
51072+ case option1: \
51073+ if (pax_flags_hardmode & MF_PAX_##flag) \
51074+ return ~0UL; \
51075+ pax_flags_hardmode |= MF_PAX_##flag; \
51076+ break; \
51077+ case option2: \
51078+ if (pax_flags_softmode & MF_PAX_##flag) \
51079+ return ~0UL; \
51080+ pax_flags_softmode |= MF_PAX_##flag; \
51081+ break;
51082+
51083+ parse_flag('p', 'P', PAGEEXEC);
51084+ parse_flag('e', 'E', EMUTRAMP);
51085+ parse_flag('m', 'M', MPROTECT);
51086+ parse_flag('r', 'R', RANDMMAP);
51087+ parse_flag('s', 'S', SEGMEXEC);
51088+
51089+#undef parse_flag
51090+ }
51091+
51092+ if (pax_flags_hardmode & pax_flags_softmode)
51093+ return ~0UL;
51094+
51095+#ifdef CONFIG_PAX_SOFTMODE
51096+ if (pax_softmode)
51097+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
51098+ else
51099+#endif
51100+
51101+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
51102+#else
51103+ return ~0UL;
51104+#endif
51105+
51106+}
51107+
51108+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
51109+{
51110+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
51111+
51112+ pax_flags = pax_parse_ei_pax(elf_ex);
51113+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
51114+ xattr_pax_flags = pax_parse_xattr_pax(file);
51115+
51116+ if (pt_pax_flags == ~0UL)
51117+ pt_pax_flags = xattr_pax_flags;
51118+ else if (xattr_pax_flags == ~0UL)
51119+ xattr_pax_flags = pt_pax_flags;
51120+ if (pt_pax_flags != xattr_pax_flags)
51121+ return -EINVAL;
51122+ if (pt_pax_flags != ~0UL)
51123+ pax_flags = pt_pax_flags;
51124+
51125+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
51126+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
51127+ if ((__supported_pte_mask & _PAGE_NX))
51128+ pax_flags &= ~MF_PAX_SEGMEXEC;
51129+ else
51130+ pax_flags &= ~MF_PAX_PAGEEXEC;
51131+ }
51132+#endif
51133+
51134+ if (0 > pax_check_flags(&pax_flags))
51135+ return -EINVAL;
51136+
51137+ current->mm->pax_flags = pax_flags;
51138+ return 0;
51139+}
51140+#endif
51141+
51142 /*
51143 * These are the functions used to load ELF style executables and shared
51144 * libraries. There is no binary dependent code anywhere else.
51145@@ -554,6 +892,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
51146 {
51147 unsigned int random_variable = 0;
51148
51149+#ifdef CONFIG_PAX_RANDUSTACK
51150+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
51151+ return stack_top - current->mm->delta_stack;
51152+#endif
51153+
51154 if ((current->flags & PF_RANDOMIZE) &&
51155 !(current->personality & ADDR_NO_RANDOMIZE)) {
51156 random_variable = get_random_int() & STACK_RND_MASK;
51157@@ -572,7 +915,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
51158 unsigned long load_addr = 0, load_bias = 0;
51159 int load_addr_set = 0;
51160 char * elf_interpreter = NULL;
51161- unsigned long error;
51162+ unsigned long error = 0;
51163 struct elf_phdr *elf_ppnt, *elf_phdata;
51164 unsigned long elf_bss, elf_brk;
51165 int retval, i;
51166@@ -582,12 +925,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
51167 unsigned long start_code, end_code, start_data, end_data;
51168 unsigned long reloc_func_desc __maybe_unused = 0;
51169 int executable_stack = EXSTACK_DEFAULT;
51170- unsigned long def_flags = 0;
51171 struct pt_regs *regs = current_pt_regs();
51172 struct {
51173 struct elfhdr elf_ex;
51174 struct elfhdr interp_elf_ex;
51175 } *loc;
51176+ unsigned long pax_task_size = TASK_SIZE;
51177
51178 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
51179 if (!loc) {
51180@@ -723,11 +1066,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
51181 goto out_free_dentry;
51182
51183 /* OK, This is the point of no return */
51184- current->mm->def_flags = def_flags;
51185+
51186+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
51187+ current->mm->pax_flags = 0UL;
51188+#endif
51189+
51190+#ifdef CONFIG_PAX_DLRESOLVE
51191+ current->mm->call_dl_resolve = 0UL;
51192+#endif
51193+
51194+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
51195+ current->mm->call_syscall = 0UL;
51196+#endif
51197+
51198+#ifdef CONFIG_PAX_ASLR
51199+ current->mm->delta_mmap = 0UL;
51200+ current->mm->delta_stack = 0UL;
51201+#endif
51202+
51203+ current->mm->def_flags = 0;
51204+
51205+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
51206+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
51207+ send_sig(SIGKILL, current, 0);
51208+ goto out_free_dentry;
51209+ }
51210+#endif
51211+
51212+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
51213+ pax_set_initial_flags(bprm);
51214+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
51215+ if (pax_set_initial_flags_func)
51216+ (pax_set_initial_flags_func)(bprm);
51217+#endif
51218+
51219+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
51220+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
51221+ current->mm->context.user_cs_limit = PAGE_SIZE;
51222+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
51223+ }
51224+#endif
51225+
51226+#ifdef CONFIG_PAX_SEGMEXEC
51227+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
51228+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
51229+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
51230+ pax_task_size = SEGMEXEC_TASK_SIZE;
51231+ current->mm->def_flags |= VM_NOHUGEPAGE;
51232+ }
51233+#endif
51234+
51235+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
51236+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
51237+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
51238+ put_cpu();
51239+ }
51240+#endif
51241
51242 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
51243 may depend on the personality. */
51244 SET_PERSONALITY(loc->elf_ex);
51245+
51246+#ifdef CONFIG_PAX_ASLR
51247+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
51248+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
51249+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
51250+ }
51251+#endif
51252+
51253+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
51254+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
51255+ executable_stack = EXSTACK_DISABLE_X;
51256+ current->personality &= ~READ_IMPLIES_EXEC;
51257+ } else
51258+#endif
51259+
51260 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
51261 current->personality |= READ_IMPLIES_EXEC;
51262
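
The two delta computations above show the PaX randomization idiom: mask a fresh random value down to an architecture-defined number of bits, then shift it left by PAGE_SHIFT so the resulting offset is page-aligned. Below is a minimal userspace sketch of the same arithmetic; the 12-bit page shift, the 28-bit entropy width, and the LP64 assumption are illustrative stand-ins for the per-architecture constants the patch uses.

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed: 4 KiB pages */
#define DELTA_MMAP_LEN	28	/* assumed entropy width; per-arch in PaX */

/* mask entropy down to DELTA_MMAP_LEN bits, then page-align it (LP64 assumed) */
static unsigned long pax_delta(unsigned long rnd)
{
	return (rnd & ((1UL << DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
}

int main(void)
{
	unsigned long sample = 0x9abcdef0UL;	/* stand-in for pax_get_random_long() */

	printf("delta = %#lx\n", pax_delta(sample));
	return 0;
}
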
51263@@ -819,6 +1232,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
51264 #else
51265 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
51266 #endif
51267+
51268+#ifdef CONFIG_PAX_RANDMMAP
51269+ /* PaX: randomize base address at the default exe base if requested */
51270+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
51271+#ifdef CONFIG_SPARC64
51272+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
51273+#else
51274+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
51275+#endif
51276+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
51277+ elf_flags |= MAP_FIXED;
51278+ }
51279+#endif
51280+
51281 }
51282
51283 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
51284@@ -851,9 +1278,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
51285 * allowed task size. Note that p_filesz must always be
51286 * <= p_memsz so it is only necessary to check p_memsz.
51287 */
51288- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
51289- elf_ppnt->p_memsz > TASK_SIZE ||
51290- TASK_SIZE - elf_ppnt->p_memsz < k) {
51291+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
51292+ elf_ppnt->p_memsz > pax_task_size ||
51293+ pax_task_size - elf_ppnt->p_memsz < k) {
51294 /* set_brk can never work. Avoid overflows. */
51295 send_sig(SIGKILL, current, 0);
51296 retval = -EINVAL;
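
The rewritten bounds check above is the standard overflow-safe way to test k + p_memsz <= limit: compare each operand against the limit first, then test limit - p_memsz < k, so no intermediate sum can wrap. A self-contained sketch of the same shape, with an assumed 3 GiB task size:

#include <stdbool.h>
#include <stdio.h>

/*
 * True when [base, base + len) would escape [0, limit), written so that no
 * intermediate sum can wrap -- the same shape as the pax_task_size checks.
 */
static bool range_exceeds(unsigned long base, unsigned long len,
			  unsigned long limit)
{
	return base >= limit || len > limit || limit - len < base;
}

int main(void)
{
	const unsigned long limit = 0xC0000000UL;	/* assumed 3 GiB task size */

	printf("%d\n", range_exceeds(0xBFFFF000UL, 0x2000UL, limit));	/* 1 */
	printf("%d\n", range_exceeds(0x10000000UL, 0x1000UL, limit));	/* 0 */
	return 0;
}
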
51297@@ -892,17 +1319,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
51298 goto out_free_dentry;
51299 }
51300 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
51301- send_sig(SIGSEGV, current, 0);
51302- retval = -EFAULT; /* Nobody gets to see this, but.. */
51303- goto out_free_dentry;
51304+ /*
51305+ * This bss-zeroing can fail if the ELF
51306+ * file specifies odd protections. So
51307+ * we don't check the return value
51308+ */
51309 }
51310
51311+#ifdef CONFIG_PAX_RANDMMAP
51312+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
51313+ unsigned long start, size, flags;
51314+ vm_flags_t vm_flags;
51315+
51316+ start = ELF_PAGEALIGN(elf_brk);
51317+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
51318+ flags = MAP_FIXED | MAP_PRIVATE;
51319+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
51320+
51321+ down_write(&current->mm->mmap_sem);
51322+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
51323+ retval = -ENOMEM;
51324+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
51325+// if (current->personality & ADDR_NO_RANDOMIZE)
51326+// vm_flags |= VM_READ | VM_MAYREAD;
51327+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
51328+ retval = IS_ERR_VALUE(start) ? start : 0;
51329+ }
51330+ up_write(&current->mm->mmap_sem);
51331+ if (retval == 0)
51332+ retval = set_brk(start + size, start + size + PAGE_SIZE);
51333+ if (retval < 0) {
51334+ send_sig(SIGKILL, current, 0);
51335+ goto out_free_dentry;
51336+ }
51337+ }
51338+#endif
51339+
51340 if (elf_interpreter) {
51341- unsigned long interp_map_addr = 0;
51342-
51343 elf_entry = load_elf_interp(&loc->interp_elf_ex,
51344 interpreter,
51345- &interp_map_addr,
51346 load_bias);
51347 if (!IS_ERR((void *)elf_entry)) {
51348 /*
51349@@ -1124,7 +1579,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
51350 * Decide what to dump of a segment, part, all or none.
51351 */
51352 static unsigned long vma_dump_size(struct vm_area_struct *vma,
51353- unsigned long mm_flags)
51354+ unsigned long mm_flags, long signr)
51355 {
51356 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
51357
51358@@ -1162,7 +1617,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
51359 if (vma->vm_file == NULL)
51360 return 0;
51361
51362- if (FILTER(MAPPED_PRIVATE))
51363+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
51364 goto whole;
51365
51366 /*
51367@@ -1387,9 +1842,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
51368 {
51369 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
51370 int i = 0;
51371- do
51372+ do {
51373 i += 2;
51374- while (auxv[i - 2] != AT_NULL);
51375+ } while (auxv[i - 2] != AT_NULL);
51376 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
51377 }
51378
51379@@ -1398,7 +1853,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
51380 {
51381 mm_segment_t old_fs = get_fs();
51382 set_fs(KERNEL_DS);
51383- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
51384+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
51385 set_fs(old_fs);
51386 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
51387 }
51388@@ -2019,14 +2474,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
51389 }
51390
51391 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
51392- unsigned long mm_flags)
51393+ struct coredump_params *cprm)
51394 {
51395 struct vm_area_struct *vma;
51396 size_t size = 0;
51397
51398 for (vma = first_vma(current, gate_vma); vma != NULL;
51399 vma = next_vma(vma, gate_vma))
51400- size += vma_dump_size(vma, mm_flags);
51401+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
51402 return size;
51403 }
51404
51405@@ -2119,7 +2574,7 @@ static int elf_core_dump(struct coredump_params *cprm)
51406
51407 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
51408
51409- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
51410+ offset += elf_core_vma_data_size(gate_vma, cprm);
51411 offset += elf_core_extra_data_size();
51412 e_shoff = offset;
51413
51414@@ -2133,10 +2588,12 @@ static int elf_core_dump(struct coredump_params *cprm)
51415 offset = dataoff;
51416
51417 size += sizeof(*elf);
51418+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
51419 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
51420 goto end_coredump;
51421
51422 size += sizeof(*phdr4note);
51423+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
51424 if (size > cprm->limit
51425 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
51426 goto end_coredump;
51427@@ -2150,7 +2607,7 @@ static int elf_core_dump(struct coredump_params *cprm)
51428 phdr.p_offset = offset;
51429 phdr.p_vaddr = vma->vm_start;
51430 phdr.p_paddr = 0;
51431- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
51432+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
51433 phdr.p_memsz = vma->vm_end - vma->vm_start;
51434 offset += phdr.p_filesz;
51435 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
51436@@ -2161,6 +2618,7 @@ static int elf_core_dump(struct coredump_params *cprm)
51437 phdr.p_align = ELF_EXEC_PAGESIZE;
51438
51439 size += sizeof(phdr);
51440+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
51441 if (size > cprm->limit
51442 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
51443 goto end_coredump;
51444@@ -2185,7 +2643,7 @@ static int elf_core_dump(struct coredump_params *cprm)
51445 unsigned long addr;
51446 unsigned long end;
51447
51448- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
51449+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
51450
51451 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
51452 struct page *page;
51453@@ -2194,6 +2652,7 @@ static int elf_core_dump(struct coredump_params *cprm)
51454 page = get_dump_page(addr);
51455 if (page) {
51456 void *kaddr = kmap(page);
51457+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
51458 stop = ((size += PAGE_SIZE) > cprm->limit) ||
51459 !dump_write(cprm->file, kaddr,
51460 PAGE_SIZE);
51461@@ -2211,6 +2670,7 @@ static int elf_core_dump(struct coredump_params *cprm)
51462
51463 if (e_phnum == PN_XNUM) {
51464 size += sizeof(*shdr4extnum);
51465+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
51466 if (size > cprm->limit
51467 || !dump_write(cprm->file, shdr4extnum,
51468 sizeof(*shdr4extnum)))
51469@@ -2231,6 +2691,167 @@ out:
51470
51471 #endif /* CONFIG_ELF_CORE */
51472
51473+#ifdef CONFIG_PAX_MPROTECT
51474+/* PaX: non-PIC ELF libraries need relocations on their executable segments,
51475+ * so we'll grant them VM_MAYWRITE once during their life. Similarly,
51476+ * we'll remove VM_MAYWRITE for good on RELRO segments.
51477+ *
51478+ * The checks favour ld-linux.so behaviour, which operates on a per-ELF-segment
51479+ * basis, because we want to allow the common case and not the special ones.
51480+ */
51481+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
51482+{
51483+ struct elfhdr elf_h;
51484+ struct elf_phdr elf_p;
51485+ unsigned long i;
51486+ unsigned long oldflags;
51487+ bool is_textrel_rw, is_textrel_rx, is_relro;
51488+
51489+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
51490+ return;
51491+
51492+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
51493+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
51494+
51495+#ifdef CONFIG_PAX_ELFRELOCS
51496+ /* possible TEXTREL */
51497+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
51498+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
51499+#else
51500+ is_textrel_rw = false;
51501+ is_textrel_rx = false;
51502+#endif
51503+
51504+ /* possible RELRO */
51505+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
51506+
51507+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
51508+ return;
51509+
51510+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
51511+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
51512+
51513+#ifdef CONFIG_PAX_ETEXECRELOCS
51514+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
51515+#else
51516+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
51517+#endif
51518+
51519+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
51520+ !elf_check_arch(&elf_h) ||
51521+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
51522+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
51523+ return;
51524+
51525+ for (i = 0UL; i < elf_h.e_phnum; i++) {
51526+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
51527+ return;
51528+ switch (elf_p.p_type) {
51529+ case PT_DYNAMIC:
51530+ if (!is_textrel_rw && !is_textrel_rx)
51531+ continue;
51532+ i = 0UL;
51533+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
51534+ elf_dyn dyn;
51535+
51536+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
51537+ break;
51538+ if (dyn.d_tag == DT_NULL)
51539+ break;
51540+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
51541+ gr_log_textrel(vma);
51542+ if (is_textrel_rw)
51543+ vma->vm_flags |= VM_MAYWRITE;
51544+ else
51545+					/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
51546+ vma->vm_flags &= ~VM_MAYWRITE;
51547+ break;
51548+ }
51549+ i++;
51550+ }
51551+ is_textrel_rw = false;
51552+ is_textrel_rx = false;
51553+ continue;
51554+
51555+ case PT_GNU_RELRO:
51556+ if (!is_relro)
51557+ continue;
51558+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
51559+ vma->vm_flags &= ~VM_MAYWRITE;
51560+ is_relro = false;
51561+ continue;
51562+
51563+#ifdef CONFIG_PAX_PT_PAX_FLAGS
51564+ case PT_PAX_FLAGS: {
51565+ const char *msg_mprotect = "", *msg_emutramp = "";
51566+ char *buffer_lib, *buffer_exe;
51567+
51568+ if (elf_p.p_flags & PF_NOMPROTECT)
51569+ msg_mprotect = "MPROTECT disabled";
51570+
51571+#ifdef CONFIG_PAX_EMUTRAMP
51572+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
51573+ msg_emutramp = "EMUTRAMP enabled";
51574+#endif
51575+
51576+ if (!msg_mprotect[0] && !msg_emutramp[0])
51577+ continue;
51578+
51579+ if (!printk_ratelimit())
51580+ continue;
51581+
51582+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
51583+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
51584+ if (buffer_lib && buffer_exe) {
51585+ char *path_lib, *path_exe;
51586+
51587+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
51588+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
51589+
51590+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
51591+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
51592+
51593+ }
51594+ free_page((unsigned long)buffer_exe);
51595+ free_page((unsigned long)buffer_lib);
51596+ continue;
51597+ }
51598+#endif
51599+
51600+ }
51601+ }
51602+}
51603+#endif
51604+
51605+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51606+
51607+extern int grsec_enable_log_rwxmaps;
51608+
51609+static void elf_handle_mmap(struct file *file)
51610+{
51611+ struct elfhdr elf_h;
51612+ struct elf_phdr elf_p;
51613+ unsigned long i;
51614+
51615+ if (!grsec_enable_log_rwxmaps)
51616+ return;
51617+
51618+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
51619+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
51620+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
51621+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
51622+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
51623+ return;
51624+
51625+ for (i = 0UL; i < elf_h.e_phnum; i++) {
51626+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
51627+ return;
51628+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
51629+ gr_log_ptgnustack(file);
51630+ }
51631+}
51632+#endif
51633+
51634 static int __init init_elf_binfmt(void)
51635 {
51636 register_binfmt(&elf_format);
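
To summarize the xattr parser added above: each lowercase character accumulates into the hard-mode mask (an explicit "disable this feature" marking), each uppercase character into the soft-mode mask ("enable"), repeated characters and markings that name the same flag in both masks are rejected, and only then does the soft/hard interpretation run. A userspace sketch of the same state machine follows; the conventional xattr name carrying the string, user.pax.flags, does not appear in this hunk and is an assumption here.

#include <stdio.h>

#define F_PAGEEXEC 0x01
#define F_EMUTRAMP 0x02
#define F_MPROTECT 0x04
#define F_RANDMMAP 0x08
#define F_SEGMEXEC 0x10

/* returns 0 on success, -1 on a malformed or self-contradicting marking */
static int parse_pax_marking(const char *s, unsigned *hard, unsigned *soft)
{
	static const struct { char lo, hi; unsigned bit; } tbl[] = {
		{ 'p', 'P', F_PAGEEXEC }, { 'e', 'E', F_EMUTRAMP },
		{ 'm', 'M', F_MPROTECT }, { 'r', 'R', F_RANDMMAP },
		{ 's', 'S', F_SEGMEXEC },
	};
	const size_t n = sizeof(tbl) / sizeof(tbl[0]);

	*hard = *soft = 0;
	for (; *s; s++) {
		size_t i;

		for (i = 0; i < n; i++) {
			if (*s == tbl[i].lo) {
				if (*hard & tbl[i].bit)
					return -1;	/* repeated marking */
				*hard |= tbl[i].bit;
				break;
			}
			if (*s == tbl[i].hi) {
				if (*soft & tbl[i].bit)
					return -1;
				*soft |= tbl[i].bit;
				break;
			}
		}
		if (i == n)
			return -1;		/* unknown character */
	}
	/* a flag marked in both modes is contradictory, as in the kernel */
	return (*hard & *soft) ? -1 : 0;
}

int main(void)
{
	unsigned hard, soft;

	if (parse_pax_marking("Pm", &hard, &soft) == 0)
		printf("hard=%#x soft=%#x\n", hard, soft);	/* hard=0x4 soft=0x1 */
	return 0;
}
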
51637diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
51638index d50bbe5..af3b649 100644
51639--- a/fs/binfmt_flat.c
51640+++ b/fs/binfmt_flat.c
51641@@ -566,7 +566,9 @@ static int load_flat_file(struct linux_binprm * bprm,
51642 realdatastart = (unsigned long) -ENOMEM;
51643 printk("Unable to allocate RAM for process data, errno %d\n",
51644 (int)-realdatastart);
51645+ down_write(&current->mm->mmap_sem);
51646 vm_munmap(textpos, text_len);
51647+ up_write(&current->mm->mmap_sem);
51648 ret = realdatastart;
51649 goto err;
51650 }
51651@@ -590,8 +592,10 @@ static int load_flat_file(struct linux_binprm * bprm,
51652 }
51653 if (IS_ERR_VALUE(result)) {
51654 printk("Unable to read data+bss, errno %d\n", (int)-result);
51655+ down_write(&current->mm->mmap_sem);
51656 vm_munmap(textpos, text_len);
51657 vm_munmap(realdatastart, len);
51658+ up_write(&current->mm->mmap_sem);
51659 ret = result;
51660 goto err;
51661 }
51662@@ -653,8 +657,10 @@ static int load_flat_file(struct linux_binprm * bprm,
51663 }
51664 if (IS_ERR_VALUE(result)) {
51665 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
51666+ down_write(&current->mm->mmap_sem);
51667 vm_munmap(textpos, text_len + data_len + extra +
51668 MAX_SHARED_LIBS * sizeof(unsigned long));
51669+ up_write(&current->mm->mmap_sem);
51670 ret = result;
51671 goto err;
51672 }
51673diff --git a/fs/bio.c b/fs/bio.c
51674index 94bbc04..6fe78a4 100644
51675--- a/fs/bio.c
51676+++ b/fs/bio.c
51677@@ -1096,7 +1096,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
51678 /*
51679 * Overflow, abort
51680 */
51681- if (end < start)
51682+ if (end < start || end - start > INT_MAX - nr_pages)
51683 return ERR_PTR(-EINVAL);
51684
51685 nr_pages += end - start;
51686@@ -1230,7 +1230,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
51687 /*
51688 * Overflow, abort
51689 */
51690- if (end < start)
51691+ if (end < start || end - start > INT_MAX - nr_pages)
51692 return ERR_PTR(-EINVAL);
51693
51694 nr_pages += end - start;
51695@@ -1492,7 +1492,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
51696 const int read = bio_data_dir(bio) == READ;
51697 struct bio_map_data *bmd = bio->bi_private;
51698 int i;
51699- char *p = bmd->sgvecs[0].iov_base;
51700+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
51701
51702 bio_for_each_segment_all(bvec, bio, i) {
51703 char *addr = page_address(bvec->bv_page);
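
The hardened check above guards two distinct overflows: end < start catches wraparound in the page-range arithmetic itself, while the new end - start > INT_MAX - nr_pages clause keeps the running int page count from overflowing as iovec segments accumulate. A minimal sketch of the accumulation pattern:

#include <limits.h>
#include <stdio.h>

/*
 * Accumulate a segment's page count into an int total, refusing both address
 * wraparound and int overflow -- mirroring the check added to fs/bio.c above.
 */
static int add_pages(int nr_pages, unsigned long start, unsigned long end)
{
	if (end < start || end - start > (unsigned long)(INT_MAX - nr_pages))
		return -1;	/* would wrap */
	return nr_pages + (int)(end - start);
}

int main(void)
{
	printf("%d\n", add_pages(100, 10, 14));		/* 104 */
	printf("%d\n", add_pages(INT_MAX - 1, 0, 4));	/* -1: rejected */
	return 0;
}
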
51704diff --git a/fs/block_dev.c b/fs/block_dev.c
51705index 85f5c85..d6f0b1a 100644
51706--- a/fs/block_dev.c
51707+++ b/fs/block_dev.c
51708@@ -658,7 +658,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
51709 else if (bdev->bd_contains == bdev)
51710 return true; /* is a whole device which isn't held */
51711
51712- else if (whole->bd_holder == bd_may_claim)
51713+ else if (whole->bd_holder == (void *)bd_may_claim)
51714 return true; /* is a partition of a device that is being partitioned */
51715 else if (whole->bd_holder != NULL)
51716 return false; /* is a partition of a held device */
51717diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
51718index 7fb054b..ad36c67 100644
51719--- a/fs/btrfs/ctree.c
51720+++ b/fs/btrfs/ctree.c
51721@@ -1076,9 +1076,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
51722 free_extent_buffer(buf);
51723 add_root_to_dirty_list(root);
51724 } else {
51725- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
51726- parent_start = parent->start;
51727- else
51728+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
51729+ if (parent)
51730+ parent_start = parent->start;
51731+ else
51732+ parent_start = 0;
51733+ } else
51734 parent_start = 0;
51735
51736 WARN_ON(trans->transid != btrfs_header_generation(parent));
51737diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
51738index 0f81d67..0ad55fe 100644
51739--- a/fs/btrfs/ioctl.c
51740+++ b/fs/btrfs/ioctl.c
51741@@ -3084,9 +3084,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
51742 for (i = 0; i < num_types; i++) {
51743 struct btrfs_space_info *tmp;
51744
51745+ /* Don't copy in more than we allocated */
51746 if (!slot_count)
51747 break;
51748
51749+ slot_count--;
51750+
51751 info = NULL;
51752 rcu_read_lock();
51753 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
51754@@ -3108,10 +3111,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
51755 memcpy(dest, &space, sizeof(space));
51756 dest++;
51757 space_args.total_spaces++;
51758- slot_count--;
51759 }
51760- if (!slot_count)
51761- break;
51762 }
51763 up_read(&info->groups_sem);
51764 }
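
The btrfs_ioctl_space_info() reordering above consumes the slot budget before each entry is written rather than after, so the loop can never emit more entries than the caller allocated. The same discipline in miniature:

#include <stdio.h>

/* consume the slot budget *before* writing, as in the reordered loop above */
static size_t fill(int *dst, size_t slots, size_t navail)
{
	size_t written = 0;

	while (navail--) {
		if (!slots)
			break;		/* out of room: stop before the write */
		slots--;
		dst[written] = (int)written;
		written++;
	}
	return written;
}

int main(void)
{
	int buf[4];

	printf("%zu\n", fill(buf, 4, 10));	/* 4, never 10 */
	return 0;
}
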
51765diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
51766index f0857e0..e7023c5 100644
51767--- a/fs/btrfs/super.c
51768+++ b/fs/btrfs/super.c
51769@@ -265,7 +265,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
51770 function, line, errstr);
51771 return;
51772 }
51773- ACCESS_ONCE(trans->transaction->aborted) = errno;
51774+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
51775 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
51776 }
51777 /*
51778diff --git a/fs/buffer.c b/fs/buffer.c
51779index d2a4d1b..df798ca 100644
51780--- a/fs/buffer.c
51781+++ b/fs/buffer.c
51782@@ -3367,7 +3367,7 @@ void __init buffer_init(void)
51783 bh_cachep = kmem_cache_create("buffer_head",
51784 sizeof(struct buffer_head), 0,
51785 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
51786- SLAB_MEM_SPREAD),
51787+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
51788 NULL);
51789
51790 /*
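
SLAB_NO_SANITIZE is a grsecurity/PaX-only cache flag: the SANITIZE feature wipes slab objects on free, and this flag exempts hot caches whose objects hold no sensitive data, such as buffer_head here. A sketch of a cache opting out; this compiles only against a patched tree, and the cache name and object size are illustrative:

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
	example_cachep = kmem_cache_create("example_obj", 64, 0,
					   SLAB_HWCACHE_ALIGN | SLAB_NO_SANITIZE,
					   NULL);
	return example_cachep ? 0 : -ENOMEM;
}
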
51791diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
51792index 622f469..e8d2d55 100644
51793--- a/fs/cachefiles/bind.c
51794+++ b/fs/cachefiles/bind.c
51795@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
51796 args);
51797
51798 /* start by checking things over */
51799- ASSERT(cache->fstop_percent >= 0 &&
51800- cache->fstop_percent < cache->fcull_percent &&
51801+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
51802 cache->fcull_percent < cache->frun_percent &&
51803 cache->frun_percent < 100);
51804
51805- ASSERT(cache->bstop_percent >= 0 &&
51806- cache->bstop_percent < cache->bcull_percent &&
51807+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
51808 cache->bcull_percent < cache->brun_percent &&
51809 cache->brun_percent < 100);
51810
51811diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
51812index 0a1467b..6a53245 100644
51813--- a/fs/cachefiles/daemon.c
51814+++ b/fs/cachefiles/daemon.c
51815@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
51816 if (n > buflen)
51817 return -EMSGSIZE;
51818
51819- if (copy_to_user(_buffer, buffer, n) != 0)
51820+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
51821 return -EFAULT;
51822
51823 return n;
51824@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
51825 if (test_bit(CACHEFILES_DEAD, &cache->flags))
51826 return -EIO;
51827
51828- if (datalen < 0 || datalen > PAGE_SIZE - 1)
51829+ if (datalen > PAGE_SIZE - 1)
51830 return -EOPNOTSUPP;
51831
51832 /* drag the command string into the kernel so we can parse it */
51833@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
51834 if (args[0] != '%' || args[1] != '\0')
51835 return -EINVAL;
51836
51837- if (fstop < 0 || fstop >= cache->fcull_percent)
51838+ if (fstop >= cache->fcull_percent)
51839 return cachefiles_daemon_range_error(cache, args);
51840
51841 cache->fstop_percent = fstop;
51842@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
51843 if (args[0] != '%' || args[1] != '\0')
51844 return -EINVAL;
51845
51846- if (bstop < 0 || bstop >= cache->bcull_percent)
51847+ if (bstop >= cache->bcull_percent)
51848 return cachefiles_daemon_range_error(cache, args);
51849
51850 cache->bstop_percent = bstop;
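
Two recurring hardening moves appear in this file: dropping "< 0" comparisons on unsigned values (always false, and they obscure the real bound), and clamping a copy_to_user() length against the kernel buffer as well as the caller's buffer. A sketch of the hardened read pattern; the format string and values are placeholders:

#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

/* clamp the copy against the kernel buffer as well as the user's buflen */
static ssize_t example_daemon_read(char __user *ubuf, size_t buflen)
{
	char kbuf[64];
	size_t n = scnprintf(kbuf, sizeof(kbuf), "frun=%u fcull=%u\n", 90U, 80U);

	if (n > buflen)
		return -EMSGSIZE;
	/* scnprintf() already bounds n; the extra test is belt and braces,
	 * exactly as in the hardened cachefiles_daemon_read() above */
	if (n > sizeof(kbuf) || copy_to_user(ubuf, kbuf, n))
		return -EFAULT;
	return n;
}
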
51851diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
51852index 4938251..7e01445 100644
51853--- a/fs/cachefiles/internal.h
51854+++ b/fs/cachefiles/internal.h
51855@@ -59,7 +59,7 @@ struct cachefiles_cache {
51856 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
51857 struct rb_root active_nodes; /* active nodes (can't be culled) */
51858 rwlock_t active_lock; /* lock for active_nodes */
51859- atomic_t gravecounter; /* graveyard uniquifier */
51860+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
51861 unsigned frun_percent; /* when to stop culling (% files) */
51862 unsigned fcull_percent; /* when to start culling (% files) */
51863 unsigned fstop_percent; /* when to stop allocating (% files) */
51864@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
51865 * proc.c
51866 */
51867 #ifdef CONFIG_CACHEFILES_HISTOGRAM
51868-extern atomic_t cachefiles_lookup_histogram[HZ];
51869-extern atomic_t cachefiles_mkdir_histogram[HZ];
51870-extern atomic_t cachefiles_create_histogram[HZ];
51871+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
51872+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
51873+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
51874
51875 extern int __init cachefiles_proc_init(void);
51876 extern void cachefiles_proc_cleanup(void);
51877 static inline
51878-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
51879+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
51880 {
51881 unsigned long jif = jiffies - start_jif;
51882 if (jif >= HZ)
51883 jif = HZ - 1;
51884- atomic_inc(&histogram[jif]);
51885+ atomic_inc_unchecked(&histogram[jif]);
51886 }
51887
51888 #else
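
The atomic_t to atomic_unchecked_t conversions that dominate the remainder of this section come from PaX's REFCOUNT defense: overflowing a plain atomic_t traps, to stop reference-count-overflow exploits, so counters that may legitimately wrap (statistics, histogram buckets, graveyard uniquifiers) are moved to the unchecked variants, which keep ordinary wrapping semantics. A sketch of the API, which exists only in grsecurity/PaX trees:

#include <linux/atomic.h>

/* a pure statistic: wrapping is acceptable, so the unchecked API is used */
static atomic_unchecked_t example_events = ATOMIC_INIT(0);

static void example_event(void)
{
	atomic_inc_unchecked(&example_events);	/* no overflow trap */
}

static int example_events_snapshot(void)
{
	return atomic_read_unchecked(&example_events);
}
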
51889diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
51890index 8c01c5fc..15f982e 100644
51891--- a/fs/cachefiles/namei.c
51892+++ b/fs/cachefiles/namei.c
51893@@ -317,7 +317,7 @@ try_again:
51894 /* first step is to make up a grave dentry in the graveyard */
51895 sprintf(nbuffer, "%08x%08x",
51896 (uint32_t) get_seconds(),
51897- (uint32_t) atomic_inc_return(&cache->gravecounter));
51898+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
51899
51900 /* do the multiway lock magic */
51901 trap = lock_rename(cache->graveyard, dir);
51902diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
51903index eccd339..4c1d995 100644
51904--- a/fs/cachefiles/proc.c
51905+++ b/fs/cachefiles/proc.c
51906@@ -14,9 +14,9 @@
51907 #include <linux/seq_file.h>
51908 #include "internal.h"
51909
51910-atomic_t cachefiles_lookup_histogram[HZ];
51911-atomic_t cachefiles_mkdir_histogram[HZ];
51912-atomic_t cachefiles_create_histogram[HZ];
51913+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
51914+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
51915+atomic_unchecked_t cachefiles_create_histogram[HZ];
51916
51917 /*
51918 * display the latency histogram
51919@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
51920 return 0;
51921 default:
51922 index = (unsigned long) v - 3;
51923- x = atomic_read(&cachefiles_lookup_histogram[index]);
51924- y = atomic_read(&cachefiles_mkdir_histogram[index]);
51925- z = atomic_read(&cachefiles_create_histogram[index]);
51926+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
51927+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
51928+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
51929 if (x == 0 && y == 0 && z == 0)
51930 return 0;
51931
51932diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
51933index 317f9ee..3d24511 100644
51934--- a/fs/cachefiles/rdwr.c
51935+++ b/fs/cachefiles/rdwr.c
51936@@ -966,7 +966,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
51937 old_fs = get_fs();
51938 set_fs(KERNEL_DS);
51939 ret = file->f_op->write(
51940- file, (const void __user *) data, len, &pos);
51941+ file, (const void __force_user *) data, len, &pos);
51942 set_fs(old_fs);
51943 kunmap(page);
51944 file_end_write(file);
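
__force_user is a PaX sparse annotation (__force __user) for the pattern above: under set_fs(KERNEL_DS) a kernel buffer is deliberately passed through a __user-typed interface, and the annotated cast documents the intent without silencing sparse everywhere else. A sketch of the 3.10-era bracket, assuming a PaX tree for the __force_user spelling:

#include <linux/fs.h>
#include <linux/uaccess.h>

/* write a kernel buffer through a __user-typed interface, 3.10-era style */
static ssize_t write_kernel_buf(struct file *file, const char *data,
				size_t len, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(KERNEL_DS);	/* kernel addresses now pass access_ok() */
	ret = file->f_op->write(file, (const char __force_user *)data,
				len, pos);
	set_fs(old_fs);		/* always restore the old segment */
	return ret;
}
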
51945diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
51946index f02d82b..2632cf86 100644
51947--- a/fs/ceph/dir.c
51948+++ b/fs/ceph/dir.c
51949@@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
51950 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
51951 struct ceph_mds_client *mdsc = fsc->mdsc;
51952 unsigned frag = fpos_frag(filp->f_pos);
51953- int off = fpos_off(filp->f_pos);
51954+ unsigned int off = fpos_off(filp->f_pos);
51955 int err;
51956 u32 ftype;
51957 struct ceph_mds_reply_info_parsed *rinfo;
51958diff --git a/fs/ceph/super.c b/fs/ceph/super.c
51959index 7d377c9..3fb6559 100644
51960--- a/fs/ceph/super.c
51961+++ b/fs/ceph/super.c
51962@@ -839,7 +839,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
51963 /*
51964 * construct our own bdi so we can control readahead, etc.
51965 */
51966-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
51967+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
51968
51969 static int ceph_register_bdi(struct super_block *sb,
51970 struct ceph_fs_client *fsc)
51971@@ -856,7 +856,7 @@ static int ceph_register_bdi(struct super_block *sb,
51972 default_backing_dev_info.ra_pages;
51973
51974 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
51975- atomic_long_inc_return(&bdi_seq));
51976+ atomic_long_inc_return_unchecked(&bdi_seq));
51977 if (!err)
51978 sb->s_bdi = &fsc->backing_dev_info;
51979 return err;
51980diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
51981index d597483..747901b 100644
51982--- a/fs/cifs/cifs_debug.c
51983+++ b/fs/cifs/cifs_debug.c
51984@@ -284,8 +284,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
51985
51986 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
51987 #ifdef CONFIG_CIFS_STATS2
51988- atomic_set(&totBufAllocCount, 0);
51989- atomic_set(&totSmBufAllocCount, 0);
51990+ atomic_set_unchecked(&totBufAllocCount, 0);
51991+ atomic_set_unchecked(&totSmBufAllocCount, 0);
51992 #endif /* CONFIG_CIFS_STATS2 */
51993 spin_lock(&cifs_tcp_ses_lock);
51994 list_for_each(tmp1, &cifs_tcp_ses_list) {
51995@@ -298,7 +298,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
51996 tcon = list_entry(tmp3,
51997 struct cifs_tcon,
51998 tcon_list);
51999- atomic_set(&tcon->num_smbs_sent, 0);
52000+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
52001 if (server->ops->clear_stats)
52002 server->ops->clear_stats(tcon);
52003 }
52004@@ -330,8 +330,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
52005 smBufAllocCount.counter, cifs_min_small);
52006 #ifdef CONFIG_CIFS_STATS2
52007 seq_printf(m, "Total Large %d Small %d Allocations\n",
52008- atomic_read(&totBufAllocCount),
52009- atomic_read(&totSmBufAllocCount));
52010+ atomic_read_unchecked(&totBufAllocCount),
52011+ atomic_read_unchecked(&totSmBufAllocCount));
52012 #endif /* CONFIG_CIFS_STATS2 */
52013
52014 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
52015@@ -360,7 +360,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
52016 if (tcon->need_reconnect)
52017 seq_puts(m, "\tDISCONNECTED ");
52018 seq_printf(m, "\nSMBs: %d",
52019- atomic_read(&tcon->num_smbs_sent));
52020+ atomic_read_unchecked(&tcon->num_smbs_sent));
52021 if (server->ops->print_stats)
52022 server->ops->print_stats(m, tcon);
52023 }
52024diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
52025index 3752b9f..8db5569 100644
52026--- a/fs/cifs/cifsfs.c
52027+++ b/fs/cifs/cifsfs.c
52028@@ -1035,7 +1035,7 @@ cifs_init_request_bufs(void)
52029 */
52030 cifs_req_cachep = kmem_cache_create("cifs_request",
52031 CIFSMaxBufSize + max_hdr_size, 0,
52032- SLAB_HWCACHE_ALIGN, NULL);
52033+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
52034 if (cifs_req_cachep == NULL)
52035 return -ENOMEM;
52036
52037@@ -1062,7 +1062,7 @@ cifs_init_request_bufs(void)
52038 efficient to alloc 1 per page off the slab compared to 17K (5page)
52039 alloc of large cifs buffers even when page debugging is on */
52040 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
52041- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
52042+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
52043 NULL);
52044 if (cifs_sm_req_cachep == NULL) {
52045 mempool_destroy(cifs_req_poolp);
52046@@ -1147,8 +1147,8 @@ init_cifs(void)
52047 atomic_set(&bufAllocCount, 0);
52048 atomic_set(&smBufAllocCount, 0);
52049 #ifdef CONFIG_CIFS_STATS2
52050- atomic_set(&totBufAllocCount, 0);
52051- atomic_set(&totSmBufAllocCount, 0);
52052+ atomic_set_unchecked(&totBufAllocCount, 0);
52053+ atomic_set_unchecked(&totSmBufAllocCount, 0);
52054 #endif /* CONFIG_CIFS_STATS2 */
52055
52056 atomic_set(&midCount, 0);
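
SLAB_USERCOPY is likewise grsecurity-only: under PAX_USERCOPY, copy_to_user()/copy_from_user() on slab memory is refused unless the cache is whitelisted, so the CIFS request caches, which legitimately feed user I/O, opt in at creation. A sketch of the consumer side, with hypothetical names:

#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

static struct kmem_cache *rq_cachep;	/* assumed created with SLAB_USERCOPY */

static long give_to_user(void __user *ubuf, size_t len)
{
	void *buf = kmem_cache_zalloc(rq_cachep, GFP_KERNEL);
	long ret = 0;

	if (!buf)
		return -ENOMEM;
	/* under PAX_USERCOPY this copy is refused at run time unless the
	 * source cache was whitelisted with SLAB_USERCOPY (len must also
	 * fit within the object) */
	if (copy_to_user(ubuf, buf, len))
		ret = -EFAULT;
	kmem_cache_free(rq_cachep, buf);
	return ret;
}
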
52057diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
52058index ea3a0b3..0194e39 100644
52059--- a/fs/cifs/cifsglob.h
52060+++ b/fs/cifs/cifsglob.h
52061@@ -752,35 +752,35 @@ struct cifs_tcon {
52062 __u16 Flags; /* optional support bits */
52063 enum statusEnum tidStatus;
52064 #ifdef CONFIG_CIFS_STATS
52065- atomic_t num_smbs_sent;
52066+ atomic_unchecked_t num_smbs_sent;
52067 union {
52068 struct {
52069- atomic_t num_writes;
52070- atomic_t num_reads;
52071- atomic_t num_flushes;
52072- atomic_t num_oplock_brks;
52073- atomic_t num_opens;
52074- atomic_t num_closes;
52075- atomic_t num_deletes;
52076- atomic_t num_mkdirs;
52077- atomic_t num_posixopens;
52078- atomic_t num_posixmkdirs;
52079- atomic_t num_rmdirs;
52080- atomic_t num_renames;
52081- atomic_t num_t2renames;
52082- atomic_t num_ffirst;
52083- atomic_t num_fnext;
52084- atomic_t num_fclose;
52085- atomic_t num_hardlinks;
52086- atomic_t num_symlinks;
52087- atomic_t num_locks;
52088- atomic_t num_acl_get;
52089- atomic_t num_acl_set;
52090+ atomic_unchecked_t num_writes;
52091+ atomic_unchecked_t num_reads;
52092+ atomic_unchecked_t num_flushes;
52093+ atomic_unchecked_t num_oplock_brks;
52094+ atomic_unchecked_t num_opens;
52095+ atomic_unchecked_t num_closes;
52096+ atomic_unchecked_t num_deletes;
52097+ atomic_unchecked_t num_mkdirs;
52098+ atomic_unchecked_t num_posixopens;
52099+ atomic_unchecked_t num_posixmkdirs;
52100+ atomic_unchecked_t num_rmdirs;
52101+ atomic_unchecked_t num_renames;
52102+ atomic_unchecked_t num_t2renames;
52103+ atomic_unchecked_t num_ffirst;
52104+ atomic_unchecked_t num_fnext;
52105+ atomic_unchecked_t num_fclose;
52106+ atomic_unchecked_t num_hardlinks;
52107+ atomic_unchecked_t num_symlinks;
52108+ atomic_unchecked_t num_locks;
52109+ atomic_unchecked_t num_acl_get;
52110+ atomic_unchecked_t num_acl_set;
52111 } cifs_stats;
52112 #ifdef CONFIG_CIFS_SMB2
52113 struct {
52114- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
52115- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
52116+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
52117+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
52118 } smb2_stats;
52119 #endif /* CONFIG_CIFS_SMB2 */
52120 } stats;
52121@@ -1081,7 +1081,7 @@ convert_delimiter(char *path, char delim)
52122 }
52123
52124 #ifdef CONFIG_CIFS_STATS
52125-#define cifs_stats_inc atomic_inc
52126+#define cifs_stats_inc atomic_inc_unchecked
52127
52128 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
52129 unsigned int bytes)
52130@@ -1446,8 +1446,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
52131 /* Various Debug counters */
52132 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
52133 #ifdef CONFIG_CIFS_STATS2
52134-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
52135-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
52136+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
52137+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
52138 #endif
52139 GLOBAL_EXTERN atomic_t smBufAllocCount;
52140 GLOBAL_EXTERN atomic_t midCount;
52141diff --git a/fs/cifs/link.c b/fs/cifs/link.c
52142index b83c3f5..6437caa 100644
52143--- a/fs/cifs/link.c
52144+++ b/fs/cifs/link.c
52145@@ -616,7 +616,7 @@ symlink_exit:
52146
52147 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
52148 {
52149- char *p = nd_get_link(nd);
52150+ const char *p = nd_get_link(nd);
52151 if (!IS_ERR(p))
52152 kfree(p);
52153 }
52154diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
52155index 1bec014..f329411 100644
52156--- a/fs/cifs/misc.c
52157+++ b/fs/cifs/misc.c
52158@@ -169,7 +169,7 @@ cifs_buf_get(void)
52159 memset(ret_buf, 0, buf_size + 3);
52160 atomic_inc(&bufAllocCount);
52161 #ifdef CONFIG_CIFS_STATS2
52162- atomic_inc(&totBufAllocCount);
52163+ atomic_inc_unchecked(&totBufAllocCount);
52164 #endif /* CONFIG_CIFS_STATS2 */
52165 }
52166
52167@@ -204,7 +204,7 @@ cifs_small_buf_get(void)
52168 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
52169 atomic_inc(&smBufAllocCount);
52170 #ifdef CONFIG_CIFS_STATS2
52171- atomic_inc(&totSmBufAllocCount);
52172+ atomic_inc_unchecked(&totSmBufAllocCount);
52173 #endif /* CONFIG_CIFS_STATS2 */
52174
52175 }
52176diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
52177index 3efdb9d..e845a5e 100644
52178--- a/fs/cifs/smb1ops.c
52179+++ b/fs/cifs/smb1ops.c
52180@@ -591,27 +591,27 @@ static void
52181 cifs_clear_stats(struct cifs_tcon *tcon)
52182 {
52183 #ifdef CONFIG_CIFS_STATS
52184- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
52185- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
52186- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
52187- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
52188- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
52189- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
52190- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
52191- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
52192- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
52193- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
52194- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
52195- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
52196- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
52197- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
52198- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
52199- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
52200- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
52201- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
52202- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
52203- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
52204- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
52205+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
52206+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
52207+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
52208+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
52209+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
52210+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
52211+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
52212+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
52213+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
52214+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
52215+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
52216+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
52217+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
52218+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
52219+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
52220+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
52221+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
52222+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
52223+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
52224+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
52225+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
52226 #endif
52227 }
52228
52229@@ -620,36 +620,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
52230 {
52231 #ifdef CONFIG_CIFS_STATS
52232 seq_printf(m, " Oplocks breaks: %d",
52233- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
52234+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
52235 seq_printf(m, "\nReads: %d Bytes: %llu",
52236- atomic_read(&tcon->stats.cifs_stats.num_reads),
52237+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
52238 (long long)(tcon->bytes_read));
52239 seq_printf(m, "\nWrites: %d Bytes: %llu",
52240- atomic_read(&tcon->stats.cifs_stats.num_writes),
52241+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
52242 (long long)(tcon->bytes_written));
52243 seq_printf(m, "\nFlushes: %d",
52244- atomic_read(&tcon->stats.cifs_stats.num_flushes));
52245+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
52246 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
52247- atomic_read(&tcon->stats.cifs_stats.num_locks),
52248- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
52249- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
52250+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
52251+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
52252+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
52253 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
52254- atomic_read(&tcon->stats.cifs_stats.num_opens),
52255- atomic_read(&tcon->stats.cifs_stats.num_closes),
52256- atomic_read(&tcon->stats.cifs_stats.num_deletes));
52257+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
52258+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
52259+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
52260 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
52261- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
52262- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
52263+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
52264+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
52265 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
52266- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
52267- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
52268+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
52269+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
52270 seq_printf(m, "\nRenames: %d T2 Renames %d",
52271- atomic_read(&tcon->stats.cifs_stats.num_renames),
52272- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
52273+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
52274+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
52275 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
52276- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
52277- atomic_read(&tcon->stats.cifs_stats.num_fnext),
52278- atomic_read(&tcon->stats.cifs_stats.num_fclose));
52279+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
52280+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
52281+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
52282 #endif
52283 }
52284
52285diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
52286index f2e76f3..c44fac7 100644
52287--- a/fs/cifs/smb2ops.c
52288+++ b/fs/cifs/smb2ops.c
52289@@ -274,8 +274,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
52290 #ifdef CONFIG_CIFS_STATS
52291 int i;
52292 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
52293- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
52294- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
52295+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
52296+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
52297 }
52298 #endif
52299 }
52300@@ -284,66 +284,66 @@ static void
52301 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
52302 {
52303 #ifdef CONFIG_CIFS_STATS
52304- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
52305- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
52306+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
52307+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
52308 seq_printf(m, "\nNegotiates: %d sent %d failed",
52309- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
52310- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
52311+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
52312+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
52313 seq_printf(m, "\nSessionSetups: %d sent %d failed",
52314- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
52315- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
52316+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
52317+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
52318 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
52319 seq_printf(m, "\nLogoffs: %d sent %d failed",
52320- atomic_read(&sent[SMB2_LOGOFF_HE]),
52321- atomic_read(&failed[SMB2_LOGOFF_HE]));
52322+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
52323+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
52324 seq_printf(m, "\nTreeConnects: %d sent %d failed",
52325- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
52326- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
52327+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
52328+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
52329 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
52330- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
52331- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
52332+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
52333+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
52334 seq_printf(m, "\nCreates: %d sent %d failed",
52335- atomic_read(&sent[SMB2_CREATE_HE]),
52336- atomic_read(&failed[SMB2_CREATE_HE]));
52337+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
52338+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
52339 seq_printf(m, "\nCloses: %d sent %d failed",
52340- atomic_read(&sent[SMB2_CLOSE_HE]),
52341- atomic_read(&failed[SMB2_CLOSE_HE]));
52342+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
52343+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
52344 seq_printf(m, "\nFlushes: %d sent %d failed",
52345- atomic_read(&sent[SMB2_FLUSH_HE]),
52346- atomic_read(&failed[SMB2_FLUSH_HE]));
52347+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
52348+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
52349 seq_printf(m, "\nReads: %d sent %d failed",
52350- atomic_read(&sent[SMB2_READ_HE]),
52351- atomic_read(&failed[SMB2_READ_HE]));
52352+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
52353+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
52354 seq_printf(m, "\nWrites: %d sent %d failed",
52355- atomic_read(&sent[SMB2_WRITE_HE]),
52356- atomic_read(&failed[SMB2_WRITE_HE]));
52357+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
52358+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
52359 seq_printf(m, "\nLocks: %d sent %d failed",
52360- atomic_read(&sent[SMB2_LOCK_HE]),
52361- atomic_read(&failed[SMB2_LOCK_HE]));
52362+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
52363+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
52364 seq_printf(m, "\nIOCTLs: %d sent %d failed",
52365- atomic_read(&sent[SMB2_IOCTL_HE]),
52366- atomic_read(&failed[SMB2_IOCTL_HE]));
52367+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
52368+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
52369 seq_printf(m, "\nCancels: %d sent %d failed",
52370- atomic_read(&sent[SMB2_CANCEL_HE]),
52371- atomic_read(&failed[SMB2_CANCEL_HE]));
52372+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
52373+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
52374 seq_printf(m, "\nEchos: %d sent %d failed",
52375- atomic_read(&sent[SMB2_ECHO_HE]),
52376- atomic_read(&failed[SMB2_ECHO_HE]));
52377+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
52378+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
52379 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
52380- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
52381- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
52382+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
52383+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
52384 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
52385- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
52386- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
52387+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
52388+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
52389 seq_printf(m, "\nQueryInfos: %d sent %d failed",
52390- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
52391- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
52392+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
52393+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
52394 seq_printf(m, "\nSetInfos: %d sent %d failed",
52395- atomic_read(&sent[SMB2_SET_INFO_HE]),
52396- atomic_read(&failed[SMB2_SET_INFO_HE]));
52397+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
52398+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
52399 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
52400- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
52401- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
52402+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
52403+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
52404 #endif
52405 }
52406
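
The cifs statistics reads above are switched to atomic_read_unchecked() because, under the PAX_REFCOUNT hardening this patch implements, ordinary atomic_t arithmetic detects and refuses overflow; counters whose increments may legitimately wrap (pure statistics) are moved to the unchecked twin type, and every accessor has to follow. A minimal userspace sketch of the distinction, with checked_inc/unchecked_inc as illustrative stand-ins rather than kernel APIs:

```c
/* Minimal sketch: a "checked" counter refuses to wrap (modelling
 * atomic_inc under PAX_REFCOUNT), an "unchecked" one is a statistic
 * where wrap-around is harmless (modelling atomic_inc_unchecked).
 * Demo code under those assumptions, not kernel source. */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

static int checked_inc(int *v)
{
        int out;

        if (__builtin_sadd_overflow(*v, 1, &out))
                return -1;      /* the kernel would saturate and report */
        *v = out;
        return 0;
}

static void unchecked_inc(unsigned int *v)
{
        (*v)++;                 /* unsigned wrap is well defined */
}

int main(void)
{
        int refs = INT_MAX;
        unsigned int sent = UINT_MAX;

        assert(checked_inc(&refs) < 0); /* overflow caught, refs unchanged */
        unchecked_inc(&sent);           /* statistic silently wraps to 0 */
        printf("sent wrapped to %u\n", sent);
        return 0;
}
```
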
52407diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
52408index 2b95ce2..d079d75 100644
52409--- a/fs/cifs/smb2pdu.c
52410+++ b/fs/cifs/smb2pdu.c
52411@@ -1760,8 +1760,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
52412 default:
52413 cifs_dbg(VFS, "info level %u isn't supported\n",
52414 srch_inf->info_level);
52415- rc = -EINVAL;
52416- goto qdir_exit;
52417+ return -EINVAL;
52418 }
52419
52420 req->FileIndex = cpu_to_le32(index);
52421diff --git a/fs/coda/cache.c b/fs/coda/cache.c
52422index 1da168c..8bc7ff6 100644
52423--- a/fs/coda/cache.c
52424+++ b/fs/coda/cache.c
52425@@ -24,7 +24,7 @@
52426 #include "coda_linux.h"
52427 #include "coda_cache.h"
52428
52429-static atomic_t permission_epoch = ATOMIC_INIT(0);
52430+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
52431
52432 /* replace or extend an acl cache hit */
52433 void coda_cache_enter(struct inode *inode, int mask)
52434@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
52435 struct coda_inode_info *cii = ITOC(inode);
52436
52437 spin_lock(&cii->c_lock);
52438- cii->c_cached_epoch = atomic_read(&permission_epoch);
52439+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
52440 if (!uid_eq(cii->c_uid, current_fsuid())) {
52441 cii->c_uid = current_fsuid();
52442 cii->c_cached_perm = mask;
52443@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
52444 {
52445 struct coda_inode_info *cii = ITOC(inode);
52446 spin_lock(&cii->c_lock);
52447- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
52448+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
52449 spin_unlock(&cii->c_lock);
52450 }
52451
52452 /* remove all acl caches */
52453 void coda_cache_clear_all(struct super_block *sb)
52454 {
52455- atomic_inc(&permission_epoch);
52456+ atomic_inc_unchecked(&permission_epoch);
52457 }
52458
52459
52460@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
52461 spin_lock(&cii->c_lock);
52462 hit = (mask & cii->c_cached_perm) == mask &&
52463 uid_eq(cii->c_uid, current_fsuid()) &&
52464- cii->c_cached_epoch == atomic_read(&permission_epoch);
52465+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
52466 spin_unlock(&cii->c_lock);
52467
52468 return hit;
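
coda's permission cache is invalidated wholesale by bumping a global epoch rather than walking entries; the hunk above only changes the counter's type to the unchecked variant. A self-contained sketch of the epoch pattern (the types here are invented for the demo, not the coda structures):

```c
/* Epoch-based invalidation: each entry records the global epoch when
 * filled; a single increment invalidates every entry without touching
 * them. Demo types only, not the coda structures. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int permission_epoch;

struct cached_perm {
        int epoch;
        int mask;
};

static void cache_fill(struct cached_perm *c, int mask)
{
        c->epoch = atomic_load(&permission_epoch);
        c->mask = mask;
}

static bool cache_check(const struct cached_perm *c, int mask)
{
        return (c->mask & mask) == mask &&
               c->epoch == atomic_load(&permission_epoch);
}

static void cache_clear_all(void)
{
        atomic_fetch_add(&permission_epoch, 1);
}

int main(void)
{
        struct cached_perm c;

        cache_fill(&c, 4);                       /* cache "read" permission */
        printf("hit=%d\n", cache_check(&c, 4));  /* hit=1 */
        cache_clear_all();
        printf("hit=%d\n", cache_check(&c, 4));  /* hit=0, epoch moved on */
        return 0;
}
```
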
52469diff --git a/fs/compat.c b/fs/compat.c
52470index fc3b55d..7b568ae 100644
52471--- a/fs/compat.c
52472+++ b/fs/compat.c
52473@@ -54,7 +54,7 @@
52474 #include <asm/ioctls.h>
52475 #include "internal.h"
52476
52477-int compat_log = 1;
52478+int compat_log = 0;
52479
52480 int compat_printk(const char *fmt, ...)
52481 {
52482@@ -488,7 +488,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
52483
52484 set_fs(KERNEL_DS);
52485 /* The __user pointer cast is valid because of the set_fs() */
52486- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
52487+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
52488 set_fs(oldfs);
52489 /* truncating is ok because it's a user address */
52490 if (!ret)
52491@@ -546,7 +546,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
52492 goto out;
52493
52494 ret = -EINVAL;
52495- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
52496+ if (nr_segs > UIO_MAXIOV)
52497 goto out;
52498 if (nr_segs > fast_segs) {
52499 ret = -ENOMEM;
52500@@ -833,6 +833,7 @@ struct compat_old_linux_dirent {
52501
52502 struct compat_readdir_callback {
52503 struct compat_old_linux_dirent __user *dirent;
52504+ struct file * file;
52505 int result;
52506 };
52507
52508@@ -850,6 +851,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
52509 buf->result = -EOVERFLOW;
52510 return -EOVERFLOW;
52511 }
52512+
52513+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
52514+ return 0;
52515+
52516 buf->result++;
52517 dirent = buf->dirent;
52518 if (!access_ok(VERIFY_WRITE, dirent,
52519@@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
52520
52521 buf.result = 0;
52522 buf.dirent = dirent;
52523+ buf.file = f.file;
52524
52525 error = vfs_readdir(f.file, compat_fillonedir, &buf);
52526 if (buf.result)
52527@@ -899,6 +905,7 @@ struct compat_linux_dirent {
52528 struct compat_getdents_callback {
52529 struct compat_linux_dirent __user *current_dir;
52530 struct compat_linux_dirent __user *previous;
52531+ struct file * file;
52532 int count;
52533 int error;
52534 };
52535@@ -920,6 +927,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
52536 buf->error = -EOVERFLOW;
52537 return -EOVERFLOW;
52538 }
52539+
52540+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
52541+ return 0;
52542+
52543 dirent = buf->previous;
52544 if (dirent) {
52545 if (__put_user(offset, &dirent->d_off))
52546@@ -965,6 +976,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
52547 buf.previous = NULL;
52548 buf.count = count;
52549 buf.error = 0;
52550+ buf.file = f.file;
52551
52552 error = vfs_readdir(f.file, compat_filldir, &buf);
52553 if (error >= 0)
52554@@ -985,6 +997,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
52555 struct compat_getdents_callback64 {
52556 struct linux_dirent64 __user *current_dir;
52557 struct linux_dirent64 __user *previous;
52558+ struct file * file;
52559 int count;
52560 int error;
52561 };
52562@@ -1001,6 +1014,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
52563 buf->error = -EINVAL; /* only used if we fail.. */
52564 if (reclen > buf->count)
52565 return -EINVAL;
52566+
52567+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
52568+ return 0;
52569+
52570 dirent = buf->previous;
52571
52572 if (dirent) {
52573@@ -1050,13 +1067,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
52574 buf.previous = NULL;
52575 buf.count = count;
52576 buf.error = 0;
52577+ buf.file = f.file;
52578
52579 error = vfs_readdir(f.file, compat_filldir64, &buf);
52580 if (error >= 0)
52581 error = buf.error;
52582 lastdirent = buf.previous;
52583 if (lastdirent) {
52584- typeof(lastdirent->d_off) d_off = f.file->f_pos;
52585+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
52586 if (__put_user_unaligned(d_off, &lastdirent->d_off))
52587 error = -EFAULT;
52588 else
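
Each compat readdir callback context in the fs/compat.c hunks above gains a struct file * so the gr_acl_handle_filldir hook can veto individual directory entries; returning 0 from the callback skips the name while letting iteration continue. A userspace sketch of that filtering shape, where acl_allows() is a hypothetical stand-in for the grsecurity hook (which this excerpt does not define):

```c
/* The callback context carries the open directory file so a policy
 * hook can veto names one by one; returning 0 keeps iteration going.
 * acl_allows() is an assumed stand-in for gr_acl_handle_filldir(). */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct readdir_ctx {
        const void *file;       /* stands in for struct file * */
        int emitted;
};

static bool acl_allows(const void *file, const char *name)
{
        (void)file;                          /* real hook consults the ACL */
        return strcmp(name, "hidden") != 0;  /* demo policy */
}

static int filldir(struct readdir_ctx *ctx, const char *name)
{
        if (!acl_allows(ctx->file, name))
                return 0;                    /* entry suppressed, not an error */
        ctx->emitted++;
        printf("%s\n", name);
        return 0;
}

int main(void)
{
        struct readdir_ctx ctx = { 0 };
        const char *names[] = { "a", "hidden", "b" };

        for (unsigned int i = 0; i < sizeof(names) / sizeof(names[0]); i++)
                filldir(&ctx, names[i]);
        return ctx.emitted == 2 ? 0 : 1;     /* "hidden" was filtered */
}
```
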
52589diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
52590index a81147e..20bf2b5 100644
52591--- a/fs/compat_binfmt_elf.c
52592+++ b/fs/compat_binfmt_elf.c
52593@@ -30,11 +30,13 @@
52594 #undef elf_phdr
52595 #undef elf_shdr
52596 #undef elf_note
52597+#undef elf_dyn
52598 #undef elf_addr_t
52599 #define elfhdr elf32_hdr
52600 #define elf_phdr elf32_phdr
52601 #define elf_shdr elf32_shdr
52602 #define elf_note elf32_note
52603+#define elf_dyn Elf32_Dyn
52604 #define elf_addr_t Elf32_Addr
52605
52606 /*
52607diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
52608index 996cdc5..15e2f33 100644
52609--- a/fs/compat_ioctl.c
52610+++ b/fs/compat_ioctl.c
52611@@ -622,7 +622,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
52612 return -EFAULT;
52613 if (__get_user(udata, &ss32->iomem_base))
52614 return -EFAULT;
52615- ss.iomem_base = compat_ptr(udata);
52616+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
52617 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
52618 __get_user(ss.port_high, &ss32->port_high))
52619 return -EFAULT;
52620@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
52621 for (i = 0; i < nmsgs; i++) {
52622 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
52623 return -EFAULT;
52624- if (get_user(datap, &umsgs[i].buf) ||
52625- put_user(compat_ptr(datap), &tmsgs[i].buf))
52626+ if (get_user(datap, (u8 __user * __user *)&umsgs[i].buf) ||
52627+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
52628 return -EFAULT;
52629 }
52630 return sys_ioctl(fd, cmd, (unsigned long)tdata);
52631@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
52632 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
52633 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
52634 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
52635- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
52636+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
52637 return -EFAULT;
52638
52639 return ioctl_preallocate(file, p);
52640@@ -1619,8 +1619,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
52641 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
52642 {
52643 unsigned int a, b;
52644- a = *(unsigned int *)p;
52645- b = *(unsigned int *)q;
52646+ a = *(const unsigned int *)p;
52647+ b = *(const unsigned int *)q;
52648 if (a > b)
52649 return 1;
52650 if (a < b)
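
The comparator for the sorted ioctl table above is made const-correct: sort() and bsearch() hand the callback const void * pointers, and the casts should preserve that qualifier. A stand-alone demo using the same comparator shape with libc qsort():

```c
/* Const-correct three-way comparator of the shape used for the sorted
 * ioctl table, exercised here with libc qsort(). */
#include <stdio.h>
#include <stdlib.h>

static int cmp_uint(const void *p, const void *q)
{
        unsigned int a = *(const unsigned int *)p;
        unsigned int b = *(const unsigned int *)q;

        if (a > b)
                return 1;
        if (a < b)
                return -1;
        return 0;
}

int main(void)
{
        unsigned int cmds[] = { 42, 7, 19 };

        qsort(cmds, sizeof(cmds) / sizeof(cmds[0]), sizeof(cmds[0]), cmp_uint);
        printf("%u %u %u\n", cmds[0], cmds[1], cmds[2]);   /* 7 19 42 */
        return 0;
}
```
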
52651diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
52652index 7aabc6a..34c1197 100644
52653--- a/fs/configfs/dir.c
52654+++ b/fs/configfs/dir.c
52655@@ -1565,7 +1565,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
52656 }
52657 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
52658 struct configfs_dirent *next;
52659- const char * name;
52660+ const unsigned char * name;
52661+ char d_name[sizeof(next->s_dentry->d_iname)];
52662 int len;
52663 struct inode *inode = NULL;
52664
52665@@ -1575,7 +1576,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
52666 continue;
52667
52668 name = configfs_get_name(next);
52669- len = strlen(name);
52670+ if (next->s_dentry && name == next->s_dentry->d_iname) {
52671+ len = next->s_dentry->d_name.len;
52672+ memcpy(d_name, name, len);
52673+ name = d_name;
52674+ } else
52675+ len = strlen(name);
52676
52677 /*
52678 * We'll have a dentry and an inode for
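
configfs_readdir() now snapshots names that live in the dentry's inline d_iname buffer, taking the length from d_name.len rather than strlen(), since a concurrent rename can rewrite those bytes mid-read. A simplified single-threaded sketch of the defensive copy itself (demo_dentry is an invented stand-in for struct dentry):

```c
/* Defensive copy of a dentry-inline name: take the length from the
 * recorded field, never strlen(), and snapshot the bytes before use.
 * The caller guarantees len <= DNAME_INLINE_LEN, as the kernel does
 * via d_name.len. demo_dentry is not the real struct dentry. */
#include <stdio.h>
#include <string.h>

#define DNAME_INLINE_LEN 32

struct demo_dentry {
        unsigned int len;
        char d_iname[DNAME_INLINE_LEN];
};

static void emit_name(const struct demo_dentry *dentry)
{
        char snapshot[DNAME_INLINE_LEN];
        unsigned int len = dentry->len;      /* trusted length field */

        memcpy(snapshot, dentry->d_iname, len);
        printf("%.*s\n", (int)len, snapshot);
}

int main(void)
{
        struct demo_dentry d = { .len = 3 };

        memcpy(d.d_iname, "foo", 3);
        emit_name(&d);
        return 0;
}
```
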
52679diff --git a/fs/coredump.c b/fs/coredump.c
52680index dafafba..10b3b27 100644
52681--- a/fs/coredump.c
52682+++ b/fs/coredump.c
52683@@ -52,7 +52,7 @@ struct core_name {
52684 char *corename;
52685 int used, size;
52686 };
52687-static atomic_t call_count = ATOMIC_INIT(1);
52688+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
52689
52690 /* The maximal length of core_pattern is also specified in sysctl.c */
52691
52692@@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
52693 {
52694 char *old_corename = cn->corename;
52695
52696- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
52697+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
52698 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
52699
52700 if (!cn->corename) {
52701@@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
52702 int pid_in_pattern = 0;
52703 int err = 0;
52704
52705- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
52706+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
52707 cn->corename = kmalloc(cn->size, GFP_KERNEL);
52708 cn->used = 0;
52709
52710@@ -435,8 +435,8 @@ static void wait_for_dump_helpers(struct file *file)
52711 struct pipe_inode_info *pipe = file->private_data;
52712
52713 pipe_lock(pipe);
52714- pipe->readers++;
52715- pipe->writers--;
52716+ atomic_inc(&pipe->readers);
52717+ atomic_dec(&pipe->writers);
52718 wake_up_interruptible_sync(&pipe->wait);
52719 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
52720 pipe_unlock(pipe);
52721@@ -445,11 +445,11 @@ static void wait_for_dump_helpers(struct file *file)
52722 * We actually want wait_event_freezable() but then we need
52723 * to clear TIF_SIGPENDING and improve dump_interrupted().
52724 */
52725- wait_event_interruptible(pipe->wait, pipe->readers == 1);
52726+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
52727
52728 pipe_lock(pipe);
52729- pipe->readers--;
52730- pipe->writers++;
52731+ atomic_dec(&pipe->readers);
52732+ atomic_inc(&pipe->writers);
52733 pipe_unlock(pipe);
52734 }
52735
52736@@ -496,7 +496,8 @@ void do_coredump(siginfo_t *siginfo)
52737 struct files_struct *displaced;
52738 bool need_nonrelative = false;
52739 bool core_dumped = false;
52740- static atomic_t core_dump_count = ATOMIC_INIT(0);
52741+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
52742+ long signr = siginfo->si_signo;
52743 struct coredump_params cprm = {
52744 .siginfo = siginfo,
52745 .regs = signal_pt_regs(),
52746@@ -509,7 +510,10 @@ void do_coredump(siginfo_t *siginfo)
52747 .mm_flags = mm->flags,
52748 };
52749
52750- audit_core_dumps(siginfo->si_signo);
52751+ audit_core_dumps(signr);
52752+
52753+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
52754+ gr_handle_brute_attach(cprm.mm_flags);
52755
52756 binfmt = mm->binfmt;
52757 if (!binfmt || !binfmt->core_dump)
52758@@ -533,7 +537,7 @@ void do_coredump(siginfo_t *siginfo)
52759 need_nonrelative = true;
52760 }
52761
52762- retval = coredump_wait(siginfo->si_signo, &core_state);
52763+ retval = coredump_wait(signr, &core_state);
52764 if (retval < 0)
52765 goto fail_creds;
52766
52767@@ -576,7 +580,7 @@ void do_coredump(siginfo_t *siginfo)
52768 }
52769 cprm.limit = RLIM_INFINITY;
52770
52771- dump_count = atomic_inc_return(&core_dump_count);
52772+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
52773 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
52774 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
52775 task_tgid_vnr(current), current->comm);
52776@@ -608,6 +612,8 @@ void do_coredump(siginfo_t *siginfo)
52777 } else {
52778 struct inode *inode;
52779
52780+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
52781+
52782 if (cprm.limit < binfmt->min_coredump)
52783 goto fail_unlock;
52784
52785@@ -666,7 +672,7 @@ close_fail:
52786 filp_close(cprm.file, NULL);
52787 fail_dropcount:
52788 if (ispipe)
52789- atomic_dec(&core_dump_count);
52790+ atomic_dec_unchecked(&core_dump_count);
52791 fail_unlock:
52792 kfree(cn.corename);
52793 fail_corename:
52794@@ -687,7 +693,7 @@ int dump_write(struct file *file, const void *addr, int nr)
52795 {
52796 return !dump_interrupted() &&
52797 access_ok(VERIFY_READ, addr, nr) &&
52798- file->f_op->write(file, addr, nr, &file->f_pos) == nr;
52799+ file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
52800 }
52801 EXPORT_SYMBOL(dump_write);
52802
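
do_coredump() above routes the crashing signal number through a local and, for the signals that typically indicate a memory-safety fault, invokes the brute-force handler. A trivial sketch of that classification, where looks_like_exploit_crash() is a demo name for the condition the patch inlines (gr_handle_brute_attach itself is not shown in this excerpt):

```c
/* Only faults that plausibly come from an exploited memory-safety bug
 * feed the brute-force handler; a plain SIGTERM core does not. */
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>

static bool looks_like_exploit_crash(int signr)
{
        return signr == SIGSEGV || signr == SIGBUS ||
               signr == SIGKILL || signr == SIGILL;
}

int main(void)
{
        const int probes[] = { SIGSEGV, SIGTERM, SIGILL };

        for (unsigned int i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
                printf("signal %d -> brute handler: %s\n", probes[i],
                       looks_like_exploit_crash(probes[i]) ? "yes" : "no");
        return 0;
}
```
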
52803diff --git a/fs/dcache.c b/fs/dcache.c
52804index f09b908..04b9690 100644
52805--- a/fs/dcache.c
52806+++ b/fs/dcache.c
52807@@ -3086,7 +3086,8 @@ void __init vfs_caches_init(unsigned long mempages)
52808 mempages -= reserve;
52809
52810 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
52811- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
52812+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
52813+ SLAB_NO_SANITIZE, NULL);
52814
52815 dcache_init();
52816 inode_init();
52817diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
52818index c7c83ff..bda9461 100644
52819--- a/fs/debugfs/inode.c
52820+++ b/fs/debugfs/inode.c
52821@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
52822 */
52823 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
52824 {
52825+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
52826+ return __create_file(name, S_IFDIR | S_IRWXU,
52827+#else
52828 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
52829+#endif
52830 parent, NULL, NULL);
52831 }
52832 EXPORT_SYMBOL_GPL(debugfs_create_dir);
52833diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
52834index 5eab400..810a3f5 100644
52835--- a/fs/ecryptfs/inode.c
52836+++ b/fs/ecryptfs/inode.c
52837@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
52838 old_fs = get_fs();
52839 set_fs(get_ds());
52840 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
52841- (char __user *)lower_buf,
52842+ (char __force_user *)lower_buf,
52843 PATH_MAX);
52844 set_fs(old_fs);
52845 if (rc < 0)
52846@@ -706,7 +706,7 @@ out:
52847 static void
52848 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
52849 {
52850- char *buf = nd_get_link(nd);
52851+ const char *buf = nd_get_link(nd);
52852 if (!IS_ERR(buf)) {
52853 /* Free the char* */
52854 kfree(buf);
52855diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
52856index e4141f2..d8263e8 100644
52857--- a/fs/ecryptfs/miscdev.c
52858+++ b/fs/ecryptfs/miscdev.c
52859@@ -304,7 +304,7 @@ check_list:
52860 goto out_unlock_msg_ctx;
52861 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
52862 if (msg_ctx->msg) {
52863- if (copy_to_user(&buf[i], packet_length, packet_length_size))
52864+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
52865 goto out_unlock_msg_ctx;
52866 i += packet_length_size;
52867 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
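
The guard added above refuses to copy_to_user() more bytes than the local packet_length buffer holds, turning a potential kernel stack over-read into an error return. A userspace analog of the check, with memcpy() standing in for the uaccess copy and dst assumed large enough, as in the original:

```c
/* Never copy more bytes than the source object holds: the check the
 * patch inserts before copy_to_user(). */
#include <stdio.h>
#include <string.h>

static int copy_out(char *dst, const char *src, size_t n, size_t src_cap)
{
        if (n > src_cap)
                return -1;          /* would over-read the source buffer */
        memcpy(dst, src, n);
        return 0;
}

int main(void)
{
        char packet_length[8] = "1234567";
        char out[16];

        printf("%d\n", copy_out(out, packet_length, sizeof(packet_length),
                                sizeof(packet_length)));   /*  0 */
        printf("%d\n", copy_out(out, packet_length, 12,
                                sizeof(packet_length)));   /* -1 */
        return 0;
}
```
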
52868diff --git a/fs/exec.c b/fs/exec.c
52869index ffd7a81..3c84660 100644
52870--- a/fs/exec.c
52871+++ b/fs/exec.c
52872@@ -55,8 +55,20 @@
52873 #include <linux/pipe_fs_i.h>
52874 #include <linux/oom.h>
52875 #include <linux/compat.h>
52876+#include <linux/random.h>
52877+#include <linux/seq_file.h>
52878+#include <linux/coredump.h>
52879+#include <linux/mman.h>
52880+
52881+#ifdef CONFIG_PAX_REFCOUNT
52882+#include <linux/kallsyms.h>
52883+#include <linux/kdebug.h>
52884+#endif
52885+
52886+#include <trace/events/fs.h>
52887
52888 #include <asm/uaccess.h>
52889+#include <asm/sections.h>
52890 #include <asm/mmu_context.h>
52891 #include <asm/tlb.h>
52892
52893@@ -66,17 +78,32 @@
52894
52895 #include <trace/events/sched.h>
52896
52897+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
52898+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
52899+{
52900+	pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback; this is probably not what you wanted.\n");
52901+}
52902+#endif
52903+
52904+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
52905+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
52906+EXPORT_SYMBOL(pax_set_initial_flags_func);
52907+#endif
52908+
52909 int suid_dumpable = 0;
52910
52911 static LIST_HEAD(formats);
52912 static DEFINE_RWLOCK(binfmt_lock);
52913
52914+extern int gr_process_kernel_exec_ban(void);
52915+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
52916+
52917 void __register_binfmt(struct linux_binfmt * fmt, int insert)
52918 {
52919 BUG_ON(!fmt);
52920 write_lock(&binfmt_lock);
52921- insert ? list_add(&fmt->lh, &formats) :
52922- list_add_tail(&fmt->lh, &formats);
52923+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
52924+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
52925 write_unlock(&binfmt_lock);
52926 }
52927
52928@@ -85,7 +112,7 @@ EXPORT_SYMBOL(__register_binfmt);
52929 void unregister_binfmt(struct linux_binfmt * fmt)
52930 {
52931 write_lock(&binfmt_lock);
52932- list_del(&fmt->lh);
52933+ pax_list_del((struct list_head *)&fmt->lh);
52934 write_unlock(&binfmt_lock);
52935 }
52936
52937@@ -180,18 +207,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
52938 int write)
52939 {
52940 struct page *page;
52941- int ret;
52942
52943-#ifdef CONFIG_STACK_GROWSUP
52944- if (write) {
52945- ret = expand_downwards(bprm->vma, pos);
52946- if (ret < 0)
52947- return NULL;
52948- }
52949-#endif
52950- ret = get_user_pages(current, bprm->mm, pos,
52951- 1, write, 1, &page, NULL);
52952- if (ret <= 0)
52953+ if (0 > expand_downwards(bprm->vma, pos))
52954+ return NULL;
52955+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
52956 return NULL;
52957
52958 if (write) {
52959@@ -207,6 +226,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
52960 if (size <= ARG_MAX)
52961 return page;
52962
52963+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52964+ // only allow 512KB for argv+env on suid/sgid binaries
52965+ // to prevent easy ASLR exhaustion
52966+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
52967+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
52968+ (size > (512 * 1024))) {
52969+ put_page(page);
52970+ return NULL;
52971+ }
52972+#endif
52973+
52974 /*
52975 * Limit to 1/4-th the stack size for the argv+env strings.
52976 * This ensures that:
52977@@ -266,6 +296,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
52978 vma->vm_end = STACK_TOP_MAX;
52979 vma->vm_start = vma->vm_end - PAGE_SIZE;
52980 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
52981+
52982+#ifdef CONFIG_PAX_SEGMEXEC
52983+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
52984+#endif
52985+
52986 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
52987 INIT_LIST_HEAD(&vma->anon_vma_chain);
52988
52989@@ -276,6 +311,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
52990 mm->stack_vm = mm->total_vm = 1;
52991 up_write(&mm->mmap_sem);
52992 bprm->p = vma->vm_end - sizeof(void *);
52993+
52994+#ifdef CONFIG_PAX_RANDUSTACK
52995+ if (randomize_va_space)
52996+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
52997+#endif
52998+
52999 return 0;
53000 err:
53001 up_write(&mm->mmap_sem);
53002@@ -396,7 +437,7 @@ struct user_arg_ptr {
53003 } ptr;
53004 };
53005
53006-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
53007+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
53008 {
53009 const char __user *native;
53010
53011@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
53012 compat_uptr_t compat;
53013
53014 if (get_user(compat, argv.ptr.compat + nr))
53015- return ERR_PTR(-EFAULT);
53016+ return (const char __force_user *)ERR_PTR(-EFAULT);
53017
53018 return compat_ptr(compat);
53019 }
53020 #endif
53021
53022 if (get_user(native, argv.ptr.native + nr))
53023- return ERR_PTR(-EFAULT);
53024+ return (const char __force_user *)ERR_PTR(-EFAULT);
53025
53026 return native;
53027 }
53028@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
53029 if (!p)
53030 break;
53031
53032- if (IS_ERR(p))
53033+ if (IS_ERR((const char __force_kernel *)p))
53034 return -EFAULT;
53035
53036 if (i >= max)
53037@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
53038
53039 ret = -EFAULT;
53040 str = get_user_arg_ptr(argv, argc);
53041- if (IS_ERR(str))
53042+ if (IS_ERR((const char __force_kernel *)str))
53043 goto out;
53044
53045 len = strnlen_user(str, MAX_ARG_STRLEN);
53046@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
53047 int r;
53048 mm_segment_t oldfs = get_fs();
53049 struct user_arg_ptr argv = {
53050- .ptr.native = (const char __user *const __user *)__argv,
53051+ .ptr.native = (const char __force_user * const __force_user *)__argv,
53052 };
53053
53054 set_fs(KERNEL_DS);
53055@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
53056 unsigned long new_end = old_end - shift;
53057 struct mmu_gather tlb;
53058
53059- BUG_ON(new_start > new_end);
53060+ if (new_start >= new_end || new_start < mmap_min_addr)
53061+ return -ENOMEM;
53062
53063 /*
53064 * ensure there are no vmas between where we want to go
53065@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
53066 if (vma != find_vma(mm, new_start))
53067 return -EFAULT;
53068
53069+#ifdef CONFIG_PAX_SEGMEXEC
53070+ BUG_ON(pax_find_mirror_vma(vma));
53071+#endif
53072+
53073 /*
53074 * cover the whole range: [new_start, old_end)
53075 */
53076@@ -607,7 +653,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
53077 return -ENOMEM;
53078
53079 lru_add_drain();
53080- tlb_gather_mmu(&tlb, mm, 0);
53081+ tlb_gather_mmu(&tlb, mm, old_start, old_end);
53082 if (new_end > old_start) {
53083 /*
53084 * when the old and new regions overlap clear from new_end.
53085@@ -624,7 +670,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
53086 free_pgd_range(&tlb, old_start, old_end, new_end,
53087 vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
53088 }
53089- tlb_finish_mmu(&tlb, new_end, old_end);
53090+ tlb_finish_mmu(&tlb, old_start, old_end);
53091
53092 /*
53093 * Shrink the vma to just the new range. Always succeeds.
53094@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
53095 stack_top = arch_align_stack(stack_top);
53096 stack_top = PAGE_ALIGN(stack_top);
53097
53098- if (unlikely(stack_top < mmap_min_addr) ||
53099- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
53100- return -ENOMEM;
53101-
53102 stack_shift = vma->vm_end - stack_top;
53103
53104 bprm->p -= stack_shift;
53105@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
53106 bprm->exec -= stack_shift;
53107
53108 down_write(&mm->mmap_sem);
53109+
53110+ /* Move stack pages down in memory. */
53111+ if (stack_shift) {
53112+ ret = shift_arg_pages(vma, stack_shift);
53113+ if (ret)
53114+ goto out_unlock;
53115+ }
53116+
53117 vm_flags = VM_STACK_FLAGS;
53118
53119+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
53120+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
53121+ vm_flags &= ~VM_EXEC;
53122+
53123+#ifdef CONFIG_PAX_MPROTECT
53124+ if (mm->pax_flags & MF_PAX_MPROTECT)
53125+ vm_flags &= ~VM_MAYEXEC;
53126+#endif
53127+
53128+ }
53129+#endif
53130+
53131 /*
53132 * Adjust stack execute permissions; explicitly enable for
53133 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
53134@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
53135 goto out_unlock;
53136 BUG_ON(prev != vma);
53137
53138- /* Move stack pages down in memory. */
53139- if (stack_shift) {
53140- ret = shift_arg_pages(vma, stack_shift);
53141- if (ret)
53142- goto out_unlock;
53143- }
53144-
53145 /* mprotect_fixup is overkill to remove the temporary stack flags */
53146 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
53147
53148@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
53149 #endif
53150 current->mm->start_stack = bprm->p;
53151 ret = expand_stack(vma, stack_base);
53152+
53153+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
53154+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
53155+ unsigned long size;
53156+ vm_flags_t vm_flags;
53157+
53158+ size = STACK_TOP - vma->vm_end;
53159+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
53160+
53161+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
53162+
53163+#ifdef CONFIG_X86
53164+ if (!ret) {
53165+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
53166+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
53167+ }
53168+#endif
53169+
53170+ }
53171+#endif
53172+
53173 if (ret)
53174 ret = -EFAULT;
53175
53176@@ -772,6 +848,8 @@ struct file *open_exec(const char *name)
53177
53178 fsnotify_open(file);
53179
53180+ trace_open_exec(name);
53181+
53182 err = deny_write_access(file);
53183 if (err)
53184 goto exit;
53185@@ -795,7 +873,7 @@ int kernel_read(struct file *file, loff_t offset,
53186 old_fs = get_fs();
53187 set_fs(get_ds());
53188 /* The cast to a user pointer is valid due to the set_fs() */
53189- result = vfs_read(file, (void __user *)addr, count, &pos);
53190+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
53191 set_fs(old_fs);
53192 return result;
53193 }
53194@@ -1251,7 +1329,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
53195 }
53196 rcu_read_unlock();
53197
53198- if (p->fs->users > n_fs) {
53199+ if (atomic_read(&p->fs->users) > n_fs) {
53200 bprm->unsafe |= LSM_UNSAFE_SHARE;
53201 } else {
53202 res = -EAGAIN;
53203@@ -1451,6 +1529,31 @@ int search_binary_handler(struct linux_binprm *bprm)
53204
53205 EXPORT_SYMBOL(search_binary_handler);
53206
53207+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53208+static DEFINE_PER_CPU(u64, exec_counter);
53209+static int __init init_exec_counters(void)
53210+{
53211+ unsigned int cpu;
53212+
53213+ for_each_possible_cpu(cpu) {
53214+ per_cpu(exec_counter, cpu) = (u64)cpu;
53215+ }
53216+
53217+ return 0;
53218+}
53219+early_initcall(init_exec_counters);
53220+static inline void increment_exec_counter(void)
53221+{
53222+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
53223+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
53224+}
53225+#else
53226+static inline void increment_exec_counter(void) {}
53227+#endif
53228+
53229+extern void gr_handle_exec_args(struct linux_binprm *bprm,
53230+ struct user_arg_ptr argv);
53231+
53232 /*
53233 * sys_execve() executes a new program.
53234 */
53235@@ -1458,6 +1561,11 @@ static int do_execve_common(const char *filename,
53236 struct user_arg_ptr argv,
53237 struct user_arg_ptr envp)
53238 {
53239+#ifdef CONFIG_GRKERNSEC
53240+ struct file *old_exec_file;
53241+ struct acl_subject_label *old_acl;
53242+ struct rlimit old_rlim[RLIM_NLIMITS];
53243+#endif
53244 struct linux_binprm *bprm;
53245 struct file *file;
53246 struct files_struct *displaced;
53247@@ -1465,6 +1573,8 @@ static int do_execve_common(const char *filename,
53248 int retval;
53249 const struct cred *cred = current_cred();
53250
53251+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&cred->user->processes), 1);
53252+
53253 /*
53254 * We move the actual failure in case of RLIMIT_NPROC excess from
53255 * set*uid() to execve() because too many poorly written programs
53256@@ -1505,12 +1615,22 @@ static int do_execve_common(const char *filename,
53257 if (IS_ERR(file))
53258 goto out_unmark;
53259
53260+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
53261+ retval = -EPERM;
53262+ goto out_file;
53263+ }
53264+
53265 sched_exec();
53266
53267 bprm->file = file;
53268 bprm->filename = filename;
53269 bprm->interp = filename;
53270
53271+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
53272+ retval = -EACCES;
53273+ goto out_file;
53274+ }
53275+
53276 retval = bprm_mm_init(bprm);
53277 if (retval)
53278 goto out_file;
53279@@ -1527,24 +1647,70 @@ static int do_execve_common(const char *filename,
53280 if (retval < 0)
53281 goto out;
53282
53283+#ifdef CONFIG_GRKERNSEC
53284+ old_acl = current->acl;
53285+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
53286+ old_exec_file = current->exec_file;
53287+ get_file(file);
53288+ current->exec_file = file;
53289+#endif
53290+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53291+ /* limit suid stack to 8MB
53292+ * we saved the old limits above and will restore them if this exec fails
53293+ */
53294+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
53295+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
53296+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
53297+#endif
53298+
53299+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
53300+ retval = -EPERM;
53301+ goto out_fail;
53302+ }
53303+
53304+ if (!gr_tpe_allow(file)) {
53305+ retval = -EACCES;
53306+ goto out_fail;
53307+ }
53308+
53309+ if (gr_check_crash_exec(file)) {
53310+ retval = -EACCES;
53311+ goto out_fail;
53312+ }
53313+
53314+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
53315+ bprm->unsafe);
53316+ if (retval < 0)
53317+ goto out_fail;
53318+
53319 retval = copy_strings_kernel(1, &bprm->filename, bprm);
53320 if (retval < 0)
53321- goto out;
53322+ goto out_fail;
53323
53324 bprm->exec = bprm->p;
53325 retval = copy_strings(bprm->envc, envp, bprm);
53326 if (retval < 0)
53327- goto out;
53328+ goto out_fail;
53329
53330 retval = copy_strings(bprm->argc, argv, bprm);
53331 if (retval < 0)
53332- goto out;
53333+ goto out_fail;
53334+
53335+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
53336+
53337+ gr_handle_exec_args(bprm, argv);
53338
53339 retval = search_binary_handler(bprm);
53340 if (retval < 0)
53341- goto out;
53342+ goto out_fail;
53343+#ifdef CONFIG_GRKERNSEC
53344+ if (old_exec_file)
53345+ fput(old_exec_file);
53346+#endif
53347
53348 /* execve succeeded */
53349+
53350+ increment_exec_counter();
53351 current->fs->in_exec = 0;
53352 current->in_execve = 0;
53353 acct_update_integrals(current);
53354@@ -1553,6 +1719,14 @@ static int do_execve_common(const char *filename,
53355 put_files_struct(displaced);
53356 return retval;
53357
53358+out_fail:
53359+#ifdef CONFIG_GRKERNSEC
53360+ current->acl = old_acl;
53361+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
53362+ fput(current->exec_file);
53363+ current->exec_file = old_exec_file;
53364+#endif
53365+
53366 out:
53367 if (bprm->mm) {
53368 acct_arg_size(bprm, 0);
53369@@ -1701,3 +1875,287 @@ asmlinkage long compat_sys_execve(const char __user * filename,
53370 return error;
53371 }
53372 #endif
53373+
53374+int pax_check_flags(unsigned long *flags)
53375+{
53376+ int retval = 0;
53377+
53378+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
53379+ if (*flags & MF_PAX_SEGMEXEC)
53380+ {
53381+ *flags &= ~MF_PAX_SEGMEXEC;
53382+ retval = -EINVAL;
53383+ }
53384+#endif
53385+
53386+ if ((*flags & MF_PAX_PAGEEXEC)
53387+
53388+#ifdef CONFIG_PAX_PAGEEXEC
53389+ && (*flags & MF_PAX_SEGMEXEC)
53390+#endif
53391+
53392+ )
53393+ {
53394+ *flags &= ~MF_PAX_PAGEEXEC;
53395+ retval = -EINVAL;
53396+ }
53397+
53398+ if ((*flags & MF_PAX_MPROTECT)
53399+
53400+#ifdef CONFIG_PAX_MPROTECT
53401+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
53402+#endif
53403+
53404+ )
53405+ {
53406+ *flags &= ~MF_PAX_MPROTECT;
53407+ retval = -EINVAL;
53408+ }
53409+
53410+ if ((*flags & MF_PAX_EMUTRAMP)
53411+
53412+#ifdef CONFIG_PAX_EMUTRAMP
53413+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
53414+#endif
53415+
53416+ )
53417+ {
53418+ *flags &= ~MF_PAX_EMUTRAMP;
53419+ retval = -EINVAL;
53420+ }
53421+
53422+ return retval;
53423+}
53424+
53425+EXPORT_SYMBOL(pax_check_flags);
53426+
53427+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
53428+char *pax_get_path(const struct path *path, char *buf, int buflen)
53429+{
53430+ char *pathname = d_path(path, buf, buflen);
53431+
53432+ if (IS_ERR(pathname))
53433+ goto toolong;
53434+
53435+ pathname = mangle_path(buf, pathname, "\t\n\\");
53436+ if (!pathname)
53437+ goto toolong;
53438+
53439+ *pathname = 0;
53440+ return buf;
53441+
53442+toolong:
53443+ return "<path too long>";
53444+}
53445+EXPORT_SYMBOL(pax_get_path);
53446+
53447+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
53448+{
53449+ struct task_struct *tsk = current;
53450+ struct mm_struct *mm = current->mm;
53451+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
53452+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
53453+ char *path_exec = NULL;
53454+ char *path_fault = NULL;
53455+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
53456+ siginfo_t info = { };
53457+
53458+ if (buffer_exec && buffer_fault) {
53459+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
53460+
53461+ down_read(&mm->mmap_sem);
53462+ vma = mm->mmap;
53463+ while (vma && (!vma_exec || !vma_fault)) {
53464+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
53465+ vma_exec = vma;
53466+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
53467+ vma_fault = vma;
53468+ vma = vma->vm_next;
53469+ }
53470+ if (vma_exec)
53471+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
53472+ if (vma_fault) {
53473+ start = vma_fault->vm_start;
53474+ end = vma_fault->vm_end;
53475+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
53476+ if (vma_fault->vm_file)
53477+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
53478+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
53479+ path_fault = "<heap>";
53480+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
53481+ path_fault = "<stack>";
53482+ else
53483+ path_fault = "<anonymous mapping>";
53484+ }
53485+ up_read(&mm->mmap_sem);
53486+ }
53487+ if (tsk->signal->curr_ip)
53488+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
53489+ else
53490+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
53491+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
53492+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
53493+ free_page((unsigned long)buffer_exec);
53494+ free_page((unsigned long)buffer_fault);
53495+ pax_report_insns(regs, pc, sp);
53496+ info.si_signo = SIGKILL;
53497+ info.si_errno = 0;
53498+ info.si_code = SI_KERNEL;
53499+ info.si_pid = 0;
53500+ info.si_uid = 0;
53501+ do_coredump(&info);
53502+}
53503+#endif
53504+
53505+#ifdef CONFIG_PAX_REFCOUNT
53506+void pax_report_refcount_overflow(struct pt_regs *regs)
53507+{
53508+ if (current->signal->curr_ip)
53509+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
53510+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
53511+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
53512+ else
53513+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
53514+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
53515+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
53516+ preempt_disable();
53517+ show_regs(regs);
53518+ preempt_enable();
53519+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
53520+}
53521+#endif
53522+
53523+#ifdef CONFIG_PAX_USERCOPY
53524+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
53525+static noinline int check_stack_object(const void *obj, unsigned long len)
53526+{
53527+ const void * const stack = task_stack_page(current);
53528+ const void * const stackend = stack + THREAD_SIZE;
53529+
53530+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
53531+ const void *frame = NULL;
53532+ const void *oldframe;
53533+#endif
53534+
53535+ if (obj + len < obj)
53536+ return -1;
53537+
53538+ if (obj + len <= stack || stackend <= obj)
53539+ return 0;
53540+
53541+ if (obj < stack || stackend < obj + len)
53542+ return -1;
53543+
53544+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
53545+ oldframe = __builtin_frame_address(1);
53546+ if (oldframe)
53547+ frame = __builtin_frame_address(2);
53548+ /*
53549+ low ----------------------------------------------> high
53550+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
53551+ ^----------------^
53552+ allow copies only within here
53553+ */
53554+ while (stack <= frame && frame < stackend) {
53555+ /* if obj + len extends past the last frame, this
53556+ check won't pass and the next frame will be 0,
53557+ causing us to bail out and correctly report
53558+ the copy as invalid
53559+ */
53560+ if (obj + len <= frame)
53561+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
53562+ oldframe = frame;
53563+ frame = *(const void * const *)frame;
53564+ }
53565+ return -1;
53566+#else
53567+ return 1;
53568+#endif
53569+}
53570+
53571+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
53572+{
53573+ if (current->signal->curr_ip)
53574+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
53575+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
53576+ else
53577+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
53578+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
53579+ dump_stack();
53580+ gr_handle_kernel_exploit();
53581+ do_group_exit(SIGKILL);
53582+}
53583+#endif
53584+
53585+#ifdef CONFIG_PAX_USERCOPY
53586+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
53587+{
53588+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
53589+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
53590+#ifdef CONFIG_MODULES
53591+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
53592+#else
53593+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
53594+#endif
53595+
53596+#else
53597+ unsigned long textlow = (unsigned long)_stext;
53598+ unsigned long texthigh = (unsigned long)_etext;
53599+#endif
53600+
53601+ if (high <= textlow || low > texthigh)
53602+ return false;
53603+ else
53604+ return true;
53605+}
53606+#endif
53607+
53608+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
53609+{
53610+
53611+#ifdef CONFIG_PAX_USERCOPY
53612+ const char *type;
53613+
53614+ if (!n)
53615+ return;
53616+
53617+ type = check_heap_object(ptr, n);
53618+ if (!type) {
53619+ int ret = check_stack_object(ptr, n);
53620+ if (ret == 1 || ret == 2)
53621+ return;
53622+ if (ret == 0) {
53623+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
53624+ type = "<kernel text>";
53625+ else
53626+ return;
53627+ } else
53628+ type = "<process stack>";
53629+ }
53630+
53631+ pax_report_usercopy(ptr, n, to_user, type);
53632+#endif
53633+
53634+}
53635+EXPORT_SYMBOL(__check_object_size);
53636+
53637+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
53638+void pax_track_stack(void)
53639+{
53640+ unsigned long sp = (unsigned long)&sp;
53641+ if (sp < current_thread_info()->lowest_stack &&
53642+ sp > (unsigned long)task_stack_page(current))
53643+ current_thread_info()->lowest_stack = sp;
53644+}
53645+EXPORT_SYMBOL(pax_track_stack);
53646+#endif
53647+
53648+#ifdef CONFIG_PAX_SIZE_OVERFLOW
53649+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
53650+{
53651+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
53652+ dump_stack();
53653+ do_group_exit(SIGKILL);
53654+}
53655+EXPORT_SYMBOL(report_size_overflow);
53656+#endif
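
check_stack_object() above first classifies the copy range against the task stack: 0 means the object is not on this stack, 1 fully inside, -1 a straddle that is always rejected; only then does the x86 frame-pointer walk refine the answer. A stand-alone sketch of that range classification, using plain integers for addresses so the comparisons stay well defined:

```c
/* Range classification as in check_stack_object(): 0 = object not on
 * this stack, 1 = fully inside, -1 = partial overlap (always rejected). */
#include <stdio.h>

static int classify(unsigned long obj, unsigned long len,
                    unsigned long stack, unsigned long stackend)
{
        if (obj + len < obj)
                return -1;                      /* wrap-around */
        if (obj + len <= stack || stackend <= obj)
                return 0;                       /* entirely elsewhere */
        if (obj < stack || stackend < obj + len)
                return -1;                      /* straddles a boundary */
        return 1;                               /* fully inside */
}

int main(void)
{
        const unsigned long stack = 0x1000, stackend = 0x3000;

        printf("%d\n", classify(0x1010, 0x20, stack, stackend));  /*  1 */
        printf("%d\n", classify(0x0ff0, 0x40, stack, stackend));  /* -1 */
        printf("%d\n", classify(0x0ff0, 0x10, stack, stackend));  /*  0 */
        return 0;
}
```
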
53657diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
53658index 9f9992b..8b59411 100644
53659--- a/fs/ext2/balloc.c
53660+++ b/fs/ext2/balloc.c
53661@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
53662
53663 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
53664 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
53665- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
53666+ if (free_blocks < root_blocks + 1 &&
53667 !uid_eq(sbi->s_resuid, current_fsuid()) &&
53668 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
53669- !in_group_p (sbi->s_resgid))) {
53670+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
53671 return 0;
53672 }
53673 return 1;
53674diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
53675index 22548f5..41521d8 100644
53676--- a/fs/ext3/balloc.c
53677+++ b/fs/ext3/balloc.c
53678@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
53679
53680 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
53681 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
53682- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
53683+ if (free_blocks < root_blocks + 1 &&
53684 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
53685 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
53686- !in_group_p (sbi->s_resgid))) {
53687+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
53688 return 0;
53689 }
53690 return 1;
53691diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
53692index 3742e4c..69a797f 100644
53693--- a/fs/ext4/balloc.c
53694+++ b/fs/ext4/balloc.c
53695@@ -528,8 +528,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
53696 /* Hm, nope. Are (enough) root reserved clusters available? */
53697 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
53698 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
53699- capable(CAP_SYS_RESOURCE) ||
53700- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
53701+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
53702+ capable_nolog(CAP_SYS_RESOURCE)) {
53703
53704 if (free_clusters >= (nclusters + dirty_clusters +
53705 resv_clusters))
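
In all three ext*_has_free_blocks() variants the patch moves the capability test to the end of the condition and switches it to capable_nolog(), so the privileged (and otherwise audit-generating) check only runs once the cheap reserved-uid/gid tests have been exhausted. A sketch of the short-circuit effect in the ext4 allow-chain shape, where every predicate is a hypothetical demo function:

```c
/* Ordering demo: with the capability test last in the || chain it is
 * only evaluated when the cheap owner/group tests fail. All predicates
 * here are hypothetical demo functions, not kernel APIs. */
#include <stdbool.h>
#include <stdio.h>

static int capable_calls;

static bool capable_nolog_demo(void)
{
        capable_calls++;
        return false;
}

static bool may_use_reserve(bool is_resuid, bool in_resgid)
{
        return is_resuid || in_resgid || capable_nolog_demo();
}

int main(void)
{
        (void)may_use_reserve(true, false);   /* short-circuits, no call */
        (void)may_use_reserve(false, false);  /* falls through to capability */
        printf("capability consulted %d time(s)\n", capable_calls);  /* 1 */
        return 0;
}
```
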
53706diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
53707index 5aae3d1..b5da7f8 100644
53708--- a/fs/ext4/ext4.h
53709+++ b/fs/ext4/ext4.h
53710@@ -1252,19 +1252,19 @@ struct ext4_sb_info {
53711 unsigned long s_mb_last_start;
53712
53713 /* stats for buddy allocator */
53714- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
53715- atomic_t s_bal_success; /* we found long enough chunks */
53716- atomic_t s_bal_allocated; /* in blocks */
53717- atomic_t s_bal_ex_scanned; /* total extents scanned */
53718- atomic_t s_bal_goals; /* goal hits */
53719- atomic_t s_bal_breaks; /* too long searches */
53720- atomic_t s_bal_2orders; /* 2^order hits */
53721+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
53722+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
53723+ atomic_unchecked_t s_bal_allocated; /* in blocks */
53724+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
53725+ atomic_unchecked_t s_bal_goals; /* goal hits */
53726+ atomic_unchecked_t s_bal_breaks; /* too long searches */
53727+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
53728 spinlock_t s_bal_lock;
53729 unsigned long s_mb_buddies_generated;
53730 unsigned long long s_mb_generation_time;
53731- atomic_t s_mb_lost_chunks;
53732- atomic_t s_mb_preallocated;
53733- atomic_t s_mb_discarded;
53734+ atomic_unchecked_t s_mb_lost_chunks;
53735+ atomic_unchecked_t s_mb_preallocated;
53736+ atomic_unchecked_t s_mb_discarded;
53737 atomic_t s_lock_busy;
53738
53739 /* locality groups */
53740diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
53741index 59c6750..a549154 100644
53742--- a/fs/ext4/mballoc.c
53743+++ b/fs/ext4/mballoc.c
53744@@ -1865,7 +1865,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
53745 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
53746
53747 if (EXT4_SB(sb)->s_mb_stats)
53748- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
53749+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
53750
53751 break;
53752 }
53753@@ -2170,7 +2170,7 @@ repeat:
53754 ac->ac_status = AC_STATUS_CONTINUE;
53755 ac->ac_flags |= EXT4_MB_HINT_FIRST;
53756 cr = 3;
53757- atomic_inc(&sbi->s_mb_lost_chunks);
53758+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
53759 goto repeat;
53760 }
53761 }
53762@@ -2678,25 +2678,25 @@ int ext4_mb_release(struct super_block *sb)
53763 if (sbi->s_mb_stats) {
53764 ext4_msg(sb, KERN_INFO,
53765 "mballoc: %u blocks %u reqs (%u success)",
53766- atomic_read(&sbi->s_bal_allocated),
53767- atomic_read(&sbi->s_bal_reqs),
53768- atomic_read(&sbi->s_bal_success));
53769+ atomic_read_unchecked(&sbi->s_bal_allocated),
53770+ atomic_read_unchecked(&sbi->s_bal_reqs),
53771+ atomic_read_unchecked(&sbi->s_bal_success));
53772 ext4_msg(sb, KERN_INFO,
53773 "mballoc: %u extents scanned, %u goal hits, "
53774 "%u 2^N hits, %u breaks, %u lost",
53775- atomic_read(&sbi->s_bal_ex_scanned),
53776- atomic_read(&sbi->s_bal_goals),
53777- atomic_read(&sbi->s_bal_2orders),
53778- atomic_read(&sbi->s_bal_breaks),
53779- atomic_read(&sbi->s_mb_lost_chunks));
53780+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
53781+ atomic_read_unchecked(&sbi->s_bal_goals),
53782+ atomic_read_unchecked(&sbi->s_bal_2orders),
53783+ atomic_read_unchecked(&sbi->s_bal_breaks),
53784+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
53785 ext4_msg(sb, KERN_INFO,
53786 "mballoc: %lu generated and it took %Lu",
53787 sbi->s_mb_buddies_generated,
53788 sbi->s_mb_generation_time);
53789 ext4_msg(sb, KERN_INFO,
53790 "mballoc: %u preallocated, %u discarded",
53791- atomic_read(&sbi->s_mb_preallocated),
53792- atomic_read(&sbi->s_mb_discarded));
53793+ atomic_read_unchecked(&sbi->s_mb_preallocated),
53794+ atomic_read_unchecked(&sbi->s_mb_discarded));
53795 }
53796
53797 free_percpu(sbi->s_locality_groups);
53798@@ -3150,16 +3150,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
53799 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
53800
53801 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
53802- atomic_inc(&sbi->s_bal_reqs);
53803- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
53804+ atomic_inc_unchecked(&sbi->s_bal_reqs);
53805+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
53806 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
53807- atomic_inc(&sbi->s_bal_success);
53808- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
53809+ atomic_inc_unchecked(&sbi->s_bal_success);
53810+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
53811 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
53812 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
53813- atomic_inc(&sbi->s_bal_goals);
53814+ atomic_inc_unchecked(&sbi->s_bal_goals);
53815 if (ac->ac_found > sbi->s_mb_max_to_scan)
53816- atomic_inc(&sbi->s_bal_breaks);
53817+ atomic_inc_unchecked(&sbi->s_bal_breaks);
53818 }
53819
53820 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
53821@@ -3559,7 +3559,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
53822 trace_ext4_mb_new_inode_pa(ac, pa);
53823
53824 ext4_mb_use_inode_pa(ac, pa);
53825- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
53826+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
53827
53828 ei = EXT4_I(ac->ac_inode);
53829 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
53830@@ -3619,7 +3619,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
53831 trace_ext4_mb_new_group_pa(ac, pa);
53832
53833 ext4_mb_use_group_pa(ac, pa);
53834- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
53835+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
53836
53837 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
53838 lg = ac->ac_lg;
53839@@ -3708,7 +3708,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
53840 * from the bitmap and continue.
53841 */
53842 }
53843- atomic_add(free, &sbi->s_mb_discarded);
53844+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
53845
53846 return err;
53847 }
53848@@ -3726,7 +3726,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
53849 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
53850 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
53851 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
53852- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
53853+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
53854 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
53855
53856 return 0;
53857diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
53858index 214461e..3614c89 100644
53859--- a/fs/ext4/mmp.c
53860+++ b/fs/ext4/mmp.c
53861@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
53862 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
53863 const char *function, unsigned int line, const char *msg)
53864 {
53865- __ext4_warning(sb, function, line, msg);
53866+ __ext4_warning(sb, function, line, "%s", msg);
53867 __ext4_warning(sb, function, line,
53868 "MMP failure info: last update time: %llu, last update "
53869 "node: %s, last update device: %s\n",
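
The mmp change is a classic format-string fix: msg is not a compile-time literal, so it must travel through a "%s" conversion rather than serve as the format itself. A two-function userspace demonstration:

```c
/* A message that is not a compile-time literal must be printed
 * through "%s", never passed as the format itself. */
#include <stdio.h>

static void warn(const char *msg)
{
        /* printf(msg) would treat any '%' in msg as a conversion */
        printf("%s\n", msg);
}

int main(void)
{
        warn("device 100% full");   /* safe despite the stray '%' */
        return 0;
}
```
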
53870diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
53871index 49d3c01..9579efd 100644
53872--- a/fs/ext4/resize.c
53873+++ b/fs/ext4/resize.c
53874@@ -79,12 +79,20 @@ static int verify_group_input(struct super_block *sb,
53875 ext4_fsblk_t end = start + input->blocks_count;
53876 ext4_group_t group = input->group;
53877 ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
53878- unsigned overhead = ext4_group_overhead_blocks(sb, group);
53879- ext4_fsblk_t metaend = start + overhead;
53880+ unsigned overhead;
53881+ ext4_fsblk_t metaend;
53882 struct buffer_head *bh = NULL;
53883 ext4_grpblk_t free_blocks_count, offset;
53884 int err = -EINVAL;
53885
53886+ if (group != sbi->s_groups_count) {
53887+ ext4_warning(sb, "Cannot add at group %u (only %u groups)",
53888+ input->group, sbi->s_groups_count);
53889+ return -EINVAL;
53890+ }
53891+
53892+ overhead = ext4_group_overhead_blocks(sb, group);
53893+ metaend = start + overhead;
53894 input->free_blocks_count = free_blocks_count =
53895 input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
53896
53897@@ -96,10 +104,7 @@ static int verify_group_input(struct super_block *sb,
53898 free_blocks_count, input->reserved_blocks);
53899
53900 ext4_get_group_no_and_offset(sb, start, NULL, &offset);
53901- if (group != sbi->s_groups_count)
53902- ext4_warning(sb, "Cannot add at group %u (only %u groups)",
53903- input->group, sbi->s_groups_count);
53904- else if (offset != 0)
53905+ if (offset != 0)
53906 ext4_warning(sb, "Last group not full");
53907 else if (input->reserved_blocks > input->blocks_count / 5)
53908 ext4_warning(sb, "Reserved blocks too high (%u)",
53909diff --git a/fs/ext4/super.c b/fs/ext4/super.c
53910index 3f7c39e..227f24f 100644
53911--- a/fs/ext4/super.c
53912+++ b/fs/ext4/super.c
53913@@ -1236,7 +1236,7 @@ static ext4_fsblk_t get_sb_block(void **data)
53914 }
53915
53916 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
53917-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
53918+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
53919 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
53920
53921 #ifdef CONFIG_QUOTA
53922@@ -2372,7 +2372,7 @@ struct ext4_attr {
53923 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
53924 const char *, size_t);
53925 int offset;
53926-};
53927+} __do_const;
53928
53929 static int parse_strtoull(const char *buf,
53930 unsigned long long max, unsigned long long *value)
53931diff --git a/fs/fcntl.c b/fs/fcntl.c
53932index 6599222..e7bf0de 100644
53933--- a/fs/fcntl.c
53934+++ b/fs/fcntl.c
53935@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
53936 if (err)
53937 return err;
53938
53939+ if (gr_handle_chroot_fowner(pid, type))
53940+ return -ENOENT;
53941+ if (gr_check_protected_task_fowner(pid, type))
53942+ return -EACCES;
53943+
53944 f_modown(filp, pid, type, force);
53945 return 0;
53946 }
53947diff --git a/fs/fhandle.c b/fs/fhandle.c
53948index 999ff5c..41f4109 100644
53949--- a/fs/fhandle.c
53950+++ b/fs/fhandle.c
53951@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
53952 } else
53953 retval = 0;
53954 /* copy the mount id */
53955- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
53956- sizeof(*mnt_id)) ||
53957+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
53958 copy_to_user(ufh, handle,
53959 sizeof(struct file_handle) + handle_bytes))
53960 retval = -EFAULT;
53961diff --git a/fs/file.c b/fs/file.c
53962index 4a78f98..9447397 100644
53963--- a/fs/file.c
53964+++ b/fs/file.c
53965@@ -16,6 +16,7 @@
53966 #include <linux/slab.h>
53967 #include <linux/vmalloc.h>
53968 #include <linux/file.h>
53969+#include <linux/security.h>
53970 #include <linux/fdtable.h>
53971 #include <linux/bitops.h>
53972 #include <linux/interrupt.h>
53973@@ -828,6 +829,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
53974 if (!file)
53975 return __close_fd(files, fd);
53976
53977+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
53978 if (fd >= rlimit(RLIMIT_NOFILE))
53979 return -EBADF;
53980
53981@@ -854,6 +856,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
53982 if (unlikely(oldfd == newfd))
53983 return -EINVAL;
53984
53985+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
53986 if (newfd >= rlimit(RLIMIT_NOFILE))
53987 return -EBADF;
53988
53989@@ -909,6 +912,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
53990 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
53991 {
53992 int err;
53993+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
53994 if (from >= rlimit(RLIMIT_NOFILE))
53995 return -EINVAL;
53996 err = alloc_fd(from, flags);
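
All three fs/file.c hunks follow one pattern: gr_learn_resource(current, RLIMIT_NOFILE, fd, 0) is called just before the existing rlimit check. In grsecurity's learning mode this records the highest descriptor number a task actually requested, so the learning logs can propose a tight RLIMIT_NOFILE for the generated RBAC policy; with learning disabled the hook is intended to cost next to nothing. The recurring shape:

/* Kernel-side pattern from the hunks above: record the request for
 * policy learning, then enforce the limit exactly as before. */
gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
if (fd >= rlimit(RLIMIT_NOFILE))
	return -EBADF;
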
53997diff --git a/fs/filesystems.c b/fs/filesystems.c
53998index 92567d9..fcd8cbf 100644
53999--- a/fs/filesystems.c
54000+++ b/fs/filesystems.c
54001@@ -273,7 +273,11 @@ struct file_system_type *get_fs_type(const char *name)
54002 int len = dot ? dot - name : strlen(name);
54003
54004 fs = __get_fs_type(name, len);
54005+#ifdef CONFIG_GRKERNSEC_MODHARDEN
54006+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
54007+#else
54008 if (!fs && (request_module("fs-%.*s", len, name) == 0))
54009+#endif
54010 fs = __get_fs_type(name, len);
54011
54012 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
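
Under CONFIG_GRKERNSEC_MODHARDEN, the implicit module auto-load for an unknown filesystem type goes through ___request_module(true, "grsec_modharden_fs", ...) rather than plain request_module(). The extra marker string attributes the load to a user-triggerable path, letting the hardened loader restrict or log auto-loads performed on behalf of unprivileged users while leaving kernel-internal requests alone. A toy userspace model of that idea follows; the names and the deny policy are illustrative, not grsecurity's actual implementation:

/* Toy model: thread an "origin" tag through the module-load path so
 * policy can treat user-triggered auto-loads specially. */
#include <stdio.h>
#include <string.h>

static int load_module(const char *origin, int uid, const char *name)
{
	if (strcmp(origin, "user-triggered") == 0 && uid != 0) {
		fprintf(stderr, "denied %s (origin=%s uid=%d)\n",
			name, origin, uid);
		return -1;
	}
	printf("loading %s\n", name);
	return 0;
}

int main(void)
{
	load_module("kernel-internal", 0, "fs-ext4");	  /* allowed */
	load_module("user-triggered", 1000, "fs-cramfs"); /* denied  */
	return 0;
}
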
54013diff --git a/fs/fs_struct.c b/fs/fs_struct.c
54014index d8ac61d..79a36f0 100644
54015--- a/fs/fs_struct.c
54016+++ b/fs/fs_struct.c
54017@@ -4,6 +4,7 @@
54018 #include <linux/path.h>
54019 #include <linux/slab.h>
54020 #include <linux/fs_struct.h>
54021+#include <linux/grsecurity.h>
54022 #include "internal.h"
54023
54024 /*
54025@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
54026 write_seqcount_begin(&fs->seq);
54027 old_root = fs->root;
54028 fs->root = *path;
54029+ gr_set_chroot_entries(current, path);
54030 write_seqcount_end(&fs->seq);
54031 spin_unlock(&fs->lock);
54032 if (old_root.dentry)
54033@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
54034 int hits = 0;
54035 spin_lock(&fs->lock);
54036 write_seqcount_begin(&fs->seq);
54037+	/* this root replacement is only done by pivot_root;
54038+ leave grsec's chroot tagging alone for this task
54039+ so that a pivoted root isn't treated as a chroot
54040+ */
54041 hits += replace_path(&fs->root, old_root, new_root);
54042 hits += replace_path(&fs->pwd, old_root, new_root);
54043 write_seqcount_end(&fs->seq);
54044@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
54045 task_lock(tsk);
54046 spin_lock(&fs->lock);
54047 tsk->fs = NULL;
54048- kill = !--fs->users;
54049+ gr_clear_chroot_entries(tsk);
54050+ kill = !atomic_dec_return(&fs->users);
54051 spin_unlock(&fs->lock);
54052 task_unlock(tsk);
54053 if (kill)
54054@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
54055 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
54056 /* We don't need to lock fs - think why ;-) */
54057 if (fs) {
54058- fs->users = 1;
54059+ atomic_set(&fs->users, 1);
54060 fs->in_exec = 0;
54061 spin_lock_init(&fs->lock);
54062 seqcount_init(&fs->seq);
54063@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
54064 spin_lock(&old->lock);
54065 fs->root = old->root;
54066 path_get(&fs->root);
54067+ /* instead of calling gr_set_chroot_entries here,
54068+ we call it from every caller of this function
54069+ */
54070 fs->pwd = old->pwd;
54071 path_get(&fs->pwd);
54072 spin_unlock(&old->lock);
54073@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
54074
54075 task_lock(current);
54076 spin_lock(&fs->lock);
54077- kill = !--fs->users;
54078+ kill = !atomic_dec_return(&fs->users);
54079 current->fs = new_fs;
54080+ gr_set_chroot_entries(current, &new_fs->root);
54081 spin_unlock(&fs->lock);
54082 task_unlock(current);
54083
54084@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
54085
54086 int current_umask(void)
54087 {
54088- return current->fs->umask;
54089+ return current->fs->umask | gr_acl_umask();
54090 }
54091 EXPORT_SYMBOL(current_umask);
54092
54093 /* to be mentioned only in INIT_TASK */
54094 struct fs_struct init_fs = {
54095- .users = 1,
54096+ .users = ATOMIC_INIT(1),
54097 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
54098 .seq = SEQCNT_ZERO,
54099 .umask = 0022,
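
The fs_struct conversion turns fs->users from a plain int manipulated under fs->lock into an atomic_t: `kill = !--fs->users` becomes `kill = !atomic_dec_return(&fs->users)` and the static initializer becomes ATOMIC_INIT(1). That keeps the drop-to-zero decision correct even on paths where grsecurity consults the count without holding the spinlock, and it accompanies the hunks that tag and clear chroot state (gr_set_chroot_entries()/gr_clear_chroot_entries()) and fold gr_acl_umask() into current_umask(). The drop-to-zero refcount idiom, modeled with C11 atomics:

/* Userspace sketch of the idiom: atomic_fetch_sub returns the old
 * value, so old == 1 means this caller dropped the last reference. */
#include <stdatomic.h>
#include <stdio.h>

struct fs_like { atomic_int users; };

static void put_fs(struct fs_like *fs)
{
	if (atomic_fetch_sub(&fs->users, 1) == 1)
		printf("last user gone: free the structure\n");
}

int main(void)
{
	struct fs_like fs = { .users = 2 };
	put_fs(&fs);	/* still referenced */
	put_fs(&fs);	/* prints the free message */
	return 0;
}
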
54100diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
54101index e2cba1f..17a25bb 100644
54102--- a/fs/fscache/cookie.c
54103+++ b/fs/fscache/cookie.c
54104@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
54105 parent ? (char *) parent->def->name : "<no-parent>",
54106 def->name, netfs_data);
54107
54108- fscache_stat(&fscache_n_acquires);
54109+ fscache_stat_unchecked(&fscache_n_acquires);
54110
54111 /* if there's no parent cookie, then we don't create one here either */
54112 if (!parent) {
54113- fscache_stat(&fscache_n_acquires_null);
54114+ fscache_stat_unchecked(&fscache_n_acquires_null);
54115 _leave(" [no parent]");
54116 return NULL;
54117 }
54118@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
54119 /* allocate and initialise a cookie */
54120 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
54121 if (!cookie) {
54122- fscache_stat(&fscache_n_acquires_oom);
54123+ fscache_stat_unchecked(&fscache_n_acquires_oom);
54124 _leave(" [ENOMEM]");
54125 return NULL;
54126 }
54127@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
54128
54129 switch (cookie->def->type) {
54130 case FSCACHE_COOKIE_TYPE_INDEX:
54131- fscache_stat(&fscache_n_cookie_index);
54132+ fscache_stat_unchecked(&fscache_n_cookie_index);
54133 break;
54134 case FSCACHE_COOKIE_TYPE_DATAFILE:
54135- fscache_stat(&fscache_n_cookie_data);
54136+ fscache_stat_unchecked(&fscache_n_cookie_data);
54137 break;
54138 default:
54139- fscache_stat(&fscache_n_cookie_special);
54140+ fscache_stat_unchecked(&fscache_n_cookie_special);
54141 break;
54142 }
54143
54144@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
54145 if (fscache_acquire_non_index_cookie(cookie) < 0) {
54146 atomic_dec(&parent->n_children);
54147 __fscache_cookie_put(cookie);
54148- fscache_stat(&fscache_n_acquires_nobufs);
54149+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
54150 _leave(" = NULL");
54151 return NULL;
54152 }
54153 }
54154
54155- fscache_stat(&fscache_n_acquires_ok);
54156+ fscache_stat_unchecked(&fscache_n_acquires_ok);
54157 _leave(" = %p", cookie);
54158 return cookie;
54159 }
54160@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
54161 cache = fscache_select_cache_for_object(cookie->parent);
54162 if (!cache) {
54163 up_read(&fscache_addremove_sem);
54164- fscache_stat(&fscache_n_acquires_no_cache);
54165+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
54166 _leave(" = -ENOMEDIUM [no cache]");
54167 return -ENOMEDIUM;
54168 }
54169@@ -255,12 +255,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
54170 object = cache->ops->alloc_object(cache, cookie);
54171 fscache_stat_d(&fscache_n_cop_alloc_object);
54172 if (IS_ERR(object)) {
54173- fscache_stat(&fscache_n_object_no_alloc);
54174+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
54175 ret = PTR_ERR(object);
54176 goto error;
54177 }
54178
54179- fscache_stat(&fscache_n_object_alloc);
54180+ fscache_stat_unchecked(&fscache_n_object_alloc);
54181
54182 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
54183
54184@@ -376,7 +376,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
54185
54186 _enter("{%s}", cookie->def->name);
54187
54188- fscache_stat(&fscache_n_invalidates);
54189+ fscache_stat_unchecked(&fscache_n_invalidates);
54190
54191 /* Only permit invalidation of data files. Invalidating an index will
54192 * require the caller to release all its attachments to the tree rooted
54193@@ -434,10 +434,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
54194 {
54195 struct fscache_object *object;
54196
54197- fscache_stat(&fscache_n_updates);
54198+ fscache_stat_unchecked(&fscache_n_updates);
54199
54200 if (!cookie) {
54201- fscache_stat(&fscache_n_updates_null);
54202+ fscache_stat_unchecked(&fscache_n_updates_null);
54203 _leave(" [no cookie]");
54204 return;
54205 }
54206@@ -471,12 +471,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
54207 struct fscache_object *object;
54208 unsigned long event;
54209
54210- fscache_stat(&fscache_n_relinquishes);
54211+ fscache_stat_unchecked(&fscache_n_relinquishes);
54212 if (retire)
54213- fscache_stat(&fscache_n_relinquishes_retire);
54214+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
54215
54216 if (!cookie) {
54217- fscache_stat(&fscache_n_relinquishes_null);
54218+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
54219 _leave(" [no cookie]");
54220 return;
54221 }
54222@@ -492,7 +492,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
54223
54224 /* wait for the cookie to finish being instantiated (or to fail) */
54225 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
54226- fscache_stat(&fscache_n_relinquishes_waitcrt);
54227+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
54228 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
54229 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
54230 }
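
From here through fs/fscache/stats.c the patch mechanically rewrites FS-Cache's statistics counters: every atomic_t tally becomes atomic_unchecked_t and every fscache_stat() on one becomes fscache_stat_unchecked(). Under PaX's REFCOUNT hardening, atomic_t operations detect overflow in order to catch reference-count bugs; pure event counters may legitimately wrap, so they opt out via the unchecked type. Note what is not converted: the fscache_n_cop_* counters stay plain atomic_t because they are balanced by fscache_stat_d() around each cache-op call, i.e. they track in-flight operations rather than a monotonic tally. A toy contrast of the two behaviors (a conceptual model only; the real checked operations are arch-specific):

/* Checked vs unchecked increment, modeled in userspace. */
#include <limits.h>
#include <stdio.h>

static int checked_inc(int *v)
{
	if (*v == INT_MAX)
		return -1;	/* refcount overflow: refuse and report */
	(*v)++;
	return 0;
}

static void unchecked_inc(unsigned *v)
{
	(*v)++;			/* a statistic may wrap harmlessly */
}

int main(void)
{
	int ref = INT_MAX;
	unsigned stat = 0xffffffffu;
	if (checked_inc(&ref) < 0)
		puts("refcount overflow caught");
	unchecked_inc(&stat);
	printf("stat wrapped to %u\n", stat);
	return 0;
}
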
54231diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
54232index ee38fef..0a326d4 100644
54233--- a/fs/fscache/internal.h
54234+++ b/fs/fscache/internal.h
54235@@ -148,101 +148,101 @@ extern void fscache_proc_cleanup(void);
54236 * stats.c
54237 */
54238 #ifdef CONFIG_FSCACHE_STATS
54239-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
54240-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
54241+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
54242+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
54243
54244-extern atomic_t fscache_n_op_pend;
54245-extern atomic_t fscache_n_op_run;
54246-extern atomic_t fscache_n_op_enqueue;
54247-extern atomic_t fscache_n_op_deferred_release;
54248-extern atomic_t fscache_n_op_release;
54249-extern atomic_t fscache_n_op_gc;
54250-extern atomic_t fscache_n_op_cancelled;
54251-extern atomic_t fscache_n_op_rejected;
54252+extern atomic_unchecked_t fscache_n_op_pend;
54253+extern atomic_unchecked_t fscache_n_op_run;
54254+extern atomic_unchecked_t fscache_n_op_enqueue;
54255+extern atomic_unchecked_t fscache_n_op_deferred_release;
54256+extern atomic_unchecked_t fscache_n_op_release;
54257+extern atomic_unchecked_t fscache_n_op_gc;
54258+extern atomic_unchecked_t fscache_n_op_cancelled;
54259+extern atomic_unchecked_t fscache_n_op_rejected;
54260
54261-extern atomic_t fscache_n_attr_changed;
54262-extern atomic_t fscache_n_attr_changed_ok;
54263-extern atomic_t fscache_n_attr_changed_nobufs;
54264-extern atomic_t fscache_n_attr_changed_nomem;
54265-extern atomic_t fscache_n_attr_changed_calls;
54266+extern atomic_unchecked_t fscache_n_attr_changed;
54267+extern atomic_unchecked_t fscache_n_attr_changed_ok;
54268+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
54269+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
54270+extern atomic_unchecked_t fscache_n_attr_changed_calls;
54271
54272-extern atomic_t fscache_n_allocs;
54273-extern atomic_t fscache_n_allocs_ok;
54274-extern atomic_t fscache_n_allocs_wait;
54275-extern atomic_t fscache_n_allocs_nobufs;
54276-extern atomic_t fscache_n_allocs_intr;
54277-extern atomic_t fscache_n_allocs_object_dead;
54278-extern atomic_t fscache_n_alloc_ops;
54279-extern atomic_t fscache_n_alloc_op_waits;
54280+extern atomic_unchecked_t fscache_n_allocs;
54281+extern atomic_unchecked_t fscache_n_allocs_ok;
54282+extern atomic_unchecked_t fscache_n_allocs_wait;
54283+extern atomic_unchecked_t fscache_n_allocs_nobufs;
54284+extern atomic_unchecked_t fscache_n_allocs_intr;
54285+extern atomic_unchecked_t fscache_n_allocs_object_dead;
54286+extern atomic_unchecked_t fscache_n_alloc_ops;
54287+extern atomic_unchecked_t fscache_n_alloc_op_waits;
54288
54289-extern atomic_t fscache_n_retrievals;
54290-extern atomic_t fscache_n_retrievals_ok;
54291-extern atomic_t fscache_n_retrievals_wait;
54292-extern atomic_t fscache_n_retrievals_nodata;
54293-extern atomic_t fscache_n_retrievals_nobufs;
54294-extern atomic_t fscache_n_retrievals_intr;
54295-extern atomic_t fscache_n_retrievals_nomem;
54296-extern atomic_t fscache_n_retrievals_object_dead;
54297-extern atomic_t fscache_n_retrieval_ops;
54298-extern atomic_t fscache_n_retrieval_op_waits;
54299+extern atomic_unchecked_t fscache_n_retrievals;
54300+extern atomic_unchecked_t fscache_n_retrievals_ok;
54301+extern atomic_unchecked_t fscache_n_retrievals_wait;
54302+extern atomic_unchecked_t fscache_n_retrievals_nodata;
54303+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
54304+extern atomic_unchecked_t fscache_n_retrievals_intr;
54305+extern atomic_unchecked_t fscache_n_retrievals_nomem;
54306+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
54307+extern atomic_unchecked_t fscache_n_retrieval_ops;
54308+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
54309
54310-extern atomic_t fscache_n_stores;
54311-extern atomic_t fscache_n_stores_ok;
54312-extern atomic_t fscache_n_stores_again;
54313-extern atomic_t fscache_n_stores_nobufs;
54314-extern atomic_t fscache_n_stores_oom;
54315-extern atomic_t fscache_n_store_ops;
54316-extern atomic_t fscache_n_store_calls;
54317-extern atomic_t fscache_n_store_pages;
54318-extern atomic_t fscache_n_store_radix_deletes;
54319-extern atomic_t fscache_n_store_pages_over_limit;
54320+extern atomic_unchecked_t fscache_n_stores;
54321+extern atomic_unchecked_t fscache_n_stores_ok;
54322+extern atomic_unchecked_t fscache_n_stores_again;
54323+extern atomic_unchecked_t fscache_n_stores_nobufs;
54324+extern atomic_unchecked_t fscache_n_stores_oom;
54325+extern atomic_unchecked_t fscache_n_store_ops;
54326+extern atomic_unchecked_t fscache_n_store_calls;
54327+extern atomic_unchecked_t fscache_n_store_pages;
54328+extern atomic_unchecked_t fscache_n_store_radix_deletes;
54329+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
54330
54331-extern atomic_t fscache_n_store_vmscan_not_storing;
54332-extern atomic_t fscache_n_store_vmscan_gone;
54333-extern atomic_t fscache_n_store_vmscan_busy;
54334-extern atomic_t fscache_n_store_vmscan_cancelled;
54335-extern atomic_t fscache_n_store_vmscan_wait;
54336+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
54337+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
54338+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
54339+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
54340+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
54341
54342-extern atomic_t fscache_n_marks;
54343-extern atomic_t fscache_n_uncaches;
54344+extern atomic_unchecked_t fscache_n_marks;
54345+extern atomic_unchecked_t fscache_n_uncaches;
54346
54347-extern atomic_t fscache_n_acquires;
54348-extern atomic_t fscache_n_acquires_null;
54349-extern atomic_t fscache_n_acquires_no_cache;
54350-extern atomic_t fscache_n_acquires_ok;
54351-extern atomic_t fscache_n_acquires_nobufs;
54352-extern atomic_t fscache_n_acquires_oom;
54353+extern atomic_unchecked_t fscache_n_acquires;
54354+extern atomic_unchecked_t fscache_n_acquires_null;
54355+extern atomic_unchecked_t fscache_n_acquires_no_cache;
54356+extern atomic_unchecked_t fscache_n_acquires_ok;
54357+extern atomic_unchecked_t fscache_n_acquires_nobufs;
54358+extern atomic_unchecked_t fscache_n_acquires_oom;
54359
54360-extern atomic_t fscache_n_invalidates;
54361-extern atomic_t fscache_n_invalidates_run;
54362+extern atomic_unchecked_t fscache_n_invalidates;
54363+extern atomic_unchecked_t fscache_n_invalidates_run;
54364
54365-extern atomic_t fscache_n_updates;
54366-extern atomic_t fscache_n_updates_null;
54367-extern atomic_t fscache_n_updates_run;
54368+extern atomic_unchecked_t fscache_n_updates;
54369+extern atomic_unchecked_t fscache_n_updates_null;
54370+extern atomic_unchecked_t fscache_n_updates_run;
54371
54372-extern atomic_t fscache_n_relinquishes;
54373-extern atomic_t fscache_n_relinquishes_null;
54374-extern atomic_t fscache_n_relinquishes_waitcrt;
54375-extern atomic_t fscache_n_relinquishes_retire;
54376+extern atomic_unchecked_t fscache_n_relinquishes;
54377+extern atomic_unchecked_t fscache_n_relinquishes_null;
54378+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
54379+extern atomic_unchecked_t fscache_n_relinquishes_retire;
54380
54381-extern atomic_t fscache_n_cookie_index;
54382-extern atomic_t fscache_n_cookie_data;
54383-extern atomic_t fscache_n_cookie_special;
54384+extern atomic_unchecked_t fscache_n_cookie_index;
54385+extern atomic_unchecked_t fscache_n_cookie_data;
54386+extern atomic_unchecked_t fscache_n_cookie_special;
54387
54388-extern atomic_t fscache_n_object_alloc;
54389-extern atomic_t fscache_n_object_no_alloc;
54390-extern atomic_t fscache_n_object_lookups;
54391-extern atomic_t fscache_n_object_lookups_negative;
54392-extern atomic_t fscache_n_object_lookups_positive;
54393-extern atomic_t fscache_n_object_lookups_timed_out;
54394-extern atomic_t fscache_n_object_created;
54395-extern atomic_t fscache_n_object_avail;
54396-extern atomic_t fscache_n_object_dead;
54397+extern atomic_unchecked_t fscache_n_object_alloc;
54398+extern atomic_unchecked_t fscache_n_object_no_alloc;
54399+extern atomic_unchecked_t fscache_n_object_lookups;
54400+extern atomic_unchecked_t fscache_n_object_lookups_negative;
54401+extern atomic_unchecked_t fscache_n_object_lookups_positive;
54402+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
54403+extern atomic_unchecked_t fscache_n_object_created;
54404+extern atomic_unchecked_t fscache_n_object_avail;
54405+extern atomic_unchecked_t fscache_n_object_dead;
54406
54407-extern atomic_t fscache_n_checkaux_none;
54408-extern atomic_t fscache_n_checkaux_okay;
54409-extern atomic_t fscache_n_checkaux_update;
54410-extern atomic_t fscache_n_checkaux_obsolete;
54411+extern atomic_unchecked_t fscache_n_checkaux_none;
54412+extern atomic_unchecked_t fscache_n_checkaux_okay;
54413+extern atomic_unchecked_t fscache_n_checkaux_update;
54414+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
54415
54416 extern atomic_t fscache_n_cop_alloc_object;
54417 extern atomic_t fscache_n_cop_lookup_object;
54418@@ -267,6 +267,11 @@ static inline void fscache_stat(atomic_t *stat)
54419 atomic_inc(stat);
54420 }
54421
54422+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
54423+{
54424+ atomic_inc_unchecked(stat);
54425+}
54426+
54427 static inline void fscache_stat_d(atomic_t *stat)
54428 {
54429 atomic_dec(stat);
54430@@ -279,6 +284,7 @@ extern const struct file_operations fscache_stats_fops;
54431
54432 #define __fscache_stat(stat) (NULL)
54433 #define fscache_stat(stat) do {} while (0)
54434+#define fscache_stat_unchecked(stat) do {} while (0)
54435 #define fscache_stat_d(stat) do {} while (0)
54436 #endif
54437
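
The header diff keeps declarations in step with the definitions: each exported statistic becomes atomic_unchecked_t, an fscache_stat_unchecked() inline joins fscache_stat(), and the CONFIG_FSCACHE_STATS=n stub block gains a matching no-op macro so call sites build either way. On configurations without the REFCOUNT feature, grsecurity's headers let the unchecked type degrade to the plain one; the fallback is shaped roughly like this (a sketch of the assumed compatibility layer, not the exact upstream text):

/* Sketch of the assumed fallback: without CONFIG_PAX_REFCOUNT the
 * unchecked type and helpers alias the ordinary atomic API, so the
 * conversion costs nothing on an unhardened build. */
#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)	atomic_read(v)
#define atomic_inc_unchecked(v)		atomic_inc(v)
#define atomic_dec_unchecked(v)		atomic_dec(v)
#define atomic_inc_return_unchecked(v)	atomic_inc_return(v)
#endif
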
54438diff --git a/fs/fscache/object.c b/fs/fscache/object.c
54439index 50d41c1..10ee117 100644
54440--- a/fs/fscache/object.c
54441+++ b/fs/fscache/object.c
54442@@ -143,7 +143,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
54443 /* Invalidate an object on disk */
54444 case FSCACHE_OBJECT_INVALIDATING:
54445 clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
54446- fscache_stat(&fscache_n_invalidates_run);
54447+ fscache_stat_unchecked(&fscache_n_invalidates_run);
54448 fscache_stat(&fscache_n_cop_invalidate_object);
54449 fscache_invalidate_object(object);
54450 fscache_stat_d(&fscache_n_cop_invalidate_object);
54451@@ -153,7 +153,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
54452 /* update the object metadata on disk */
54453 case FSCACHE_OBJECT_UPDATING:
54454 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
54455- fscache_stat(&fscache_n_updates_run);
54456+ fscache_stat_unchecked(&fscache_n_updates_run);
54457 fscache_stat(&fscache_n_cop_update_object);
54458 object->cache->ops->update_object(object);
54459 fscache_stat_d(&fscache_n_cop_update_object);
54460@@ -242,7 +242,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
54461 spin_lock(&object->lock);
54462 object->state = FSCACHE_OBJECT_DEAD;
54463 spin_unlock(&object->lock);
54464- fscache_stat(&fscache_n_object_dead);
54465+ fscache_stat_unchecked(&fscache_n_object_dead);
54466 goto terminal_transit;
54467
54468 /* handle the parent cache of this object being withdrawn from
54469@@ -257,7 +257,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
54470 spin_lock(&object->lock);
54471 object->state = FSCACHE_OBJECT_DEAD;
54472 spin_unlock(&object->lock);
54473- fscache_stat(&fscache_n_object_dead);
54474+ fscache_stat_unchecked(&fscache_n_object_dead);
54475 goto terminal_transit;
54476
54477 /* complain about the object being woken up once it is
54478@@ -495,7 +495,7 @@ static void fscache_lookup_object(struct fscache_object *object)
54479 parent->cookie->def->name, cookie->def->name,
54480 object->cache->tag->name);
54481
54482- fscache_stat(&fscache_n_object_lookups);
54483+ fscache_stat_unchecked(&fscache_n_object_lookups);
54484 fscache_stat(&fscache_n_cop_lookup_object);
54485 ret = object->cache->ops->lookup_object(object);
54486 fscache_stat_d(&fscache_n_cop_lookup_object);
54487@@ -506,7 +506,7 @@ static void fscache_lookup_object(struct fscache_object *object)
54488 if (ret == -ETIMEDOUT) {
54489 /* probably stuck behind another object, so move this one to
54490 * the back of the queue */
54491- fscache_stat(&fscache_n_object_lookups_timed_out);
54492+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
54493 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
54494 }
54495
54496@@ -529,7 +529,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
54497
54498 spin_lock(&object->lock);
54499 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
54500- fscache_stat(&fscache_n_object_lookups_negative);
54501+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
54502
54503 /* transit here to allow write requests to begin stacking up
54504 * and read requests to begin returning ENODATA */
54505@@ -575,7 +575,7 @@ void fscache_obtained_object(struct fscache_object *object)
54506 * result, in which case there may be data available */
54507 spin_lock(&object->lock);
54508 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
54509- fscache_stat(&fscache_n_object_lookups_positive);
54510+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
54511
54512 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
54513
54514@@ -589,7 +589,7 @@ void fscache_obtained_object(struct fscache_object *object)
54515 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
54516 } else {
54517 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
54518- fscache_stat(&fscache_n_object_created);
54519+ fscache_stat_unchecked(&fscache_n_object_created);
54520
54521 object->state = FSCACHE_OBJECT_AVAILABLE;
54522 spin_unlock(&object->lock);
54523@@ -634,7 +634,7 @@ static void fscache_object_available(struct fscache_object *object)
54524 fscache_enqueue_dependents(object);
54525
54526 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
54527- fscache_stat(&fscache_n_object_avail);
54528+ fscache_stat_unchecked(&fscache_n_object_avail);
54529
54530 _leave("");
54531 }
54532@@ -894,7 +894,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
54533 enum fscache_checkaux result;
54534
54535 if (!object->cookie->def->check_aux) {
54536- fscache_stat(&fscache_n_checkaux_none);
54537+ fscache_stat_unchecked(&fscache_n_checkaux_none);
54538 return FSCACHE_CHECKAUX_OKAY;
54539 }
54540
54541@@ -903,17 +903,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
54542 switch (result) {
54543 /* entry okay as is */
54544 case FSCACHE_CHECKAUX_OKAY:
54545- fscache_stat(&fscache_n_checkaux_okay);
54546+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
54547 break;
54548
54549 /* entry requires update */
54550 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
54551- fscache_stat(&fscache_n_checkaux_update);
54552+ fscache_stat_unchecked(&fscache_n_checkaux_update);
54553 break;
54554
54555 /* entry requires deletion */
54556 case FSCACHE_CHECKAUX_OBSOLETE:
54557- fscache_stat(&fscache_n_checkaux_obsolete);
54558+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
54559 break;
54560
54561 default:
54562diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
54563index 762a9ec..2023284 100644
54564--- a/fs/fscache/operation.c
54565+++ b/fs/fscache/operation.c
54566@@ -17,7 +17,7 @@
54567 #include <linux/slab.h>
54568 #include "internal.h"
54569
54570-atomic_t fscache_op_debug_id;
54571+atomic_unchecked_t fscache_op_debug_id;
54572 EXPORT_SYMBOL(fscache_op_debug_id);
54573
54574 /**
54575@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
54576 ASSERTCMP(atomic_read(&op->usage), >, 0);
54577 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
54578
54579- fscache_stat(&fscache_n_op_enqueue);
54580+ fscache_stat_unchecked(&fscache_n_op_enqueue);
54581 switch (op->flags & FSCACHE_OP_TYPE) {
54582 case FSCACHE_OP_ASYNC:
54583 _debug("queue async");
54584@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
54585 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
54586 if (op->processor)
54587 fscache_enqueue_operation(op);
54588- fscache_stat(&fscache_n_op_run);
54589+ fscache_stat_unchecked(&fscache_n_op_run);
54590 }
54591
54592 /*
54593@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
54594 if (object->n_in_progress > 0) {
54595 atomic_inc(&op->usage);
54596 list_add_tail(&op->pend_link, &object->pending_ops);
54597- fscache_stat(&fscache_n_op_pend);
54598+ fscache_stat_unchecked(&fscache_n_op_pend);
54599 } else if (!list_empty(&object->pending_ops)) {
54600 atomic_inc(&op->usage);
54601 list_add_tail(&op->pend_link, &object->pending_ops);
54602- fscache_stat(&fscache_n_op_pend);
54603+ fscache_stat_unchecked(&fscache_n_op_pend);
54604 fscache_start_operations(object);
54605 } else {
54606 ASSERTCMP(object->n_in_progress, ==, 0);
54607@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
54608 object->n_exclusive++; /* reads and writes must wait */
54609 atomic_inc(&op->usage);
54610 list_add_tail(&op->pend_link, &object->pending_ops);
54611- fscache_stat(&fscache_n_op_pend);
54612+ fscache_stat_unchecked(&fscache_n_op_pend);
54613 ret = 0;
54614 } else {
54615 /* If we're in any other state, there must have been an I/O
54616@@ -215,11 +215,11 @@ int fscache_submit_op(struct fscache_object *object,
54617 if (object->n_exclusive > 0) {
54618 atomic_inc(&op->usage);
54619 list_add_tail(&op->pend_link, &object->pending_ops);
54620- fscache_stat(&fscache_n_op_pend);
54621+ fscache_stat_unchecked(&fscache_n_op_pend);
54622 } else if (!list_empty(&object->pending_ops)) {
54623 atomic_inc(&op->usage);
54624 list_add_tail(&op->pend_link, &object->pending_ops);
54625- fscache_stat(&fscache_n_op_pend);
54626+ fscache_stat_unchecked(&fscache_n_op_pend);
54627 fscache_start_operations(object);
54628 } else {
54629 ASSERTCMP(object->n_exclusive, ==, 0);
54630@@ -231,12 +231,12 @@ int fscache_submit_op(struct fscache_object *object,
54631 object->n_ops++;
54632 atomic_inc(&op->usage);
54633 list_add_tail(&op->pend_link, &object->pending_ops);
54634- fscache_stat(&fscache_n_op_pend);
54635+ fscache_stat_unchecked(&fscache_n_op_pend);
54636 ret = 0;
54637 } else if (object->state == FSCACHE_OBJECT_DYING ||
54638 object->state == FSCACHE_OBJECT_LC_DYING ||
54639 object->state == FSCACHE_OBJECT_WITHDRAWING) {
54640- fscache_stat(&fscache_n_op_rejected);
54641+ fscache_stat_unchecked(&fscache_n_op_rejected);
54642 op->state = FSCACHE_OP_ST_CANCELLED;
54643 ret = -ENOBUFS;
54644 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
54645@@ -315,7 +315,7 @@ int fscache_cancel_op(struct fscache_operation *op,
54646 ret = -EBUSY;
54647 if (op->state == FSCACHE_OP_ST_PENDING) {
54648 ASSERT(!list_empty(&op->pend_link));
54649- fscache_stat(&fscache_n_op_cancelled);
54650+ fscache_stat_unchecked(&fscache_n_op_cancelled);
54651 list_del_init(&op->pend_link);
54652 if (do_cancel)
54653 do_cancel(op);
54654@@ -347,7 +347,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
54655 while (!list_empty(&object->pending_ops)) {
54656 op = list_entry(object->pending_ops.next,
54657 struct fscache_operation, pend_link);
54658- fscache_stat(&fscache_n_op_cancelled);
54659+ fscache_stat_unchecked(&fscache_n_op_cancelled);
54660 list_del_init(&op->pend_link);
54661
54662 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
54663@@ -419,7 +419,7 @@ void fscache_put_operation(struct fscache_operation *op)
54664 op->state, ==, FSCACHE_OP_ST_CANCELLED);
54665 op->state = FSCACHE_OP_ST_DEAD;
54666
54667- fscache_stat(&fscache_n_op_release);
54668+ fscache_stat_unchecked(&fscache_n_op_release);
54669
54670 if (op->release) {
54671 op->release(op);
54672@@ -442,7 +442,7 @@ void fscache_put_operation(struct fscache_operation *op)
54673 * lock, and defer it otherwise */
54674 if (!spin_trylock(&object->lock)) {
54675 _debug("defer put");
54676- fscache_stat(&fscache_n_op_deferred_release);
54677+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
54678
54679 cache = object->cache;
54680 spin_lock(&cache->op_gc_list_lock);
54681@@ -495,7 +495,7 @@ void fscache_operation_gc(struct work_struct *work)
54682
54683 _debug("GC DEFERRED REL OBJ%x OP%x",
54684 object->debug_id, op->debug_id);
54685- fscache_stat(&fscache_n_op_gc);
54686+ fscache_stat_unchecked(&fscache_n_op_gc);
54687
54688 ASSERTCMP(atomic_read(&op->usage), ==, 0);
54689 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
54690diff --git a/fs/fscache/page.c b/fs/fscache/page.c
54691index ff000e5..c44ec6d 100644
54692--- a/fs/fscache/page.c
54693+++ b/fs/fscache/page.c
54694@@ -61,7 +61,7 @@ try_again:
54695 val = radix_tree_lookup(&cookie->stores, page->index);
54696 if (!val) {
54697 rcu_read_unlock();
54698- fscache_stat(&fscache_n_store_vmscan_not_storing);
54699+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
54700 __fscache_uncache_page(cookie, page);
54701 return true;
54702 }
54703@@ -91,11 +91,11 @@ try_again:
54704 spin_unlock(&cookie->stores_lock);
54705
54706 if (xpage) {
54707- fscache_stat(&fscache_n_store_vmscan_cancelled);
54708- fscache_stat(&fscache_n_store_radix_deletes);
54709+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
54710+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
54711 ASSERTCMP(xpage, ==, page);
54712 } else {
54713- fscache_stat(&fscache_n_store_vmscan_gone);
54714+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
54715 }
54716
54717 wake_up_bit(&cookie->flags, 0);
54718@@ -110,11 +110,11 @@ page_busy:
54719 * sleeping on memory allocation, so we may need to impose a timeout
54720 * too. */
54721 if (!(gfp & __GFP_WAIT)) {
54722- fscache_stat(&fscache_n_store_vmscan_busy);
54723+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
54724 return false;
54725 }
54726
54727- fscache_stat(&fscache_n_store_vmscan_wait);
54728+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
54729 __fscache_wait_on_page_write(cookie, page);
54730 gfp &= ~__GFP_WAIT;
54731 goto try_again;
54732@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
54733 FSCACHE_COOKIE_STORING_TAG);
54734 if (!radix_tree_tag_get(&cookie->stores, page->index,
54735 FSCACHE_COOKIE_PENDING_TAG)) {
54736- fscache_stat(&fscache_n_store_radix_deletes);
54737+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
54738 xpage = radix_tree_delete(&cookie->stores, page->index);
54739 }
54740 spin_unlock(&cookie->stores_lock);
54741@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
54742
54743 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
54744
54745- fscache_stat(&fscache_n_attr_changed_calls);
54746+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
54747
54748 if (fscache_object_is_active(object)) {
54749 fscache_stat(&fscache_n_cop_attr_changed);
54750@@ -187,11 +187,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
54751
54752 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
54753
54754- fscache_stat(&fscache_n_attr_changed);
54755+ fscache_stat_unchecked(&fscache_n_attr_changed);
54756
54757 op = kzalloc(sizeof(*op), GFP_KERNEL);
54758 if (!op) {
54759- fscache_stat(&fscache_n_attr_changed_nomem);
54760+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
54761 _leave(" = -ENOMEM");
54762 return -ENOMEM;
54763 }
54764@@ -209,7 +209,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
54765 if (fscache_submit_exclusive_op(object, op) < 0)
54766 goto nobufs;
54767 spin_unlock(&cookie->lock);
54768- fscache_stat(&fscache_n_attr_changed_ok);
54769+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
54770 fscache_put_operation(op);
54771 _leave(" = 0");
54772 return 0;
54773@@ -217,7 +217,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
54774 nobufs:
54775 spin_unlock(&cookie->lock);
54776 kfree(op);
54777- fscache_stat(&fscache_n_attr_changed_nobufs);
54778+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
54779 _leave(" = %d", -ENOBUFS);
54780 return -ENOBUFS;
54781 }
54782@@ -255,7 +255,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
54783 /* allocate a retrieval operation and attempt to submit it */
54784 op = kzalloc(sizeof(*op), GFP_NOIO);
54785 if (!op) {
54786- fscache_stat(&fscache_n_retrievals_nomem);
54787+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
54788 return NULL;
54789 }
54790
54791@@ -283,13 +283,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
54792 return 0;
54793 }
54794
54795- fscache_stat(&fscache_n_retrievals_wait);
54796+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
54797
54798 jif = jiffies;
54799 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
54800 fscache_wait_bit_interruptible,
54801 TASK_INTERRUPTIBLE) != 0) {
54802- fscache_stat(&fscache_n_retrievals_intr);
54803+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
54804 _leave(" = -ERESTARTSYS");
54805 return -ERESTARTSYS;
54806 }
54807@@ -318,8 +318,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
54808 */
54809 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
54810 struct fscache_retrieval *op,
54811- atomic_t *stat_op_waits,
54812- atomic_t *stat_object_dead)
54813+ atomic_unchecked_t *stat_op_waits,
54814+ atomic_unchecked_t *stat_object_dead)
54815 {
54816 int ret;
54817
54818@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
54819 goto check_if_dead;
54820
54821 _debug(">>> WT");
54822- fscache_stat(stat_op_waits);
54823+ fscache_stat_unchecked(stat_op_waits);
54824 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
54825 fscache_wait_bit_interruptible,
54826 TASK_INTERRUPTIBLE) != 0) {
54827@@ -344,14 +344,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
54828
54829 check_if_dead:
54830 if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
54831- fscache_stat(stat_object_dead);
54832+ fscache_stat_unchecked(stat_object_dead);
54833 _leave(" = -ENOBUFS [cancelled]");
54834 return -ENOBUFS;
54835 }
54836 if (unlikely(fscache_object_is_dead(object))) {
54837 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
54838 fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
54839- fscache_stat(stat_object_dead);
54840+ fscache_stat_unchecked(stat_object_dead);
54841 return -ENOBUFS;
54842 }
54843 return 0;
54844@@ -378,7 +378,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
54845
54846 _enter("%p,%p,,,", cookie, page);
54847
54848- fscache_stat(&fscache_n_retrievals);
54849+ fscache_stat_unchecked(&fscache_n_retrievals);
54850
54851 if (hlist_empty(&cookie->backing_objects))
54852 goto nobufs;
54853@@ -417,7 +417,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
54854 goto nobufs_unlock_dec;
54855 spin_unlock(&cookie->lock);
54856
54857- fscache_stat(&fscache_n_retrieval_ops);
54858+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
54859
54860 /* pin the netfs read context in case we need to do the actual netfs
54861 * read because we've encountered a cache read failure */
54862@@ -447,15 +447,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
54863
54864 error:
54865 if (ret == -ENOMEM)
54866- fscache_stat(&fscache_n_retrievals_nomem);
54867+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
54868 else if (ret == -ERESTARTSYS)
54869- fscache_stat(&fscache_n_retrievals_intr);
54870+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
54871 else if (ret == -ENODATA)
54872- fscache_stat(&fscache_n_retrievals_nodata);
54873+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
54874 else if (ret < 0)
54875- fscache_stat(&fscache_n_retrievals_nobufs);
54876+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
54877 else
54878- fscache_stat(&fscache_n_retrievals_ok);
54879+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
54880
54881 fscache_put_retrieval(op);
54882 _leave(" = %d", ret);
54883@@ -467,7 +467,7 @@ nobufs_unlock:
54884 spin_unlock(&cookie->lock);
54885 kfree(op);
54886 nobufs:
54887- fscache_stat(&fscache_n_retrievals_nobufs);
54888+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
54889 _leave(" = -ENOBUFS");
54890 return -ENOBUFS;
54891 }
54892@@ -505,7 +505,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
54893
54894 _enter("%p,,%d,,,", cookie, *nr_pages);
54895
54896- fscache_stat(&fscache_n_retrievals);
54897+ fscache_stat_unchecked(&fscache_n_retrievals);
54898
54899 if (hlist_empty(&cookie->backing_objects))
54900 goto nobufs;
54901@@ -541,7 +541,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
54902 goto nobufs_unlock_dec;
54903 spin_unlock(&cookie->lock);
54904
54905- fscache_stat(&fscache_n_retrieval_ops);
54906+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
54907
54908 /* pin the netfs read context in case we need to do the actual netfs
54909 * read because we've encountered a cache read failure */
54910@@ -571,15 +571,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
54911
54912 error:
54913 if (ret == -ENOMEM)
54914- fscache_stat(&fscache_n_retrievals_nomem);
54915+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
54916 else if (ret == -ERESTARTSYS)
54917- fscache_stat(&fscache_n_retrievals_intr);
54918+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
54919 else if (ret == -ENODATA)
54920- fscache_stat(&fscache_n_retrievals_nodata);
54921+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
54922 else if (ret < 0)
54923- fscache_stat(&fscache_n_retrievals_nobufs);
54924+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
54925 else
54926- fscache_stat(&fscache_n_retrievals_ok);
54927+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
54928
54929 fscache_put_retrieval(op);
54930 _leave(" = %d", ret);
54931@@ -591,7 +591,7 @@ nobufs_unlock:
54932 spin_unlock(&cookie->lock);
54933 kfree(op);
54934 nobufs:
54935- fscache_stat(&fscache_n_retrievals_nobufs);
54936+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
54937 _leave(" = -ENOBUFS");
54938 return -ENOBUFS;
54939 }
54940@@ -615,7 +615,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
54941
54942 _enter("%p,%p,,,", cookie, page);
54943
54944- fscache_stat(&fscache_n_allocs);
54945+ fscache_stat_unchecked(&fscache_n_allocs);
54946
54947 if (hlist_empty(&cookie->backing_objects))
54948 goto nobufs;
54949@@ -647,7 +647,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
54950 goto nobufs_unlock;
54951 spin_unlock(&cookie->lock);
54952
54953- fscache_stat(&fscache_n_alloc_ops);
54954+ fscache_stat_unchecked(&fscache_n_alloc_ops);
54955
54956 ret = fscache_wait_for_retrieval_activation(
54957 object, op,
54958@@ -663,11 +663,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
54959
54960 error:
54961 if (ret == -ERESTARTSYS)
54962- fscache_stat(&fscache_n_allocs_intr);
54963+ fscache_stat_unchecked(&fscache_n_allocs_intr);
54964 else if (ret < 0)
54965- fscache_stat(&fscache_n_allocs_nobufs);
54966+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
54967 else
54968- fscache_stat(&fscache_n_allocs_ok);
54969+ fscache_stat_unchecked(&fscache_n_allocs_ok);
54970
54971 fscache_put_retrieval(op);
54972 _leave(" = %d", ret);
54973@@ -677,7 +677,7 @@ nobufs_unlock:
54974 spin_unlock(&cookie->lock);
54975 kfree(op);
54976 nobufs:
54977- fscache_stat(&fscache_n_allocs_nobufs);
54978+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
54979 _leave(" = -ENOBUFS");
54980 return -ENOBUFS;
54981 }
54982@@ -736,7 +736,7 @@ static void fscache_write_op(struct fscache_operation *_op)
54983
54984 spin_lock(&cookie->stores_lock);
54985
54986- fscache_stat(&fscache_n_store_calls);
54987+ fscache_stat_unchecked(&fscache_n_store_calls);
54988
54989 /* find a page to store */
54990 page = NULL;
54991@@ -747,7 +747,7 @@ static void fscache_write_op(struct fscache_operation *_op)
54992 page = results[0];
54993 _debug("gang %d [%lx]", n, page->index);
54994 if (page->index > op->store_limit) {
54995- fscache_stat(&fscache_n_store_pages_over_limit);
54996+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
54997 goto superseded;
54998 }
54999
55000@@ -759,7 +759,7 @@ static void fscache_write_op(struct fscache_operation *_op)
55001 spin_unlock(&cookie->stores_lock);
55002 spin_unlock(&object->lock);
55003
55004- fscache_stat(&fscache_n_store_pages);
55005+ fscache_stat_unchecked(&fscache_n_store_pages);
55006 fscache_stat(&fscache_n_cop_write_page);
55007 ret = object->cache->ops->write_page(op, page);
55008 fscache_stat_d(&fscache_n_cop_write_page);
55009@@ -860,7 +860,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
55010 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
55011 ASSERT(PageFsCache(page));
55012
55013- fscache_stat(&fscache_n_stores);
55014+ fscache_stat_unchecked(&fscache_n_stores);
55015
55016 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
55017 _leave(" = -ENOBUFS [invalidating]");
55018@@ -916,7 +916,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
55019 spin_unlock(&cookie->stores_lock);
55020 spin_unlock(&object->lock);
55021
55022- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
55023+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
55024 op->store_limit = object->store_limit;
55025
55026 if (fscache_submit_op(object, &op->op) < 0)
55027@@ -924,8 +924,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
55028
55029 spin_unlock(&cookie->lock);
55030 radix_tree_preload_end();
55031- fscache_stat(&fscache_n_store_ops);
55032- fscache_stat(&fscache_n_stores_ok);
55033+ fscache_stat_unchecked(&fscache_n_store_ops);
55034+ fscache_stat_unchecked(&fscache_n_stores_ok);
55035
55036 /* the work queue now carries its own ref on the object */
55037 fscache_put_operation(&op->op);
55038@@ -933,14 +933,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
55039 return 0;
55040
55041 already_queued:
55042- fscache_stat(&fscache_n_stores_again);
55043+ fscache_stat_unchecked(&fscache_n_stores_again);
55044 already_pending:
55045 spin_unlock(&cookie->stores_lock);
55046 spin_unlock(&object->lock);
55047 spin_unlock(&cookie->lock);
55048 radix_tree_preload_end();
55049 kfree(op);
55050- fscache_stat(&fscache_n_stores_ok);
55051+ fscache_stat_unchecked(&fscache_n_stores_ok);
55052 _leave(" = 0");
55053 return 0;
55054
55055@@ -959,14 +959,14 @@ nobufs:
55056 spin_unlock(&cookie->lock);
55057 radix_tree_preload_end();
55058 kfree(op);
55059- fscache_stat(&fscache_n_stores_nobufs);
55060+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
55061 _leave(" = -ENOBUFS");
55062 return -ENOBUFS;
55063
55064 nomem_free:
55065 kfree(op);
55066 nomem:
55067- fscache_stat(&fscache_n_stores_oom);
55068+ fscache_stat_unchecked(&fscache_n_stores_oom);
55069 _leave(" = -ENOMEM");
55070 return -ENOMEM;
55071 }
55072@@ -984,7 +984,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
55073 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
55074 ASSERTCMP(page, !=, NULL);
55075
55076- fscache_stat(&fscache_n_uncaches);
55077+ fscache_stat_unchecked(&fscache_n_uncaches);
55078
55079 /* cache withdrawal may beat us to it */
55080 if (!PageFsCache(page))
55081@@ -1035,7 +1035,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
55082 struct fscache_cookie *cookie = op->op.object->cookie;
55083
55084 #ifdef CONFIG_FSCACHE_STATS
55085- atomic_inc(&fscache_n_marks);
55086+ atomic_inc_unchecked(&fscache_n_marks);
55087 #endif
55088
55089 _debug("- mark %p{%lx}", page, page->index);
55090diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
55091index 40d13c7..ddf52b9 100644
55092--- a/fs/fscache/stats.c
55093+++ b/fs/fscache/stats.c
55094@@ -18,99 +18,99 @@
55095 /*
55096 * operation counters
55097 */
55098-atomic_t fscache_n_op_pend;
55099-atomic_t fscache_n_op_run;
55100-atomic_t fscache_n_op_enqueue;
55101-atomic_t fscache_n_op_requeue;
55102-atomic_t fscache_n_op_deferred_release;
55103-atomic_t fscache_n_op_release;
55104-atomic_t fscache_n_op_gc;
55105-atomic_t fscache_n_op_cancelled;
55106-atomic_t fscache_n_op_rejected;
55107+atomic_unchecked_t fscache_n_op_pend;
55108+atomic_unchecked_t fscache_n_op_run;
55109+atomic_unchecked_t fscache_n_op_enqueue;
55110+atomic_unchecked_t fscache_n_op_requeue;
55111+atomic_unchecked_t fscache_n_op_deferred_release;
55112+atomic_unchecked_t fscache_n_op_release;
55113+atomic_unchecked_t fscache_n_op_gc;
55114+atomic_unchecked_t fscache_n_op_cancelled;
55115+atomic_unchecked_t fscache_n_op_rejected;
55116
55117-atomic_t fscache_n_attr_changed;
55118-atomic_t fscache_n_attr_changed_ok;
55119-atomic_t fscache_n_attr_changed_nobufs;
55120-atomic_t fscache_n_attr_changed_nomem;
55121-atomic_t fscache_n_attr_changed_calls;
55122+atomic_unchecked_t fscache_n_attr_changed;
55123+atomic_unchecked_t fscache_n_attr_changed_ok;
55124+atomic_unchecked_t fscache_n_attr_changed_nobufs;
55125+atomic_unchecked_t fscache_n_attr_changed_nomem;
55126+atomic_unchecked_t fscache_n_attr_changed_calls;
55127
55128-atomic_t fscache_n_allocs;
55129-atomic_t fscache_n_allocs_ok;
55130-atomic_t fscache_n_allocs_wait;
55131-atomic_t fscache_n_allocs_nobufs;
55132-atomic_t fscache_n_allocs_intr;
55133-atomic_t fscache_n_allocs_object_dead;
55134-atomic_t fscache_n_alloc_ops;
55135-atomic_t fscache_n_alloc_op_waits;
55136+atomic_unchecked_t fscache_n_allocs;
55137+atomic_unchecked_t fscache_n_allocs_ok;
55138+atomic_unchecked_t fscache_n_allocs_wait;
55139+atomic_unchecked_t fscache_n_allocs_nobufs;
55140+atomic_unchecked_t fscache_n_allocs_intr;
55141+atomic_unchecked_t fscache_n_allocs_object_dead;
55142+atomic_unchecked_t fscache_n_alloc_ops;
55143+atomic_unchecked_t fscache_n_alloc_op_waits;
55144
55145-atomic_t fscache_n_retrievals;
55146-atomic_t fscache_n_retrievals_ok;
55147-atomic_t fscache_n_retrievals_wait;
55148-atomic_t fscache_n_retrievals_nodata;
55149-atomic_t fscache_n_retrievals_nobufs;
55150-atomic_t fscache_n_retrievals_intr;
55151-atomic_t fscache_n_retrievals_nomem;
55152-atomic_t fscache_n_retrievals_object_dead;
55153-atomic_t fscache_n_retrieval_ops;
55154-atomic_t fscache_n_retrieval_op_waits;
55155+atomic_unchecked_t fscache_n_retrievals;
55156+atomic_unchecked_t fscache_n_retrievals_ok;
55157+atomic_unchecked_t fscache_n_retrievals_wait;
55158+atomic_unchecked_t fscache_n_retrievals_nodata;
55159+atomic_unchecked_t fscache_n_retrievals_nobufs;
55160+atomic_unchecked_t fscache_n_retrievals_intr;
55161+atomic_unchecked_t fscache_n_retrievals_nomem;
55162+atomic_unchecked_t fscache_n_retrievals_object_dead;
55163+atomic_unchecked_t fscache_n_retrieval_ops;
55164+atomic_unchecked_t fscache_n_retrieval_op_waits;
55165
55166-atomic_t fscache_n_stores;
55167-atomic_t fscache_n_stores_ok;
55168-atomic_t fscache_n_stores_again;
55169-atomic_t fscache_n_stores_nobufs;
55170-atomic_t fscache_n_stores_oom;
55171-atomic_t fscache_n_store_ops;
55172-atomic_t fscache_n_store_calls;
55173-atomic_t fscache_n_store_pages;
55174-atomic_t fscache_n_store_radix_deletes;
55175-atomic_t fscache_n_store_pages_over_limit;
55176+atomic_unchecked_t fscache_n_stores;
55177+atomic_unchecked_t fscache_n_stores_ok;
55178+atomic_unchecked_t fscache_n_stores_again;
55179+atomic_unchecked_t fscache_n_stores_nobufs;
55180+atomic_unchecked_t fscache_n_stores_oom;
55181+atomic_unchecked_t fscache_n_store_ops;
55182+atomic_unchecked_t fscache_n_store_calls;
55183+atomic_unchecked_t fscache_n_store_pages;
55184+atomic_unchecked_t fscache_n_store_radix_deletes;
55185+atomic_unchecked_t fscache_n_store_pages_over_limit;
55186
55187-atomic_t fscache_n_store_vmscan_not_storing;
55188-atomic_t fscache_n_store_vmscan_gone;
55189-atomic_t fscache_n_store_vmscan_busy;
55190-atomic_t fscache_n_store_vmscan_cancelled;
55191-atomic_t fscache_n_store_vmscan_wait;
55192+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
55193+atomic_unchecked_t fscache_n_store_vmscan_gone;
55194+atomic_unchecked_t fscache_n_store_vmscan_busy;
55195+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
55196+atomic_unchecked_t fscache_n_store_vmscan_wait;
55197
55198-atomic_t fscache_n_marks;
55199-atomic_t fscache_n_uncaches;
55200+atomic_unchecked_t fscache_n_marks;
55201+atomic_unchecked_t fscache_n_uncaches;
55202
55203-atomic_t fscache_n_acquires;
55204-atomic_t fscache_n_acquires_null;
55205-atomic_t fscache_n_acquires_no_cache;
55206-atomic_t fscache_n_acquires_ok;
55207-atomic_t fscache_n_acquires_nobufs;
55208-atomic_t fscache_n_acquires_oom;
55209+atomic_unchecked_t fscache_n_acquires;
55210+atomic_unchecked_t fscache_n_acquires_null;
55211+atomic_unchecked_t fscache_n_acquires_no_cache;
55212+atomic_unchecked_t fscache_n_acquires_ok;
55213+atomic_unchecked_t fscache_n_acquires_nobufs;
55214+atomic_unchecked_t fscache_n_acquires_oom;
55215
55216-atomic_t fscache_n_invalidates;
55217-atomic_t fscache_n_invalidates_run;
55218+atomic_unchecked_t fscache_n_invalidates;
55219+atomic_unchecked_t fscache_n_invalidates_run;
55220
55221-atomic_t fscache_n_updates;
55222-atomic_t fscache_n_updates_null;
55223-atomic_t fscache_n_updates_run;
55224+atomic_unchecked_t fscache_n_updates;
55225+atomic_unchecked_t fscache_n_updates_null;
55226+atomic_unchecked_t fscache_n_updates_run;
55227
55228-atomic_t fscache_n_relinquishes;
55229-atomic_t fscache_n_relinquishes_null;
55230-atomic_t fscache_n_relinquishes_waitcrt;
55231-atomic_t fscache_n_relinquishes_retire;
55232+atomic_unchecked_t fscache_n_relinquishes;
55233+atomic_unchecked_t fscache_n_relinquishes_null;
55234+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
55235+atomic_unchecked_t fscache_n_relinquishes_retire;
55236
55237-atomic_t fscache_n_cookie_index;
55238-atomic_t fscache_n_cookie_data;
55239-atomic_t fscache_n_cookie_special;
55240+atomic_unchecked_t fscache_n_cookie_index;
55241+atomic_unchecked_t fscache_n_cookie_data;
55242+atomic_unchecked_t fscache_n_cookie_special;
55243
55244-atomic_t fscache_n_object_alloc;
55245-atomic_t fscache_n_object_no_alloc;
55246-atomic_t fscache_n_object_lookups;
55247-atomic_t fscache_n_object_lookups_negative;
55248-atomic_t fscache_n_object_lookups_positive;
55249-atomic_t fscache_n_object_lookups_timed_out;
55250-atomic_t fscache_n_object_created;
55251-atomic_t fscache_n_object_avail;
55252-atomic_t fscache_n_object_dead;
55253+atomic_unchecked_t fscache_n_object_alloc;
55254+atomic_unchecked_t fscache_n_object_no_alloc;
55255+atomic_unchecked_t fscache_n_object_lookups;
55256+atomic_unchecked_t fscache_n_object_lookups_negative;
55257+atomic_unchecked_t fscache_n_object_lookups_positive;
55258+atomic_unchecked_t fscache_n_object_lookups_timed_out;
55259+atomic_unchecked_t fscache_n_object_created;
55260+atomic_unchecked_t fscache_n_object_avail;
55261+atomic_unchecked_t fscache_n_object_dead;
55262
55263-atomic_t fscache_n_checkaux_none;
55264-atomic_t fscache_n_checkaux_okay;
55265-atomic_t fscache_n_checkaux_update;
55266-atomic_t fscache_n_checkaux_obsolete;
55267+atomic_unchecked_t fscache_n_checkaux_none;
55268+atomic_unchecked_t fscache_n_checkaux_okay;
55269+atomic_unchecked_t fscache_n_checkaux_update;
55270+atomic_unchecked_t fscache_n_checkaux_obsolete;
55271
55272 atomic_t fscache_n_cop_alloc_object;
55273 atomic_t fscache_n_cop_lookup_object;
55274@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
55275 seq_puts(m, "FS-Cache statistics\n");
55276
55277 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
55278- atomic_read(&fscache_n_cookie_index),
55279- atomic_read(&fscache_n_cookie_data),
55280- atomic_read(&fscache_n_cookie_special));
55281+ atomic_read_unchecked(&fscache_n_cookie_index),
55282+ atomic_read_unchecked(&fscache_n_cookie_data),
55283+ atomic_read_unchecked(&fscache_n_cookie_special));
55284
55285 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
55286- atomic_read(&fscache_n_object_alloc),
55287- atomic_read(&fscache_n_object_no_alloc),
55288- atomic_read(&fscache_n_object_avail),
55289- atomic_read(&fscache_n_object_dead));
55290+ atomic_read_unchecked(&fscache_n_object_alloc),
55291+ atomic_read_unchecked(&fscache_n_object_no_alloc),
55292+ atomic_read_unchecked(&fscache_n_object_avail),
55293+ atomic_read_unchecked(&fscache_n_object_dead));
55294 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
55295- atomic_read(&fscache_n_checkaux_none),
55296- atomic_read(&fscache_n_checkaux_okay),
55297- atomic_read(&fscache_n_checkaux_update),
55298- atomic_read(&fscache_n_checkaux_obsolete));
55299+ atomic_read_unchecked(&fscache_n_checkaux_none),
55300+ atomic_read_unchecked(&fscache_n_checkaux_okay),
55301+ atomic_read_unchecked(&fscache_n_checkaux_update),
55302+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
55303
55304 seq_printf(m, "Pages : mrk=%u unc=%u\n",
55305- atomic_read(&fscache_n_marks),
55306- atomic_read(&fscache_n_uncaches));
55307+ atomic_read_unchecked(&fscache_n_marks),
55308+ atomic_read_unchecked(&fscache_n_uncaches));
55309
55310 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
55311 " oom=%u\n",
55312- atomic_read(&fscache_n_acquires),
55313- atomic_read(&fscache_n_acquires_null),
55314- atomic_read(&fscache_n_acquires_no_cache),
55315- atomic_read(&fscache_n_acquires_ok),
55316- atomic_read(&fscache_n_acquires_nobufs),
55317- atomic_read(&fscache_n_acquires_oom));
55318+ atomic_read_unchecked(&fscache_n_acquires),
55319+ atomic_read_unchecked(&fscache_n_acquires_null),
55320+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
55321+ atomic_read_unchecked(&fscache_n_acquires_ok),
55322+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
55323+ atomic_read_unchecked(&fscache_n_acquires_oom));
55324
55325 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
55326- atomic_read(&fscache_n_object_lookups),
55327- atomic_read(&fscache_n_object_lookups_negative),
55328- atomic_read(&fscache_n_object_lookups_positive),
55329- atomic_read(&fscache_n_object_created),
55330- atomic_read(&fscache_n_object_lookups_timed_out));
55331+ atomic_read_unchecked(&fscache_n_object_lookups),
55332+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
55333+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
55334+ atomic_read_unchecked(&fscache_n_object_created),
55335+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
55336
55337 seq_printf(m, "Invals : n=%u run=%u\n",
55338- atomic_read(&fscache_n_invalidates),
55339- atomic_read(&fscache_n_invalidates_run));
55340+ atomic_read_unchecked(&fscache_n_invalidates),
55341+ atomic_read_unchecked(&fscache_n_invalidates_run));
55342
55343 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
55344- atomic_read(&fscache_n_updates),
55345- atomic_read(&fscache_n_updates_null),
55346- atomic_read(&fscache_n_updates_run));
55347+ atomic_read_unchecked(&fscache_n_updates),
55348+ atomic_read_unchecked(&fscache_n_updates_null),
55349+ atomic_read_unchecked(&fscache_n_updates_run));
55350
55351 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
55352- atomic_read(&fscache_n_relinquishes),
55353- atomic_read(&fscache_n_relinquishes_null),
55354- atomic_read(&fscache_n_relinquishes_waitcrt),
55355- atomic_read(&fscache_n_relinquishes_retire));
55356+ atomic_read_unchecked(&fscache_n_relinquishes),
55357+ atomic_read_unchecked(&fscache_n_relinquishes_null),
55358+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
55359+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
55360
55361 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
55362- atomic_read(&fscache_n_attr_changed),
55363- atomic_read(&fscache_n_attr_changed_ok),
55364- atomic_read(&fscache_n_attr_changed_nobufs),
55365- atomic_read(&fscache_n_attr_changed_nomem),
55366- atomic_read(&fscache_n_attr_changed_calls));
55367+ atomic_read_unchecked(&fscache_n_attr_changed),
55368+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
55369+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
55370+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
55371+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
55372
55373 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
55374- atomic_read(&fscache_n_allocs),
55375- atomic_read(&fscache_n_allocs_ok),
55376- atomic_read(&fscache_n_allocs_wait),
55377- atomic_read(&fscache_n_allocs_nobufs),
55378- atomic_read(&fscache_n_allocs_intr));
55379+ atomic_read_unchecked(&fscache_n_allocs),
55380+ atomic_read_unchecked(&fscache_n_allocs_ok),
55381+ atomic_read_unchecked(&fscache_n_allocs_wait),
55382+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
55383+ atomic_read_unchecked(&fscache_n_allocs_intr));
55384 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
55385- atomic_read(&fscache_n_alloc_ops),
55386- atomic_read(&fscache_n_alloc_op_waits),
55387- atomic_read(&fscache_n_allocs_object_dead));
55388+ atomic_read_unchecked(&fscache_n_alloc_ops),
55389+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
55390+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
55391
55392 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
55393 " int=%u oom=%u\n",
55394- atomic_read(&fscache_n_retrievals),
55395- atomic_read(&fscache_n_retrievals_ok),
55396- atomic_read(&fscache_n_retrievals_wait),
55397- atomic_read(&fscache_n_retrievals_nodata),
55398- atomic_read(&fscache_n_retrievals_nobufs),
55399- atomic_read(&fscache_n_retrievals_intr),
55400- atomic_read(&fscache_n_retrievals_nomem));
55401+ atomic_read_unchecked(&fscache_n_retrievals),
55402+ atomic_read_unchecked(&fscache_n_retrievals_ok),
55403+ atomic_read_unchecked(&fscache_n_retrievals_wait),
55404+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
55405+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
55406+ atomic_read_unchecked(&fscache_n_retrievals_intr),
55407+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
55408 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
55409- atomic_read(&fscache_n_retrieval_ops),
55410- atomic_read(&fscache_n_retrieval_op_waits),
55411- atomic_read(&fscache_n_retrievals_object_dead));
55412+ atomic_read_unchecked(&fscache_n_retrieval_ops),
55413+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
55414+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
55415
55416 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
55417- atomic_read(&fscache_n_stores),
55418- atomic_read(&fscache_n_stores_ok),
55419- atomic_read(&fscache_n_stores_again),
55420- atomic_read(&fscache_n_stores_nobufs),
55421- atomic_read(&fscache_n_stores_oom));
55422+ atomic_read_unchecked(&fscache_n_stores),
55423+ atomic_read_unchecked(&fscache_n_stores_ok),
55424+ atomic_read_unchecked(&fscache_n_stores_again),
55425+ atomic_read_unchecked(&fscache_n_stores_nobufs),
55426+ atomic_read_unchecked(&fscache_n_stores_oom));
55427 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
55428- atomic_read(&fscache_n_store_ops),
55429- atomic_read(&fscache_n_store_calls),
55430- atomic_read(&fscache_n_store_pages),
55431- atomic_read(&fscache_n_store_radix_deletes),
55432- atomic_read(&fscache_n_store_pages_over_limit));
55433+ atomic_read_unchecked(&fscache_n_store_ops),
55434+ atomic_read_unchecked(&fscache_n_store_calls),
55435+ atomic_read_unchecked(&fscache_n_store_pages),
55436+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
55437+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
55438
55439 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
55440- atomic_read(&fscache_n_store_vmscan_not_storing),
55441- atomic_read(&fscache_n_store_vmscan_gone),
55442- atomic_read(&fscache_n_store_vmscan_busy),
55443- atomic_read(&fscache_n_store_vmscan_cancelled),
55444- atomic_read(&fscache_n_store_vmscan_wait));
55445+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
55446+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
55447+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
55448+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
55449+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
55450
55451 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
55452- atomic_read(&fscache_n_op_pend),
55453- atomic_read(&fscache_n_op_run),
55454- atomic_read(&fscache_n_op_enqueue),
55455- atomic_read(&fscache_n_op_cancelled),
55456- atomic_read(&fscache_n_op_rejected));
55457+ atomic_read_unchecked(&fscache_n_op_pend),
55458+ atomic_read_unchecked(&fscache_n_op_run),
55459+ atomic_read_unchecked(&fscache_n_op_enqueue),
55460+ atomic_read_unchecked(&fscache_n_op_cancelled),
55461+ atomic_read_unchecked(&fscache_n_op_rejected));
55462 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
55463- atomic_read(&fscache_n_op_deferred_release),
55464- atomic_read(&fscache_n_op_release),
55465- atomic_read(&fscache_n_op_gc));
55466+ atomic_read_unchecked(&fscache_n_op_deferred_release),
55467+ atomic_read_unchecked(&fscache_n_op_release),
55468+ atomic_read_unchecked(&fscache_n_op_gc));
55469
55470 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
55471 atomic_read(&fscache_n_cop_alloc_object),
55472diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
55473index aef34b1..59bfd7b 100644
55474--- a/fs/fuse/cuse.c
55475+++ b/fs/fuse/cuse.c
55476@@ -600,10 +600,12 @@ static int __init cuse_init(void)
55477 INIT_LIST_HEAD(&cuse_conntbl[i]);
55478
55479 /* inherit and extend fuse_dev_operations */
55480- cuse_channel_fops = fuse_dev_operations;
55481- cuse_channel_fops.owner = THIS_MODULE;
55482- cuse_channel_fops.open = cuse_channel_open;
55483- cuse_channel_fops.release = cuse_channel_release;
55484+ pax_open_kernel();
55485+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
55486+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
55487+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
55488+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
55489+ pax_close_kernel();
55490
55491 cuse_class = class_create(THIS_MODULE, "cuse");
55492 if (IS_ERR(cuse_class))
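[Note on the cuse.c hunk above] cuse_init() swaps plain structure assignment for memcpy() plus per-field pokes bracketed by pax_open_kernel()/pax_close_kernel(): under PaX constification, file_operations objects live in read-only memory, so deliberate updates need a short-lived write window. Below is a minimal userspace sketch of that pattern using mprotect(); the helper names, the 4 KiB page size, and the ops layout are illustrative, not kernel API.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

struct ops { int (*open)(void); int (*release)(void); };

static int base_open(void)    { puts("base open");    return 0; }
static int base_release(void) { puts("base release"); return 0; }
static int cuse_open(void)    { puts("cuse open");    return 0; }

/* page-aligned so protection can be flipped on exactly one page */
static struct ops channel_ops __attribute__((aligned(4096)));

static void open_kernel(void)  { mprotect(&channel_ops, 4096, PROT_READ | PROT_WRITE); }
static void close_kernel(void) { mprotect(&channel_ops, 4096, PROT_READ); }

int main(void)
{
	const struct ops base = { base_open, base_release };

	open_kernel();                              /* open the write window */
	memcpy(&channel_ops, &base, sizeof(base));  /* inherit ...           */
	channel_ops.open = cuse_open;               /* ... and extend        */
	close_kernel();                             /* read-only again       */

	return channel_ops.open();                  /* reads still work      */
}

The window is open only across those few stores, so a stray kernel write at any other time faults instead of silently retargeting the function pointers.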
55493diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
55494index 1d55f94..088da65 100644
55495--- a/fs/fuse/dev.c
55496+++ b/fs/fuse/dev.c
55497@@ -1339,7 +1339,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
55498 ret = 0;
55499 pipe_lock(pipe);
55500
55501- if (!pipe->readers) {
55502+ if (!atomic_read(&pipe->readers)) {
55503 send_sig(SIGPIPE, current, 0);
55504 if (!ret)
55505 ret = -EPIPE;
55506@@ -1364,7 +1364,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
55507 page_nr++;
55508 ret += buf->len;
55509
55510- if (pipe->files)
55511+ if (atomic_read(&pipe->files))
55512 do_wakeup = 1;
55513 }
55514
55515diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
55516index 5b12746..b481b03 100644
55517--- a/fs/fuse/dir.c
55518+++ b/fs/fuse/dir.c
55519@@ -1437,7 +1437,7 @@ static char *read_link(struct dentry *dentry)
55520 return link;
55521 }
55522
55523-static void free_link(char *link)
55524+static void free_link(const char *link)
55525 {
55526 if (!IS_ERR(link))
55527 free_page((unsigned long) link);
55528diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
55529index 62b484e..0f9a140 100644
55530--- a/fs/gfs2/inode.c
55531+++ b/fs/gfs2/inode.c
55532@@ -1441,7 +1441,7 @@ out:
55533
55534 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
55535 {
55536- char *s = nd_get_link(nd);
55537+ const char *s = nd_get_link(nd);
55538 if (!IS_ERR(s))
55539 kfree(s);
55540 }
55541diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
55542index a3f868a..bb308ae 100644
55543--- a/fs/hugetlbfs/inode.c
55544+++ b/fs/hugetlbfs/inode.c
55545@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
55546 struct mm_struct *mm = current->mm;
55547 struct vm_area_struct *vma;
55548 struct hstate *h = hstate_file(file);
55549+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
55550 struct vm_unmapped_area_info info;
55551
55552 if (len & ~huge_page_mask(h))
55553@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
55554 return addr;
55555 }
55556
55557+#ifdef CONFIG_PAX_RANDMMAP
55558+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
55559+#endif
55560+
55561 if (addr) {
55562 addr = ALIGN(addr, huge_page_size(h));
55563 vma = find_vma(mm, addr);
55564- if (TASK_SIZE - len >= addr &&
55565- (!vma || addr + len <= vma->vm_start))
55566+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
55567 return addr;
55568 }
55569
55570 info.flags = 0;
55571 info.length = len;
55572 info.low_limit = TASK_UNMAPPED_BASE;
55573+
55574+#ifdef CONFIG_PAX_RANDMMAP
55575+ if (mm->pax_flags & MF_PAX_RANDMMAP)
55576+ info.low_limit += mm->delta_mmap;
55577+#endif
55578+
55579 info.high_limit = TASK_SIZE;
55580 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
55581 info.align_offset = 0;
55582@@ -898,7 +908,7 @@ static struct file_system_type hugetlbfs_fs_type = {
55583 };
55584 MODULE_ALIAS_FS("hugetlbfs");
55585
55586-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
55587+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
55588
55589 static int can_do_hugetlb_shm(void)
55590 {
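[Note on the hugetlbfs hunk above] hugetlb_get_unmapped_area() gains PaX RANDMMAP behavior: when MF_PAX_RANDMMAP is set, caller-supplied address hints are ignored and the search floor is raised by the per-mm delta_mmap; and a hint, when honored, must keep a randomized guard gap (gr_rand_threadstack_offset() feeding check_heap_stack_gap()) to the neighbouring VMA. A toy model of those two checks follows; the constants and names are invented for illustration, and rand() stands in for the kernel's entropy source.

#include <stdio.h>
#include <stdlib.h>

#define TASK_UNMAPPED_BASE 0x10000000UL
#define PAGE_SIZE 4096UL

struct vma { unsigned long vm_start, vm_end; };

/* page-aligned random shift, chosen once per address space */
static unsigned long delta_mmap(void)
{
	return ((unsigned long)rand() & 0xffUL) << 12;
}

/* honor a hint only if [addr, addr+len) keeps a gap below the next vma */
static int check_gap(const struct vma *next, unsigned long addr,
		     unsigned long len, unsigned long gap)
{
	if (!next)
		return 1;
	return addr + len + gap <= next->vm_start;
}

int main(void)
{
	struct vma next = { 0x20000000UL, 0x20200000UL };
	unsigned long base = TASK_UNMAPPED_BASE + delta_mmap();
	unsigned long hint = 0x1ffff000UL, len = 2 * 1024 * 1024;

	printf("randomized search base: %#lx\n", base);
	printf("hint %#lx: %s\n", hint,
	       check_gap(&next, hint, len, 16 * PAGE_SIZE) ? "kept" : "rejected");
	return 0;
}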
55591diff --git a/fs/inode.c b/fs/inode.c
55592index 00d5fc3..98ce7d7 100644
55593--- a/fs/inode.c
55594+++ b/fs/inode.c
55595@@ -878,8 +878,8 @@ unsigned int get_next_ino(void)
55596
55597 #ifdef CONFIG_SMP
55598 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
55599- static atomic_t shared_last_ino;
55600- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
55601+ static atomic_unchecked_t shared_last_ino;
55602+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
55603
55604 res = next - LAST_INO_BATCH;
55605 }
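[Note on the fs/inode.c hunk above] shared_last_ino -- like nlm_cookie, fsnotify_sync_cookie, and the ocfs2 allocation statistics later in this patch -- becomes atomic_unchecked_t. PaX REFCOUNT instruments ordinary atomic_t arithmetic to catch reference-count overflows, so counters that are expected to wrap (ids, cookies, statistics) are opted out via the _unchecked flavor to avoid false positives. A rough userspace model of the split, not the kernel implementation:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* checked flavor: refuse to wrap, the way PaX REFCOUNT guards refcounts */
static int add_return_checked(int i, atomic_int *v)
{
	int old = atomic_load(v);

	while (old <= INT_MAX - i) {
		if (atomic_compare_exchange_weak(v, &old, old + i))
			return old + i;
	}
	fprintf(stderr, "overflow detected, saturating\n");
	return INT_MAX;
}

/* unchecked flavor: wraparound is expected and harmless */
static unsigned add_return_unchecked(unsigned i, atomic_uint *v)
{
	return atomic_fetch_add(v, i) + i;
}

int main(void)
{
	atomic_int  refs = INT_MAX - 1;
	atomic_uint ino  = UINT_MAX - 1;

	printf("unchecked wraps to %u\n", add_return_unchecked(4, &ino));
	printf("checked stops at  %d\n", add_return_checked(4, &refs));
	return 0;
}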
55606diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
55607index 4a6cf28..d3a29d3 100644
55608--- a/fs/jffs2/erase.c
55609+++ b/fs/jffs2/erase.c
55610@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
55611 struct jffs2_unknown_node marker = {
55612 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
55613 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
55614- .totlen = cpu_to_je32(c->cleanmarker_size)
55615+ .totlen = cpu_to_je32(c->cleanmarker_size),
55616+ .hdr_crc = cpu_to_je32(0)
55617 };
55618
55619 jffs2_prealloc_raw_node_refs(c, jeb, 1);
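[Note on the jffs2 hunks] This cleanmarker template, and the on-flash one in wbuf.c just below, gain an explicit .hdr_crc = cpu_to_je32(0). Members omitted from a designated initializer are already zero-initialized in C, so the change is behavior-preserving; it appears meant to make the zeroing visible to readers and to initialization checkers. The language rule, demonstrated:

#include <assert.h>
#include <stdint.h>

struct node { uint16_t magic, nodetype; uint32_t totlen, hdr_crc; };

int main(void)
{
	struct node n = { .magic = 0x1985, .totlen = 8 };
	assert(n.nodetype == 0 && n.hdr_crc == 0);  /* implicit zeroing */
	return 0;
}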
55620diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
55621index a6597d6..41b30ec 100644
55622--- a/fs/jffs2/wbuf.c
55623+++ b/fs/jffs2/wbuf.c
55624@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
55625 {
55626 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
55627 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
55628- .totlen = constant_cpu_to_je32(8)
55629+ .totlen = constant_cpu_to_je32(8),
55630+ .hdr_crc = constant_cpu_to_je32(0)
55631 };
55632
55633 /*
55634diff --git a/fs/jfs/super.c b/fs/jfs/super.c
55635index 788e0a9..8433098 100644
55636--- a/fs/jfs/super.c
55637+++ b/fs/jfs/super.c
55638@@ -878,7 +878,7 @@ static int __init init_jfs_fs(void)
55639
55640 jfs_inode_cachep =
55641 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
55642- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
55643+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
55644 init_once);
55645 if (jfs_inode_cachep == NULL)
55646 return -ENOMEM;
55647diff --git a/fs/libfs.c b/fs/libfs.c
55648index 916da8c..1588998 100644
55649--- a/fs/libfs.c
55650+++ b/fs/libfs.c
55651@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
55652
55653 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
55654 struct dentry *next;
55655+ char d_name[sizeof(next->d_iname)];
55656+ const unsigned char *name;
55657+
55658 next = list_entry(p, struct dentry, d_u.d_child);
55659 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
55660 if (!simple_positive(next)) {
55661@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
55662
55663 spin_unlock(&next->d_lock);
55664 spin_unlock(&dentry->d_lock);
55665- if (filldir(dirent, next->d_name.name,
55666+ name = next->d_name.name;
55667+ if (name == next->d_iname) {
55668+ memcpy(d_name, name, next->d_name.len);
55669+ name = d_name;
55670+ }
55671+ if (filldir(dirent, name,
55672 next->d_name.len, filp->f_pos,
55673 next->d_inode->i_ino,
55674 dt_type(next->d_inode)) < 0)
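[Note on the libfs.c hunk above] dcache_readdir() now snapshots names stored inline in the dentry (d_iname) into a stack buffer before releasing d_lock: filldir() may block, and once the lock is dropped a concurrent rename can rewrite those inline bytes under the reader. A sketch of the snapshot-before-unlock shape, with illustrative types rather than kernel ones:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct entry {
	pthread_mutex_t lock;
	char inline_name[32];   /* analog of dentry->d_iname   */
	const char *name;       /* points here for short names */
};

static void emit(const char *name) { printf("%s\n", name); /* may block */ }

static void walk(struct entry *e)
{
	char snap[32];
	const char *name;

	pthread_mutex_lock(&e->lock);
	name = e->name;
	if (name == e->inline_name) {   /* short name stored inline */
		memcpy(snap, name, sizeof(snap));
		name = snap;            /* stable after unlock */
	}
	pthread_mutex_unlock(&e->lock);

	emit(name);  /* the entry may change now; our snapshot cannot */
}

int main(void)
{
	struct entry e = { PTHREAD_MUTEX_INITIALIZER };

	strcpy(e.inline_name, "shortname");
	e.name = e.inline_name;
	walk(&e);
	return 0;
}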
55675diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
55676index acd3947..1f896e2 100644
55677--- a/fs/lockd/clntproc.c
55678+++ b/fs/lockd/clntproc.c
55679@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
55680 /*
55681 * Cookie counter for NLM requests
55682 */
55683-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
55684+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
55685
55686 void nlmclnt_next_cookie(struct nlm_cookie *c)
55687 {
55688- u32 cookie = atomic_inc_return(&nlm_cookie);
55689+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
55690
55691 memcpy(c->data, &cookie, 4);
55692 c->len=4;
55693diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
55694index a2aa97d..10d6c41 100644
55695--- a/fs/lockd/svc.c
55696+++ b/fs/lockd/svc.c
55697@@ -305,7 +305,7 @@ static int lockd_start_svc(struct svc_serv *serv)
55698 svc_sock_update_bufs(serv);
55699 serv->sv_maxconn = nlm_max_connections;
55700
55701- nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
55702+ nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, "%s", serv->sv_name);
55703 if (IS_ERR(nlmsvc_task)) {
55704 error = PTR_ERR(nlmsvc_task);
55705 printk(KERN_WARNING
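[Note on the lockd hunk above] lockd -- and the nfs callback and nfs4 state-manager threads later in this patch -- stop feeding runtime strings straight into kthread_run(), whose name argument is a printf-style format: a '%' in the thread name would drive the formatter. Passing a literal "%s" is the standard fix, and mainline adopted the same change. A userspace illustration of the bug class; set_task_name() is a stand-in, not a real API:

#include <stdarg.h>
#include <stdio.h>

__attribute__((format(printf, 1, 2)))
static void set_task_name(const char *fmt, ...)
{
	char name[16];
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(name, sizeof(name), fmt, ap);
	va_end(ap);
	printf("task name: %s\n", name);
}

int main(void)
{
	const char *sv_name = "nlm-%x-%n";     /* hostile '%' sequences */

	/* set_task_name(sv_name);   BAD: sv_name becomes the format   */
	set_task_name("%s", sv_name);  /* GOOD: fixed literal format   */
	return 0;
}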
55706diff --git a/fs/locks.c b/fs/locks.c
55707index cb424a4..850e4dd 100644
55708--- a/fs/locks.c
55709+++ b/fs/locks.c
55710@@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
55711 return;
55712
55713 if (filp->f_op && filp->f_op->flock) {
55714- struct file_lock fl = {
55715+ struct file_lock flock = {
55716 .fl_pid = current->tgid,
55717 .fl_file = filp,
55718 .fl_flags = FL_FLOCK,
55719 .fl_type = F_UNLCK,
55720 .fl_end = OFFSET_MAX,
55721 };
55722- filp->f_op->flock(filp, F_SETLKW, &fl);
55723- if (fl.fl_ops && fl.fl_ops->fl_release_private)
55724- fl.fl_ops->fl_release_private(&fl);
55725+ filp->f_op->flock(filp, F_SETLKW, &flock);
55726+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
55727+ flock.fl_ops->fl_release_private(&flock);
55728 }
55729
55730 lock_flocks();
55731diff --git a/fs/namei.c b/fs/namei.c
55732index 9ed9361..2b72db1 100644
55733--- a/fs/namei.c
55734+++ b/fs/namei.c
55735@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
55736 if (ret != -EACCES)
55737 return ret;
55738
55739+#ifdef CONFIG_GRKERNSEC
55740+ /* we'll block if we have to log due to a denied capability use */
55741+ if (mask & MAY_NOT_BLOCK)
55742+ return -ECHILD;
55743+#endif
55744+
55745 if (S_ISDIR(inode->i_mode)) {
55746 /* DACs are overridable for directories */
55747- if (inode_capable(inode, CAP_DAC_OVERRIDE))
55748- return 0;
55749 if (!(mask & MAY_WRITE))
55750- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
55751+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
55752+ inode_capable(inode, CAP_DAC_READ_SEARCH))
55753 return 0;
55754+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
55755+ return 0;
55756 return -EACCES;
55757 }
55758 /*
55759+ * Searching includes executable on directories, else just read.
55760+ */
55761+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
55762+ if (mask == MAY_READ)
55763+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
55764+ inode_capable(inode, CAP_DAC_READ_SEARCH))
55765+ return 0;
55766+
55767+ /*
55768 * Read/write DACs are always overridable.
55769 * Executable DACs are overridable when there is
55770 * at least one exec bit set.
55771@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
55772 if (inode_capable(inode, CAP_DAC_OVERRIDE))
55773 return 0;
55774
55775- /*
55776- * Searching includes executable on directories, else just read.
55777- */
55778- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
55779- if (mask == MAY_READ)
55780- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
55781- return 0;
55782-
55783 return -EACCES;
55784 }
55785
55786@@ -820,7 +828,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
55787 {
55788 struct dentry *dentry = link->dentry;
55789 int error;
55790- char *s;
55791+ const char *s;
55792
55793 BUG_ON(nd->flags & LOOKUP_RCU);
55794
55795@@ -841,6 +849,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
55796 if (error)
55797 goto out_put_nd_path;
55798
55799+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
55800+ dentry->d_inode, dentry, nd->path.mnt)) {
55801+ error = -EACCES;
55802+ goto out_put_nd_path;
55803+ }
55804+
55805 nd->last_type = LAST_BIND;
55806 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
55807 error = PTR_ERR(*p);
55808@@ -1588,6 +1602,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
55809 if (res)
55810 break;
55811 res = walk_component(nd, path, LOOKUP_FOLLOW);
55812+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
55813+ res = -EACCES;
55814 put_link(nd, &link, cookie);
55815 } while (res > 0);
55816
55817@@ -1686,7 +1702,7 @@ EXPORT_SYMBOL(full_name_hash);
55818 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
55819 {
55820 unsigned long a, b, adata, bdata, mask, hash, len;
55821- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
55822+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
55823
55824 hash = a = 0;
55825 len = -sizeof(unsigned long);
55826@@ -1968,6 +1984,8 @@ static int path_lookupat(int dfd, const char *name,
55827 if (err)
55828 break;
55829 err = lookup_last(nd, &path);
55830+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
55831+ err = -EACCES;
55832 put_link(nd, &link, cookie);
55833 }
55834 }
55835@@ -1975,6 +1993,13 @@ static int path_lookupat(int dfd, const char *name,
55836 if (!err)
55837 err = complete_walk(nd);
55838
55839+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
55840+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
55841+ path_put(&nd->path);
55842+ err = -ENOENT;
55843+ }
55844+ }
55845+
55846 if (!err && nd->flags & LOOKUP_DIRECTORY) {
55847 if (!can_lookup(nd->inode)) {
55848 path_put(&nd->path);
55849@@ -2002,8 +2027,15 @@ static int filename_lookup(int dfd, struct filename *name,
55850 retval = path_lookupat(dfd, name->name,
55851 flags | LOOKUP_REVAL, nd);
55852
55853- if (likely(!retval))
55854+ if (likely(!retval)) {
55855 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
55856+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
55857+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
55858+ path_put(&nd->path);
55859+ return -ENOENT;
55860+ }
55861+ }
55862+ }
55863 return retval;
55864 }
55865
55866@@ -2381,6 +2413,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
55867 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
55868 return -EPERM;
55869
55870+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
55871+ return -EPERM;
55872+ if (gr_handle_rawio(inode))
55873+ return -EPERM;
55874+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
55875+ return -EACCES;
55876+
55877 return 0;
55878 }
55879
55880@@ -2602,7 +2641,7 @@ looked_up:
55881 * cleared otherwise prior to returning.
55882 */
55883 static int lookup_open(struct nameidata *nd, struct path *path,
55884- struct file *file,
55885+ struct path *link, struct file *file,
55886 const struct open_flags *op,
55887 bool got_write, int *opened)
55888 {
55889@@ -2637,6 +2676,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
55890 /* Negative dentry, just create the file */
55891 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
55892 umode_t mode = op->mode;
55893+
55894+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
55895+ error = -EACCES;
55896+ goto out_dput;
55897+ }
55898+
55899+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
55900+ error = -EACCES;
55901+ goto out_dput;
55902+ }
55903+
55904 if (!IS_POSIXACL(dir->d_inode))
55905 mode &= ~current_umask();
55906 /*
55907@@ -2658,6 +2708,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
55908 nd->flags & LOOKUP_EXCL);
55909 if (error)
55910 goto out_dput;
55911+ else
55912+ gr_handle_create(dentry, nd->path.mnt);
55913 }
55914 out_no_open:
55915 path->dentry = dentry;
55916@@ -2672,7 +2724,7 @@ out_dput:
55917 /*
55918 * Handle the last step of open()
55919 */
55920-static int do_last(struct nameidata *nd, struct path *path,
55921+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
55922 struct file *file, const struct open_flags *op,
55923 int *opened, struct filename *name)
55924 {
55925@@ -2701,16 +2753,32 @@ static int do_last(struct nameidata *nd, struct path *path,
55926 error = complete_walk(nd);
55927 if (error)
55928 return error;
55929+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
55930+ error = -ENOENT;
55931+ goto out;
55932+ }
55933 audit_inode(name, nd->path.dentry, 0);
55934 if (open_flag & O_CREAT) {
55935 error = -EISDIR;
55936 goto out;
55937 }
55938+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
55939+ error = -EACCES;
55940+ goto out;
55941+ }
55942 goto finish_open;
55943 case LAST_BIND:
55944 error = complete_walk(nd);
55945 if (error)
55946 return error;
55947+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
55948+ error = -ENOENT;
55949+ goto out;
55950+ }
55951+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
55952+ error = -EACCES;
55953+ goto out;
55954+ }
55955 audit_inode(name, dir, 0);
55956 goto finish_open;
55957 }
55958@@ -2759,7 +2827,7 @@ retry_lookup:
55959 */
55960 }
55961 mutex_lock(&dir->d_inode->i_mutex);
55962- error = lookup_open(nd, path, file, op, got_write, opened);
55963+ error = lookup_open(nd, path, link, file, op, got_write, opened);
55964 mutex_unlock(&dir->d_inode->i_mutex);
55965
55966 if (error <= 0) {
55967@@ -2783,11 +2851,28 @@ retry_lookup:
55968 goto finish_open_created;
55969 }
55970
55971+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
55972+ error = -ENOENT;
55973+ goto exit_dput;
55974+ }
55975+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
55976+ error = -EACCES;
55977+ goto exit_dput;
55978+ }
55979+
55980 /*
55981 * create/update audit record if it already exists.
55982 */
55983- if (path->dentry->d_inode)
55984+ if (path->dentry->d_inode) {
55985+ /* only check if O_CREAT is specified, all other checks need to go
55986+ into may_open */
55987+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
55988+ error = -EACCES;
55989+ goto exit_dput;
55990+ }
55991+
55992 audit_inode(name, path->dentry, 0);
55993+ }
55994
55995 /*
55996 * If atomic_open() acquired write access it is dropped now due to
55997@@ -2828,6 +2913,11 @@ finish_lookup:
55998 }
55999 }
56000 BUG_ON(inode != path->dentry->d_inode);
56001+ /* if we're resolving a symlink to another symlink */
56002+ if (link && gr_handle_symlink_owner(link, inode)) {
56003+ error = -EACCES;
56004+ goto out;
56005+ }
56006 return 1;
56007 }
56008
56009@@ -2837,7 +2927,6 @@ finish_lookup:
56010 save_parent.dentry = nd->path.dentry;
56011 save_parent.mnt = mntget(path->mnt);
56012 nd->path.dentry = path->dentry;
56013-
56014 }
56015 nd->inode = inode;
56016 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
56017@@ -2846,6 +2935,16 @@ finish_lookup:
56018 path_put(&save_parent);
56019 return error;
56020 }
56021+
56022+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
56023+ error = -ENOENT;
56024+ goto out;
56025+ }
56026+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
56027+ error = -EACCES;
56028+ goto out;
56029+ }
56030+
56031 error = -EISDIR;
56032 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
56033 goto out;
56034@@ -2944,7 +3043,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
56035 if (unlikely(error))
56036 goto out;
56037
56038- error = do_last(nd, &path, file, op, &opened, pathname);
56039+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
56040 while (unlikely(error > 0)) { /* trailing symlink */
56041 struct path link = path;
56042 void *cookie;
56043@@ -2962,7 +3061,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
56044 error = follow_link(&link, nd, &cookie);
56045 if (unlikely(error))
56046 break;
56047- error = do_last(nd, &path, file, op, &opened, pathname);
56048+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
56049 put_link(nd, &link, cookie);
56050 }
56051 out:
56052@@ -3062,8 +3161,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
56053 goto unlock;
56054
56055 error = -EEXIST;
56056- if (dentry->d_inode)
56057+ if (dentry->d_inode) {
56058+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
56059+ error = -ENOENT;
56060+ }
56061 goto fail;
56062+ }
56063 /*
56064 * Special case - lookup gave negative, but... we had foo/bar/
56065 * From the vfs_mknod() POV we just have a negative dentry -
56066@@ -3115,6 +3218,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
56067 }
56068 EXPORT_SYMBOL(user_path_create);
56069
56070+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
56071+{
56072+ struct filename *tmp = getname(pathname);
56073+ struct dentry *res;
56074+ if (IS_ERR(tmp))
56075+ return ERR_CAST(tmp);
56076+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
56077+ if (IS_ERR(res))
56078+ putname(tmp);
56079+ else
56080+ *to = tmp;
56081+ return res;
56082+}
56083+
56084 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
56085 {
56086 int error = may_create(dir, dentry);
56087@@ -3177,6 +3294,17 @@ retry:
56088
56089 if (!IS_POSIXACL(path.dentry->d_inode))
56090 mode &= ~current_umask();
56091+
56092+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
56093+ error = -EPERM;
56094+ goto out;
56095+ }
56096+
56097+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
56098+ error = -EACCES;
56099+ goto out;
56100+ }
56101+
56102 error = security_path_mknod(&path, dentry, mode, dev);
56103 if (error)
56104 goto out;
56105@@ -3193,6 +3321,8 @@ retry:
56106 break;
56107 }
56108 out:
56109+ if (!error)
56110+ gr_handle_create(dentry, path.mnt);
56111 done_path_create(&path, dentry);
56112 if (retry_estale(error, lookup_flags)) {
56113 lookup_flags |= LOOKUP_REVAL;
56114@@ -3245,9 +3375,16 @@ retry:
56115
56116 if (!IS_POSIXACL(path.dentry->d_inode))
56117 mode &= ~current_umask();
56118+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
56119+ error = -EACCES;
56120+ goto out;
56121+ }
56122 error = security_path_mkdir(&path, dentry, mode);
56123 if (!error)
56124 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
56125+ if (!error)
56126+ gr_handle_create(dentry, path.mnt);
56127+out:
56128 done_path_create(&path, dentry);
56129 if (retry_estale(error, lookup_flags)) {
56130 lookup_flags |= LOOKUP_REVAL;
56131@@ -3328,6 +3465,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
56132 struct filename *name;
56133 struct dentry *dentry;
56134 struct nameidata nd;
56135+ ino_t saved_ino = 0;
56136+ dev_t saved_dev = 0;
56137 unsigned int lookup_flags = 0;
56138 retry:
56139 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
56140@@ -3360,10 +3499,21 @@ retry:
56141 error = -ENOENT;
56142 goto exit3;
56143 }
56144+
56145+ saved_ino = dentry->d_inode->i_ino;
56146+ saved_dev = gr_get_dev_from_dentry(dentry);
56147+
56148+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
56149+ error = -EACCES;
56150+ goto exit3;
56151+ }
56152+
56153 error = security_path_rmdir(&nd.path, dentry);
56154 if (error)
56155 goto exit3;
56156 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
56157+ if (!error && (saved_dev || saved_ino))
56158+ gr_handle_delete(saved_ino, saved_dev);
56159 exit3:
56160 dput(dentry);
56161 exit2:
56162@@ -3429,6 +3579,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
56163 struct dentry *dentry;
56164 struct nameidata nd;
56165 struct inode *inode = NULL;
56166+ ino_t saved_ino = 0;
56167+ dev_t saved_dev = 0;
56168 unsigned int lookup_flags = 0;
56169 retry:
56170 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
56171@@ -3455,10 +3607,22 @@ retry:
56172 if (!inode)
56173 goto slashes;
56174 ihold(inode);
56175+
56176+ if (inode->i_nlink <= 1) {
56177+ saved_ino = inode->i_ino;
56178+ saved_dev = gr_get_dev_from_dentry(dentry);
56179+ }
56180+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
56181+ error = -EACCES;
56182+ goto exit2;
56183+ }
56184+
56185 error = security_path_unlink(&nd.path, dentry);
56186 if (error)
56187 goto exit2;
56188 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
56189+ if (!error && (saved_ino || saved_dev))
56190+ gr_handle_delete(saved_ino, saved_dev);
56191 exit2:
56192 dput(dentry);
56193 }
56194@@ -3536,9 +3700,17 @@ retry:
56195 if (IS_ERR(dentry))
56196 goto out_putname;
56197
56198+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
56199+ error = -EACCES;
56200+ goto out;
56201+ }
56202+
56203 error = security_path_symlink(&path, dentry, from->name);
56204 if (!error)
56205 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
56206+ if (!error)
56207+ gr_handle_create(dentry, path.mnt);
56208+out:
56209 done_path_create(&path, dentry);
56210 if (retry_estale(error, lookup_flags)) {
56211 lookup_flags |= LOOKUP_REVAL;
56212@@ -3612,6 +3784,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
56213 {
56214 struct dentry *new_dentry;
56215 struct path old_path, new_path;
56216+ struct filename *to = NULL;
56217 int how = 0;
56218 int error;
56219
56220@@ -3635,7 +3808,7 @@ retry:
56221 if (error)
56222 return error;
56223
56224- new_dentry = user_path_create(newdfd, newname, &new_path,
56225+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
56226 (how & LOOKUP_REVAL));
56227 error = PTR_ERR(new_dentry);
56228 if (IS_ERR(new_dentry))
56229@@ -3647,11 +3820,28 @@ retry:
56230 error = may_linkat(&old_path);
56231 if (unlikely(error))
56232 goto out_dput;
56233+
56234+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
56235+ old_path.dentry->d_inode,
56236+ old_path.dentry->d_inode->i_mode, to)) {
56237+ error = -EACCES;
56238+ goto out_dput;
56239+ }
56240+
56241+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
56242+ old_path.dentry, old_path.mnt, to)) {
56243+ error = -EACCES;
56244+ goto out_dput;
56245+ }
56246+
56247 error = security_path_link(old_path.dentry, &new_path, new_dentry);
56248 if (error)
56249 goto out_dput;
56250 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
56251+ if (!error)
56252+ gr_handle_create(new_dentry, new_path.mnt);
56253 out_dput:
56254+ putname(to);
56255 done_path_create(&new_path, new_dentry);
56256 if (retry_estale(error, how)) {
56257 how |= LOOKUP_REVAL;
56258@@ -3897,12 +4087,21 @@ retry:
56259 if (new_dentry == trap)
56260 goto exit5;
56261
56262+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
56263+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
56264+ to);
56265+ if (error)
56266+ goto exit5;
56267+
56268 error = security_path_rename(&oldnd.path, old_dentry,
56269 &newnd.path, new_dentry);
56270 if (error)
56271 goto exit5;
56272 error = vfs_rename(old_dir->d_inode, old_dentry,
56273 new_dir->d_inode, new_dentry);
56274+ if (!error)
56275+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
56276+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
56277 exit5:
56278 dput(new_dentry);
56279 exit4:
56280@@ -3934,6 +4133,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
56281
56282 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
56283 {
56284+ char tmpbuf[64];
56285+ const char *newlink;
56286 int len;
56287
56288 len = PTR_ERR(link);
56289@@ -3943,7 +4144,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
56290 len = strlen(link);
56291 if (len > (unsigned) buflen)
56292 len = buflen;
56293- if (copy_to_user(buffer, link, len))
56294+
56295+ if (len < sizeof(tmpbuf)) {
56296+ memcpy(tmpbuf, link, len);
56297+ newlink = tmpbuf;
56298+ } else
56299+ newlink = link;
56300+
56301+ if (copy_to_user(buffer, newlink, len))
56302 len = -EFAULT;
56303 out:
56304 return len;
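[Note on the vfs_readlink() hunk above] The function gains a bounce buffer: link bodies shorter than 64 bytes are copied to the stack before copy_to_user(). The likely motivation is PAX_USERCOPY's checking of user-copy source objects -- symlink bodies can live inside assorted kernel objects, and bouncing small ones through a stack frame keeps the copy within an easily verified region; that reading is an inference, not stated in the patch. A sketch of the pattern, with copy_out() standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

/* stand-in for copy_to_user(); returns 0 on success */
static int copy_out(char *dst, const char *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

static int readlink_copy(char *ubuf, size_t buflen, const char *link)
{
	char tmpbuf[64];
	size_t len = strlen(link);
	const char *src = link;

	if (len > buflen)
		len = buflen;
	if (len < sizeof(tmpbuf)) {     /* bounce small bodies via the stack */
		memcpy(tmpbuf, link, len);
		src = tmpbuf;
	}
	return copy_out(ubuf, src, len) ? -1 : (int)len;
}

int main(void)
{
	char out[128];
	int n = readlink_copy(out, sizeof(out), "/target/of/symlink");

	printf("%.*s (%d bytes)\n", n, out, n);
	return 0;
}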
56305diff --git a/fs/namespace.c b/fs/namespace.c
56306index 7b1ca9b..6faeccf 100644
56307--- a/fs/namespace.c
56308+++ b/fs/namespace.c
56309@@ -1265,6 +1265,9 @@ static int do_umount(struct mount *mnt, int flags)
56310 if (!(sb->s_flags & MS_RDONLY))
56311 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
56312 up_write(&sb->s_umount);
56313+
56314+ gr_log_remount(mnt->mnt_devname, retval);
56315+
56316 return retval;
56317 }
56318
56319@@ -1283,6 +1286,9 @@ static int do_umount(struct mount *mnt, int flags)
56320 }
56321 br_write_unlock(&vfsmount_lock);
56322 namespace_unlock();
56323+
56324+ gr_log_unmount(mnt->mnt_devname, retval);
56325+
56326 return retval;
56327 }
56328
56329@@ -1302,7 +1308,7 @@ static inline bool may_mount(void)
56330 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
56331 */
56332
56333-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
56334+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
56335 {
56336 struct path path;
56337 struct mount *mnt;
56338@@ -1342,7 +1348,7 @@ out:
56339 /*
56340 * The 2.0 compatible umount. No flags.
56341 */
56342-SYSCALL_DEFINE1(oldumount, char __user *, name)
56343+SYSCALL_DEFINE1(oldumount, const char __user *, name)
56344 {
56345 return sys_umount(name, 0);
56346 }
56347@@ -2313,6 +2319,16 @@ long do_mount(const char *dev_name, const char *dir_name,
56348 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
56349 MS_STRICTATIME);
56350
56351+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
56352+ retval = -EPERM;
56353+ goto dput_out;
56354+ }
56355+
56356+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
56357+ retval = -EPERM;
56358+ goto dput_out;
56359+ }
56360+
56361 if (flags & MS_REMOUNT)
56362 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
56363 data_page);
56364@@ -2327,6 +2343,9 @@ long do_mount(const char *dev_name, const char *dir_name,
56365 dev_name, data_page);
56366 dput_out:
56367 path_put(&path);
56368+
56369+ gr_log_mount(dev_name, dir_name, retval);
56370+
56371 return retval;
56372 }
56373
56374@@ -2500,8 +2519,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
56375 }
56376 EXPORT_SYMBOL(mount_subtree);
56377
56378-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
56379- char __user *, type, unsigned long, flags, void __user *, data)
56380+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
56381+ const char __user *, type, unsigned long, flags, void __user *, data)
56382 {
56383 int ret;
56384 char *kernel_type;
56385@@ -2614,6 +2633,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
56386 if (error)
56387 goto out2;
56388
56389+ if (gr_handle_chroot_pivot()) {
56390+ error = -EPERM;
56391+ goto out2;
56392+ }
56393+
56394 get_fs_root(current->fs, &root);
56395 old_mp = lock_mount(&old);
56396 error = PTR_ERR(old_mp);
56397@@ -2864,7 +2888,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
56398 !nsown_capable(CAP_SYS_ADMIN))
56399 return -EPERM;
56400
56401- if (fs->users != 1)
56402+ if (atomic_read(&fs->users) != 1)
56403 return -EINVAL;
56404
56405 get_mnt_ns(mnt_ns);
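[Note on the namespace.c hunks above] They show the recurring grsecurity hook placement: policy denials (gr_handle_rofs_mount(), gr_handle_chroot_mount(), gr_handle_chroot_pivot()) run before the generic security_* checks and before any real work, while gr_log_mount()/gr_log_umount()/gr_log_remount() sit on the main exit path so the outcome is audited whether the operation succeeded or not. Schematically, with all names invented:

#include <stdio.h>

static int policy_allows(const char *dev) { return dev[0] != '\0'; }

static void audit_log(const char *op, const char *dev, int ret)
{
	fprintf(stderr, "grsec-style log: %s %s -> %d\n", op, dev, ret);
}

static int do_mount_sketch(const char *dev)
{
	int retval = 0;

	if (!policy_allows(dev)) {
		retval = -1;            /* -EPERM, before any real work */
		goto out;
	}
	/* ... perform the actual mount here ... */
out:
	audit_log("mount", dev, retval); /* every outcome gets logged */
	return retval;
}

int main(void)
{
	do_mount_sketch("/dev/sda1");
	do_mount_sketch("");
	return 0;
}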
56406diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
56407index cff089a..4c3d57a 100644
56408--- a/fs/nfs/callback.c
56409+++ b/fs/nfs/callback.c
56410@@ -211,7 +211,6 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
56411 struct svc_rqst *rqstp;
56412 int (*callback_svc)(void *vrqstp);
56413 struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
56414- char svc_name[12];
56415 int ret;
56416
56417 nfs_callback_bc_serv(minorversion, xprt, serv);
56418@@ -235,10 +234,9 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
56419
56420 svc_sock_update_bufs(serv);
56421
56422- sprintf(svc_name, "nfsv4.%u-svc", minorversion);
56423 cb_info->serv = serv;
56424 cb_info->rqst = rqstp;
56425- cb_info->task = kthread_run(callback_svc, cb_info->rqst, svc_name);
56426+ cb_info->task = kthread_run(callback_svc, cb_info->rqst, "nfsv4.%u-svc", minorversion);
56427 if (IS_ERR(cb_info->task)) {
56428 ret = PTR_ERR(cb_info->task);
56429 svc_exit_thread(cb_info->rqst);
56430diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
56431index a35582c..ebbdcd5 100644
56432--- a/fs/nfs/callback_xdr.c
56433+++ b/fs/nfs/callback_xdr.c
56434@@ -51,7 +51,7 @@ struct callback_op {
56435 callback_decode_arg_t decode_args;
56436 callback_encode_res_t encode_res;
56437 long res_maxsize;
56438-};
56439+} __do_const;
56440
56441 static struct callback_op callback_ops[];
56442
56443diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
56444index c1c7a9d..7afa0b8 100644
56445--- a/fs/nfs/inode.c
56446+++ b/fs/nfs/inode.c
56447@@ -1043,16 +1043,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
56448 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
56449 }
56450
56451-static atomic_long_t nfs_attr_generation_counter;
56452+static atomic_long_unchecked_t nfs_attr_generation_counter;
56453
56454 static unsigned long nfs_read_attr_generation_counter(void)
56455 {
56456- return atomic_long_read(&nfs_attr_generation_counter);
56457+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
56458 }
56459
56460 unsigned long nfs_inc_attr_generation_counter(void)
56461 {
56462- return atomic_long_inc_return(&nfs_attr_generation_counter);
56463+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
56464 }
56465
56466 void nfs_fattr_init(struct nfs_fattr *fattr)
56467diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
56468index 2c37442..9b9538b 100644
56469--- a/fs/nfs/nfs4state.c
56470+++ b/fs/nfs/nfs4state.c
56471@@ -1193,7 +1193,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
56472 snprintf(buf, sizeof(buf), "%s-manager",
56473 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
56474 rcu_read_unlock();
56475- task = kthread_run(nfs4_run_state_manager, clp, buf);
56476+ task = kthread_run(nfs4_run_state_manager, clp, "%s", buf);
56477 if (IS_ERR(task)) {
56478 printk(KERN_ERR "%s: kthread_run: %ld\n",
56479 __func__, PTR_ERR(task));
56480diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
56481index 27d74a2..c4c2a73 100644
56482--- a/fs/nfsd/nfs4proc.c
56483+++ b/fs/nfsd/nfs4proc.c
56484@@ -1126,7 +1126,7 @@ struct nfsd4_operation {
56485 nfsd4op_rsize op_rsize_bop;
56486 stateid_getter op_get_currentstateid;
56487 stateid_setter op_set_currentstateid;
56488-};
56489+} __do_const;
56490
56491 static struct nfsd4_operation nfsd4_ops[];
56492
56493diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
56494index 582321a..0224663 100644
56495--- a/fs/nfsd/nfs4xdr.c
56496+++ b/fs/nfsd/nfs4xdr.c
56497@@ -1458,7 +1458,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
56498
56499 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
56500
56501-static nfsd4_dec nfsd4_dec_ops[] = {
56502+static const nfsd4_dec nfsd4_dec_ops[] = {
56503 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
56504 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
56505 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
56506@@ -1498,7 +1498,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
56507 [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
56508 };
56509
56510-static nfsd4_dec nfsd41_dec_ops[] = {
56511+static const nfsd4_dec nfsd41_dec_ops[] = {
56512 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
56513 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
56514 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
56515@@ -1560,7 +1560,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
56516 };
56517
56518 struct nfsd4_minorversion_ops {
56519- nfsd4_dec *decoders;
56520+ const nfsd4_dec *decoders;
56521 int nops;
56522 };
56523
56524diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
56525index e76244e..9fe8f2f1 100644
56526--- a/fs/nfsd/nfscache.c
56527+++ b/fs/nfsd/nfscache.c
56528@@ -526,14 +526,17 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
56529 {
56530 struct svc_cacherep *rp = rqstp->rq_cacherep;
56531 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
56532- int len;
56533+ long len;
56534 size_t bufsize = 0;
56535
56536 if (!rp)
56537 return;
56538
56539- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
56540- len >>= 2;
56541+ if (statp) {
56542+ len = (char*)statp - (char*)resv->iov_base;
56543+ len = resv->iov_len - len;
56544+ len >>= 2;
56545+ }
56546
56547 /* Don't cache excessive amounts of data and XDR failures */
56548 if (!statp || len > (256 >> 2)) {
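[Note on the nfscache.c hunk above] nfsd_cache_update() no longer does pointer arithmetic on statp before the NULL test -- (char *)NULL minus a valid pointer is undefined -- and len is widened from int to long so the value later compared against (256 >> 2) cannot have been truncated on the way. The guarded computation, in isolation:

#include <stdio.h>

static long cached_len(const void *base, unsigned long iov_len, const void *statp)
{
	long len = -1;

	if (statp) {
		len = (const char *)statp - (const char *)base;
		len = (long)iov_len - len;
		len >>= 2;              /* bytes -> 32-bit XDR words */
	}
	return len;
}

int main(void)
{
	char buf[256];

	printf("%ld words\n", cached_len(buf, sizeof(buf), buf + 16));
	printf("%ld when statp is NULL\n", cached_len(buf, sizeof(buf), NULL));
	return 0;
}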
56549diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
56550index baf149a..76b86ad 100644
56551--- a/fs/nfsd/vfs.c
56552+++ b/fs/nfsd/vfs.c
56553@@ -940,7 +940,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
56554 } else {
56555 oldfs = get_fs();
56556 set_fs(KERNEL_DS);
56557- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
56558+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
56559 set_fs(oldfs);
56560 }
56561
56562@@ -1027,7 +1027,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
56563
56564 /* Write the data. */
56565 oldfs = get_fs(); set_fs(KERNEL_DS);
56566- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
56567+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
56568 set_fs(oldfs);
56569 if (host_err < 0)
56570 goto out_nfserr;
56571@@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
56572 */
56573
56574 oldfs = get_fs(); set_fs(KERNEL_DS);
56575- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
56576+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
56577 set_fs(oldfs);
56578
56579 if (host_err < 0)
56580diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
56581index fea6bd5..8ee9d81 100644
56582--- a/fs/nls/nls_base.c
56583+++ b/fs/nls/nls_base.c
56584@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
56585
56586 int register_nls(struct nls_table * nls)
56587 {
56588- struct nls_table ** tmp = &tables;
56589+ struct nls_table *tmp = tables;
56590
56591 if (nls->next)
56592 return -EBUSY;
56593
56594 spin_lock(&nls_lock);
56595- while (*tmp) {
56596- if (nls == *tmp) {
56597+ while (tmp) {
56598+ if (nls == tmp) {
56599 spin_unlock(&nls_lock);
56600 return -EBUSY;
56601 }
56602- tmp = &(*tmp)->next;
56603+ tmp = tmp->next;
56604 }
56605- nls->next = tables;
56606+ pax_open_kernel();
56607+ *(struct nls_table **)&nls->next = tables;
56608+ pax_close_kernel();
56609 tables = nls;
56610 spin_unlock(&nls_lock);
56611 return 0;
56612@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
56613
56614 int unregister_nls(struct nls_table * nls)
56615 {
56616- struct nls_table ** tmp = &tables;
56617+ struct nls_table * const * tmp = &tables;
56618
56619 spin_lock(&nls_lock);
56620 while (*tmp) {
56621 if (nls == *tmp) {
56622- *tmp = nls->next;
56623+ pax_open_kernel();
56624+ *(struct nls_table **)tmp = nls->next;
56625+ pax_close_kernel();
56626 spin_unlock(&nls_lock);
56627 return 0;
56628 }
56629diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
56630index 7424929..35f6be5 100644
56631--- a/fs/nls/nls_euc-jp.c
56632+++ b/fs/nls/nls_euc-jp.c
56633@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
56634 p_nls = load_nls("cp932");
56635
56636 if (p_nls) {
56637- table.charset2upper = p_nls->charset2upper;
56638- table.charset2lower = p_nls->charset2lower;
56639+ pax_open_kernel();
56640+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
56641+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
56642+ pax_close_kernel();
56643 return register_nls(&table);
56644 }
56645
56646diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
56647index e7bc1d7..06bd4bb 100644
56648--- a/fs/nls/nls_koi8-ru.c
56649+++ b/fs/nls/nls_koi8-ru.c
56650@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
56651 p_nls = load_nls("koi8-u");
56652
56653 if (p_nls) {
56654- table.charset2upper = p_nls->charset2upper;
56655- table.charset2lower = p_nls->charset2lower;
56656+ pax_open_kernel();
56657+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
56658+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
56659+ pax_close_kernel();
56660 return register_nls(&table);
56661 }
56662
56663diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
56664index 77cc85d..a1e6299 100644
56665--- a/fs/notify/fanotify/fanotify_user.c
56666+++ b/fs/notify/fanotify/fanotify_user.c
56667@@ -253,8 +253,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
56668
56669 fd = fanotify_event_metadata.fd;
56670 ret = -EFAULT;
56671- if (copy_to_user(buf, &fanotify_event_metadata,
56672- fanotify_event_metadata.event_len))
56673+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
56674+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
56675 goto out_close_fd;
56676
56677 ret = prepare_for_access_response(group, event, fd);
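[Note on the fanotify hunk above] copy_event_to_user() now refuses to copy more than sizeof(fanotify_event_metadata): event_len comes from the event object, and trusting it unchecked would let an oversized value leak whatever sits past the metadata on the kernel stack. The shape of the guard, as a userspace sketch with copy_out() standing in for copy_to_user():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct metadata { uint32_t event_len; int32_t fd; char payload[8]; };

/* stand-in for copy_to_user(); returns 0 on success */
static int copy_out(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

static int emit_event(void *ubuf, const struct metadata *m)
{
	if (m->event_len > sizeof(*m))  /* the added bounds check */
		return -1;              /* -EFAULT in the kernel   */
	return copy_out(ubuf, m, m->event_len);
}

int main(void)
{
	char buf[64];
	struct metadata ok  = { sizeof(struct metadata), 3, "hi" };
	struct metadata bad = { 4096, 3, "hi" };    /* would over-read */

	printf("ok:  %d\n", emit_event(buf, &ok));
	printf("bad: %d\n", emit_event(buf, &bad));
	return 0;
}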
56678diff --git a/fs/notify/notification.c b/fs/notify/notification.c
56679index 7b51b05..5ea5ef6 100644
56680--- a/fs/notify/notification.c
56681+++ b/fs/notify/notification.c
56682@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
56683 * get set to 0 so it will never get 'freed'
56684 */
56685 static struct fsnotify_event *q_overflow_event;
56686-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
56687+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
56688
56689 /**
56690 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
56691@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
56692 */
56693 u32 fsnotify_get_cookie(void)
56694 {
56695- return atomic_inc_return(&fsnotify_sync_cookie);
56696+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
56697 }
56698 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
56699
56700diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
56701index aa411c3..c260a84 100644
56702--- a/fs/ntfs/dir.c
56703+++ b/fs/ntfs/dir.c
56704@@ -1329,7 +1329,7 @@ find_next_index_buffer:
56705 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
56706 ~(s64)(ndir->itype.index.block_size - 1)));
56707 /* Bounds checks. */
56708- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
56709+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
56710 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
56711 "inode 0x%lx or driver bug.", vdir->i_ino);
56712 goto err_out;
56713diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
56714index c5670b8..01a3656 100644
56715--- a/fs/ntfs/file.c
56716+++ b/fs/ntfs/file.c
56717@@ -2241,6 +2241,6 @@ const struct inode_operations ntfs_file_inode_ops = {
56718 #endif /* NTFS_RW */
56719 };
56720
56721-const struct file_operations ntfs_empty_file_ops = {};
56722+const struct file_operations ntfs_empty_file_ops __read_only;
56723
56724-const struct inode_operations ntfs_empty_inode_ops = {};
56725+const struct inode_operations ntfs_empty_inode_ops __read_only;
56726diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
56727index 20dfec7..e238cb7 100644
56728--- a/fs/ocfs2/aops.c
56729+++ b/fs/ocfs2/aops.c
56730@@ -1756,7 +1756,7 @@ try_again:
56731 goto out;
56732 } else if (ret == 1) {
56733 clusters_need = wc->w_clen;
56734- ret = ocfs2_refcount_cow(inode, filp, di_bh,
56735+ ret = ocfs2_refcount_cow(inode, di_bh,
56736 wc->w_cpos, wc->w_clen, UINT_MAX);
56737 if (ret) {
56738 mlog_errno(ret);
56739diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
56740index ff54014..ff125fd 100644
56741--- a/fs/ocfs2/file.c
56742+++ b/fs/ocfs2/file.c
56743@@ -370,7 +370,7 @@ static int ocfs2_cow_file_pos(struct inode *inode,
56744 if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
56745 goto out;
56746
56747- return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1);
56748+ return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
56749
56750 out:
56751 return status;
56752@@ -899,7 +899,7 @@ static int ocfs2_zero_extend_get_range(struct inode *inode,
56753 zero_clusters = last_cpos - zero_cpos;
56754
56755 if (needs_cow) {
56756- rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos,
56757+ rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
56758 zero_clusters, UINT_MAX);
56759 if (rc) {
56760 mlog_errno(rc);
56761@@ -2078,7 +2078,7 @@ static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
56762
56763 *meta_level = 1;
56764
56765- ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX);
56766+ ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
56767 if (ret)
56768 mlog_errno(ret);
56769 out:
56770diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
56771index aebeacd..0dcdd26 100644
56772--- a/fs/ocfs2/localalloc.c
56773+++ b/fs/ocfs2/localalloc.c
56774@@ -1278,7 +1278,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
56775 goto bail;
56776 }
56777
56778- atomic_inc(&osb->alloc_stats.moves);
56779+ atomic_inc_unchecked(&osb->alloc_stats.moves);
56780
56781 bail:
56782 if (handle)
56783diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
56784index f1fc172..452068b 100644
56785--- a/fs/ocfs2/move_extents.c
56786+++ b/fs/ocfs2/move_extents.c
56787@@ -69,7 +69,7 @@ static int __ocfs2_move_extent(handle_t *handle,
56788 u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
56789 u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
56790
56791- ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos,
56792+ ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
56793 p_cpos, new_p_cpos, len);
56794 if (ret) {
56795 mlog_errno(ret);
56796diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
56797index d355e6e..578d905 100644
56798--- a/fs/ocfs2/ocfs2.h
56799+++ b/fs/ocfs2/ocfs2.h
56800@@ -235,11 +235,11 @@ enum ocfs2_vol_state
56801
56802 struct ocfs2_alloc_stats
56803 {
56804- atomic_t moves;
56805- atomic_t local_data;
56806- atomic_t bitmap_data;
56807- atomic_t bg_allocs;
56808- atomic_t bg_extends;
56809+ atomic_unchecked_t moves;
56810+ atomic_unchecked_t local_data;
56811+ atomic_unchecked_t bitmap_data;
56812+ atomic_unchecked_t bg_allocs;
56813+ atomic_unchecked_t bg_extends;
56814 };
56815
56816 enum ocfs2_local_alloc_state
56817diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
56818index 998b17e..aefe414 100644
56819--- a/fs/ocfs2/refcounttree.c
56820+++ b/fs/ocfs2/refcounttree.c
56821@@ -49,7 +49,6 @@
56822
56823 struct ocfs2_cow_context {
56824 struct inode *inode;
56825- struct file *file;
56826 u32 cow_start;
56827 u32 cow_len;
56828 struct ocfs2_extent_tree data_et;
56829@@ -66,7 +65,7 @@ struct ocfs2_cow_context {
56830 u32 *num_clusters,
56831 unsigned int *extent_flags);
56832 int (*cow_duplicate_clusters)(handle_t *handle,
56833- struct file *file,
56834+ struct inode *inode,
56835 u32 cpos, u32 old_cluster,
56836 u32 new_cluster, u32 new_len);
56837 };
56838@@ -2922,14 +2921,12 @@ static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
56839 }
56840
56841 int ocfs2_duplicate_clusters_by_page(handle_t *handle,
56842- struct file *file,
56843+ struct inode *inode,
56844 u32 cpos, u32 old_cluster,
56845 u32 new_cluster, u32 new_len)
56846 {
56847 int ret = 0, partial;
56848- struct inode *inode = file_inode(file);
56849- struct ocfs2_caching_info *ci = INODE_CACHE(inode);
56850- struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
56851+ struct super_block *sb = inode->i_sb;
56852 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
56853 struct page *page;
56854 pgoff_t page_index;
56855@@ -2973,13 +2970,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
56856 if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
56857 BUG_ON(PageDirty(page));
56858
56859- if (PageReadahead(page)) {
56860- page_cache_async_readahead(mapping,
56861- &file->f_ra, file,
56862- page, page_index,
56863- readahead_pages);
56864- }
56865-
56866 if (!PageUptodate(page)) {
56867 ret = block_read_full_page(page, ocfs2_get_block);
56868 if (ret) {
56869@@ -2999,7 +2989,8 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
56870 }
56871 }
56872
56873- ocfs2_map_and_dirty_page(inode, handle, from, to,
56874+ ocfs2_map_and_dirty_page(inode,
56875+ handle, from, to,
56876 page, 0, &new_block);
56877 mark_page_accessed(page);
56878 unlock:
56879@@ -3015,12 +3006,11 @@ unlock:
56880 }
56881
56882 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
56883- struct file *file,
56884+ struct inode *inode,
56885 u32 cpos, u32 old_cluster,
56886 u32 new_cluster, u32 new_len)
56887 {
56888 int ret = 0;
56889- struct inode *inode = file_inode(file);
56890 struct super_block *sb = inode->i_sb;
56891 struct ocfs2_caching_info *ci = INODE_CACHE(inode);
56892 int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
56893@@ -3145,7 +3135,7 @@ static int ocfs2_replace_clusters(handle_t *handle,
56894
56895 /*If the old clusters is unwritten, no need to duplicate. */
56896 if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
56897- ret = context->cow_duplicate_clusters(handle, context->file,
56898+ ret = context->cow_duplicate_clusters(handle, context->inode,
56899 cpos, old, new, len);
56900 if (ret) {
56901 mlog_errno(ret);
56902@@ -3423,35 +3413,12 @@ static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
56903 return ret;
56904 }
56905
56906-static void ocfs2_readahead_for_cow(struct inode *inode,
56907- struct file *file,
56908- u32 start, u32 len)
56909-{
56910- struct address_space *mapping;
56911- pgoff_t index;
56912- unsigned long num_pages;
56913- int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
56914-
56915- if (!file)
56916- return;
56917-
56918- mapping = file->f_mapping;
56919- num_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT;
56920- if (!num_pages)
56921- num_pages = 1;
56922-
56923- index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT;
56924- page_cache_sync_readahead(mapping, &file->f_ra, file,
56925- index, num_pages);
56926-}
56927-
56928 /*
56929 * Starting at cpos, try to CoW write_len clusters. Don't CoW
56930 * past max_cpos. This will stop when it runs into a hole or an
56931 * unrefcounted extent.
56932 */
56933 static int ocfs2_refcount_cow_hunk(struct inode *inode,
56934- struct file *file,
56935 struct buffer_head *di_bh,
56936 u32 cpos, u32 write_len, u32 max_cpos)
56937 {
56938@@ -3480,8 +3447,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
56939
56940 BUG_ON(cow_len == 0);
56941
56942- ocfs2_readahead_for_cow(inode, file, cow_start, cow_len);
56943-
56944 context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
56945 if (!context) {
56946 ret = -ENOMEM;
56947@@ -3503,7 +3468,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
56948 context->ref_root_bh = ref_root_bh;
56949 context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
56950 context->get_clusters = ocfs2_di_get_clusters;
56951- context->file = file;
56952
56953 ocfs2_init_dinode_extent_tree(&context->data_et,
56954 INODE_CACHE(inode), di_bh);
56955@@ -3532,7 +3496,6 @@ out:
56956 * clusters between cpos and cpos+write_len are safe to modify.
56957 */
56958 int ocfs2_refcount_cow(struct inode *inode,
56959- struct file *file,
56960 struct buffer_head *di_bh,
56961 u32 cpos, u32 write_len, u32 max_cpos)
56962 {
56963@@ -3552,7 +3515,7 @@ int ocfs2_refcount_cow(struct inode *inode,
56964 num_clusters = write_len;
56965
56966 if (ext_flags & OCFS2_EXT_REFCOUNTED) {
56967- ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos,
56968+ ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
56969 num_clusters, max_cpos);
56970 if (ret) {
56971 mlog_errno(ret);
56972diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h
56973index 7754608..6422bbcdb 100644
56974--- a/fs/ocfs2/refcounttree.h
56975+++ b/fs/ocfs2/refcounttree.h
56976@@ -53,7 +53,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
56977 int *credits,
56978 int *ref_blocks);
56979 int ocfs2_refcount_cow(struct inode *inode,
56980- struct file *filep, struct buffer_head *di_bh,
56981+ struct buffer_head *di_bh,
56982 u32 cpos, u32 write_len, u32 max_cpos);
56983
56984 typedef int (ocfs2_post_refcount_func)(struct inode *inode,
56985@@ -85,11 +85,11 @@ int ocfs2_refcount_cow_xattr(struct inode *inode,
56986 u32 cpos, u32 write_len,
56987 struct ocfs2_post_refcount *post);
56988 int ocfs2_duplicate_clusters_by_page(handle_t *handle,
56989- struct file *file,
56990+ struct inode *inode,
56991 u32 cpos, u32 old_cluster,
56992 u32 new_cluster, u32 new_len);
56993 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
56994- struct file *file,
56995+ struct inode *inode,
56996 u32 cpos, u32 old_cluster,
56997 u32 new_cluster, u32 new_len);
56998 int ocfs2_cow_sync_writeback(struct super_block *sb,
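The ocfs2 hunks above change the CoW duplication entry points to take a struct inode rather than a struct file, and drop the file-based readahead entirely. This tracks the upstream stable fix for CoW paths that have no struct file at hand (reflink through the xattr code, for instance), where deriving the inode via file_inode(file) could dereference a NULL file. A minimal caller sketch under the new prototypes; the wrapper name is illustrative, not from the patch:

/* With the inode-based signature, any CoW path can duplicate clusters
 * without synthesizing a struct file first. */
static int duplicate_for_cow(handle_t *handle, struct inode *inode,
			     u32 cpos, u32 old_cluster,
			     u32 new_cluster, u32 new_len)
{
	/* the jbd-based variant has the identical shape */
	return ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
						old_cluster, new_cluster,
						new_len);
}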
56999diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
57000index b7e74b5..19c6536 100644
57001--- a/fs/ocfs2/suballoc.c
57002+++ b/fs/ocfs2/suballoc.c
57003@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
57004 mlog_errno(status);
57005 goto bail;
57006 }
57007- atomic_inc(&osb->alloc_stats.bg_extends);
57008+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
57009
57010 /* You should never ask for this much metadata */
57011 BUG_ON(bits_wanted >
57012@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
57013 mlog_errno(status);
57014 goto bail;
57015 }
57016- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
57017+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
57018
57019 *suballoc_loc = res.sr_bg_blkno;
57020 *suballoc_bit_start = res.sr_bit_offset;
57021@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
57022 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
57023 res->sr_bits);
57024
57025- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
57026+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
57027
57028 BUG_ON(res->sr_bits != 1);
57029
57030@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
57031 mlog_errno(status);
57032 goto bail;
57033 }
57034- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
57035+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
57036
57037 BUG_ON(res.sr_bits != 1);
57038
57039@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
57040 cluster_start,
57041 num_clusters);
57042 if (!status)
57043- atomic_inc(&osb->alloc_stats.local_data);
57044+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
57045 } else {
57046 if (min_clusters > (osb->bitmap_cpg - 1)) {
57047 /* The only paths asking for contiguousness
57048@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
57049 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
57050 res.sr_bg_blkno,
57051 res.sr_bit_offset);
57052- atomic_inc(&osb->alloc_stats.bitmap_data);
57053+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
57054 *num_clusters = res.sr_bits;
57055 }
57056 }
57057diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
57058index 01b8516..579c4df 100644
57059--- a/fs/ocfs2/super.c
57060+++ b/fs/ocfs2/super.c
57061@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
57062 "%10s => GlobalAllocs: %d LocalAllocs: %d "
57063 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
57064 "Stats",
57065- atomic_read(&osb->alloc_stats.bitmap_data),
57066- atomic_read(&osb->alloc_stats.local_data),
57067- atomic_read(&osb->alloc_stats.bg_allocs),
57068- atomic_read(&osb->alloc_stats.moves),
57069- atomic_read(&osb->alloc_stats.bg_extends));
57070+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
57071+ atomic_read_unchecked(&osb->alloc_stats.local_data),
57072+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
57073+ atomic_read_unchecked(&osb->alloc_stats.moves),
57074+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
57075
57076 out += snprintf(buf + out, len - out,
57077 "%10s => State: %u Descriptor: %llu Size: %u bits "
57078@@ -2122,11 +2122,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
57079 spin_lock_init(&osb->osb_xattr_lock);
57080 ocfs2_init_steal_slots(osb);
57081
57082- atomic_set(&osb->alloc_stats.moves, 0);
57083- atomic_set(&osb->alloc_stats.local_data, 0);
57084- atomic_set(&osb->alloc_stats.bitmap_data, 0);
57085- atomic_set(&osb->alloc_stats.bg_allocs, 0);
57086- atomic_set(&osb->alloc_stats.bg_extends, 0);
57087+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
57088+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
57089+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
57090+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
57091+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
57092
57093 /* Copy the blockcheck stats from the superblock probe */
57094 osb->osb_ecc_stats = *stats;
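The suballoc.c and super.c hunks convert the osb->alloc_stats counters to the PaX "unchecked" atomic operations. Under PAX_REFCOUNT, plain atomic_t operations gain overflow detection, so a reference count that would wrap traps instead; counters that are pure statistics, where wrapping is harmless and saturation would only distort the numbers, opt out via the _unchecked variants. When the feature is compiled out, the unchecked names collapse to the plain operations, roughly as sketched below (the exact definitions live elsewhere in this patch):

/* Disabled-case mapping, sketched from the conventions this patch
 * uses: with PAX_REFCOUNT off, "unchecked" is just the plain atomic. */
#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)	atomic_read(v)
#define atomic_set_unchecked(v, i)	atomic_set((v), (i))
#define atomic_inc_unchecked(v)		atomic_inc(v)
#endif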
57095diff --git a/fs/open.c b/fs/open.c
57096index 8c74100..4239c48 100644
57097--- a/fs/open.c
57098+++ b/fs/open.c
57099@@ -32,6 +32,8 @@
57100 #include <linux/dnotify.h>
57101 #include <linux/compat.h>
57102
57103+#define CREATE_TRACE_POINTS
57104+#include <trace/events/fs.h>
57105 #include "internal.h"
57106
57107 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
57108@@ -102,6 +104,8 @@ long vfs_truncate(struct path *path, loff_t length)
57109 error = locks_verify_truncate(inode, NULL, length);
57110 if (!error)
57111 error = security_path_truncate(path);
57112+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
57113+ error = -EACCES;
57114 if (!error)
57115 error = do_truncate(path->dentry, length, 0, NULL);
57116
57117@@ -186,6 +190,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
57118 error = locks_verify_truncate(inode, f.file, length);
57119 if (!error)
57120 error = security_path_truncate(&f.file->f_path);
57121+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
57122+ error = -EACCES;
57123 if (!error)
57124 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
57125 sb_end_write(inode->i_sb);
57126@@ -360,6 +366,9 @@ retry:
57127 if (__mnt_is_readonly(path.mnt))
57128 res = -EROFS;
57129
57130+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
57131+ res = -EACCES;
57132+
57133 out_path_release:
57134 path_put(&path);
57135 if (retry_estale(res, lookup_flags)) {
57136@@ -391,6 +400,8 @@ retry:
57137 if (error)
57138 goto dput_and_out;
57139
57140+ gr_log_chdir(path.dentry, path.mnt);
57141+
57142 set_fs_pwd(current->fs, &path);
57143
57144 dput_and_out:
57145@@ -420,6 +431,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
57146 goto out_putf;
57147
57148 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
57149+
57150+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
57151+ error = -EPERM;
57152+
57153+ if (!error)
57154+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
57155+
57156 if (!error)
57157 set_fs_pwd(current->fs, &f.file->f_path);
57158 out_putf:
57159@@ -449,7 +467,13 @@ retry:
57160 if (error)
57161 goto dput_and_out;
57162
57163+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
57164+ goto dput_and_out;
57165+
57166 set_fs_root(current->fs, &path);
57167+
57168+ gr_handle_chroot_chdir(&path);
57169+
57170 error = 0;
57171 dput_and_out:
57172 path_put(&path);
57173@@ -471,6 +495,16 @@ static int chmod_common(struct path *path, umode_t mode)
57174 if (error)
57175 return error;
57176 mutex_lock(&inode->i_mutex);
57177+
57178+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
57179+ error = -EACCES;
57180+ goto out_unlock;
57181+ }
57182+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
57183+ error = -EACCES;
57184+ goto out_unlock;
57185+ }
57186+
57187 error = security_path_chmod(path, mode);
57188 if (error)
57189 goto out_unlock;
57190@@ -531,6 +565,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
57191 uid = make_kuid(current_user_ns(), user);
57192 gid = make_kgid(current_user_ns(), group);
57193
57194+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
57195+ return -EACCES;
57196+
57197 newattrs.ia_valid = ATTR_CTIME;
57198 if (user != (uid_t) -1) {
57199 if (!uid_valid(uid))
57200@@ -946,6 +983,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
57201 } else {
57202 fsnotify_open(f);
57203 fd_install(fd, f);
57204+ trace_do_sys_open(tmp->name, flags, mode);
57205 }
57206 }
57207 putname(tmp);
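The open.c hunks do two independent things: they add a do_sys_open tracepoint (CREATE_TRACE_POINTS plus trace/events/fs.h, fired once a descriptor is successfully installed), and they thread the truncate, access, chdir, fchdir, chroot, chmod and chown paths through gr_* hooks that can veto the operation with -EACCES or -EPERM after the DAC and LSM checks have already passed. The hook convention, sketched from how the call sites above use it; the stub bodies are assumptions about the compiled-out case, not quoted from the patch:

/* Convention: "gr_acl_handle_*" return nonzero to allow, zero to
 * deny; "gr_handle_*" return nonzero to deny, zero to allow. */
#ifndef CONFIG_GRKERNSEC
static inline int gr_acl_handle_truncate(const struct dentry *dentry,
					 const struct vfsmount *mnt)
{
	return 1;	/* feature off: always allow */
}
static inline int gr_handle_chroot_chroot(const struct dentry *dentry,
					  const struct vfsmount *mnt)
{
	return 0;	/* feature off: never deny */
}
#endif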
57208diff --git a/fs/pipe.c b/fs/pipe.c
57209index d2c45e1..009fe1c 100644
57210--- a/fs/pipe.c
57211+++ b/fs/pipe.c
57212@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
57213
57214 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
57215 {
57216- if (pipe->files)
57217+ if (atomic_read(&pipe->files))
57218 mutex_lock_nested(&pipe->mutex, subclass);
57219 }
57220
57221@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
57222
57223 void pipe_unlock(struct pipe_inode_info *pipe)
57224 {
57225- if (pipe->files)
57226+ if (atomic_read(&pipe->files))
57227 mutex_unlock(&pipe->mutex);
57228 }
57229 EXPORT_SYMBOL(pipe_unlock);
57230@@ -449,9 +449,9 @@ redo:
57231 }
57232 if (bufs) /* More to do? */
57233 continue;
57234- if (!pipe->writers)
57235+ if (!atomic_read(&pipe->writers))
57236 break;
57237- if (!pipe->waiting_writers) {
57238+ if (!atomic_read(&pipe->waiting_writers)) {
57239 /* syscall merging: Usually we must not sleep
57240 * if O_NONBLOCK is set, or if we got some data.
57241 * But if a writer sleeps in kernel space, then
57242@@ -513,7 +513,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
57243 ret = 0;
57244 __pipe_lock(pipe);
57245
57246- if (!pipe->readers) {
57247+ if (!atomic_read(&pipe->readers)) {
57248 send_sig(SIGPIPE, current, 0);
57249 ret = -EPIPE;
57250 goto out;
57251@@ -562,7 +562,7 @@ redo1:
57252 for (;;) {
57253 int bufs;
57254
57255- if (!pipe->readers) {
57256+ if (!atomic_read(&pipe->readers)) {
57257 send_sig(SIGPIPE, current, 0);
57258 if (!ret)
57259 ret = -EPIPE;
57260@@ -653,9 +653,9 @@ redo2:
57261 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
57262 do_wakeup = 0;
57263 }
57264- pipe->waiting_writers++;
57265+ atomic_inc(&pipe->waiting_writers);
57266 pipe_wait(pipe);
57267- pipe->waiting_writers--;
57268+ atomic_dec(&pipe->waiting_writers);
57269 }
57270 out:
57271 __pipe_unlock(pipe);
57272@@ -709,7 +709,7 @@ pipe_poll(struct file *filp, poll_table *wait)
57273 mask = 0;
57274 if (filp->f_mode & FMODE_READ) {
57275 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
57276- if (!pipe->writers && filp->f_version != pipe->w_counter)
57277+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
57278 mask |= POLLHUP;
57279 }
57280
57281@@ -719,7 +719,7 @@ pipe_poll(struct file *filp, poll_table *wait)
57282 * Most Unices do not set POLLERR for FIFOs but on Linux they
57283 * behave exactly like pipes for poll().
57284 */
57285- if (!pipe->readers)
57286+ if (!atomic_read(&pipe->readers))
57287 mask |= POLLERR;
57288 }
57289
57290@@ -734,17 +734,17 @@ pipe_release(struct inode *inode, struct file *file)
57291
57292 __pipe_lock(pipe);
57293 if (file->f_mode & FMODE_READ)
57294- pipe->readers--;
57295+ atomic_dec(&pipe->readers);
57296 if (file->f_mode & FMODE_WRITE)
57297- pipe->writers--;
57298+ atomic_dec(&pipe->writers);
57299
57300- if (pipe->readers || pipe->writers) {
57301+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
57302 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
57303 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
57304 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
57305 }
57306 spin_lock(&inode->i_lock);
57307- if (!--pipe->files) {
57308+ if (atomic_dec_and_test(&pipe->files)) {
57309 inode->i_pipe = NULL;
57310 kill = 1;
57311 }
57312@@ -811,7 +811,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
57313 kfree(pipe);
57314 }
57315
57316-static struct vfsmount *pipe_mnt __read_mostly;
57317+struct vfsmount *pipe_mnt __read_mostly;
57318
57319 /*
57320 * pipefs_dname() is called from d_path().
57321@@ -841,8 +841,9 @@ static struct inode * get_pipe_inode(void)
57322 goto fail_iput;
57323
57324 inode->i_pipe = pipe;
57325- pipe->files = 2;
57326- pipe->readers = pipe->writers = 1;
57327+ atomic_set(&pipe->files, 2);
57328+ atomic_set(&pipe->readers, 1);
57329+ atomic_set(&pipe->writers, 1);
57330 inode->i_fop = &pipefifo_fops;
57331
57332 /*
57333@@ -1022,17 +1023,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
57334 spin_lock(&inode->i_lock);
57335 if (inode->i_pipe) {
57336 pipe = inode->i_pipe;
57337- pipe->files++;
57338+ atomic_inc(&pipe->files);
57339 spin_unlock(&inode->i_lock);
57340 } else {
57341 spin_unlock(&inode->i_lock);
57342 pipe = alloc_pipe_info();
57343 if (!pipe)
57344 return -ENOMEM;
57345- pipe->files = 1;
57346+ atomic_set(&pipe->files, 1);
57347 spin_lock(&inode->i_lock);
57348 if (unlikely(inode->i_pipe)) {
57349- inode->i_pipe->files++;
57350+ atomic_inc(&inode->i_pipe->files);
57351 spin_unlock(&inode->i_lock);
57352 free_pipe_info(pipe);
57353 pipe = inode->i_pipe;
57354@@ -1057,10 +1058,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
57355 * opened, even when there is no process writing the FIFO.
57356 */
57357 pipe->r_counter++;
57358- if (pipe->readers++ == 0)
57359+ if (atomic_inc_return(&pipe->readers) == 1)
57360 wake_up_partner(pipe);
57361
57362- if (!is_pipe && !pipe->writers) {
57363+ if (!is_pipe && !atomic_read(&pipe->writers)) {
57364 if ((filp->f_flags & O_NONBLOCK)) {
57365 /* suppress POLLHUP until we have
57366 * seen a writer */
57367@@ -1079,14 +1080,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
57368 * errno=ENXIO when there is no process reading the FIFO.
57369 */
57370 ret = -ENXIO;
57371- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
57372+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
57373 goto err;
57374
57375 pipe->w_counter++;
57376- if (!pipe->writers++)
57377+ if (atomic_inc_return(&pipe->writers) == 1)
57378 wake_up_partner(pipe);
57379
57380- if (!is_pipe && !pipe->readers) {
57381+ if (!is_pipe && !atomic_read(&pipe->readers)) {
57382 if (wait_for_partner(pipe, &pipe->r_counter))
57383 goto err_wr;
57384 }
57385@@ -1100,11 +1101,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
57386 * the process can at least talk to itself.
57387 */
57388
57389- pipe->readers++;
57390- pipe->writers++;
57391+ atomic_inc(&pipe->readers);
57392+ atomic_inc(&pipe->writers);
57393 pipe->r_counter++;
57394 pipe->w_counter++;
57395- if (pipe->readers == 1 || pipe->writers == 1)
57396+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
57397 wake_up_partner(pipe);
57398 break;
57399
57400@@ -1118,20 +1119,20 @@ static int fifo_open(struct inode *inode, struct file *filp)
57401 return 0;
57402
57403 err_rd:
57404- if (!--pipe->readers)
57405+ if (atomic_dec_and_test(&pipe->readers))
57406 wake_up_interruptible(&pipe->wait);
57407 ret = -ERESTARTSYS;
57408 goto err;
57409
57410 err_wr:
57411- if (!--pipe->writers)
57412+ if (atomic_dec_and_test(&pipe->writers))
57413 wake_up_interruptible(&pipe->wait);
57414 ret = -ERESTARTSYS;
57415 goto err;
57416
57417 err:
57418 spin_lock(&inode->i_lock);
57419- if (!--pipe->files) {
57420+ if (atomic_dec_and_test(&pipe->files)) {
57421 inode->i_pipe = NULL;
57422 kill = 1;
57423 }
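The pipe.c hunks convert pipe->files, ->readers, ->writers and ->waiting_writers from plain ints to atomic_t. The point is not new concurrency (these fields are still manipulated under the pipe mutex or inode->i_lock); it is that, with PAX_REFCOUNT enabled, the counters now get the same overflow detection as every other checked atomic. Each idiom is translated to its exact atomic equivalent:

/* Idiom-for-idiom translations used throughout the hunks above:
 *
 *   pipe->readers++;            ->  atomic_inc(&pipe->readers);
 *   if (!pipe->writers)         ->  if (!atomic_read(&pipe->writers))
 *   if (!--pipe->files)         ->  if (atomic_dec_and_test(&pipe->files))
 *   if (pipe->readers++ == 0)   ->  if (atomic_inc_return(&pipe->readers) == 1)
 *
 * The last pair is the only non-obvious one: the post-increment
 * compares the old value against 0, while atomic_inc_return() yields
 * the new value, so the threshold shifts from 0 to 1. */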
57424diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
57425index 15af622..0e9f4467 100644
57426--- a/fs/proc/Kconfig
57427+++ b/fs/proc/Kconfig
57428@@ -30,12 +30,12 @@ config PROC_FS
57429
57430 config PROC_KCORE
57431 bool "/proc/kcore support" if !ARM
57432- depends on PROC_FS && MMU
57433+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
57434
57435 config PROC_VMCORE
57436 bool "/proc/vmcore support"
57437- depends on PROC_FS && CRASH_DUMP
57438- default y
57439+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
57440+ default n
57441 help
57442 Exports the dump image of crashed kernel in ELF format.
57443
57444@@ -59,8 +59,8 @@ config PROC_SYSCTL
57445 limited in memory.
57446
57447 config PROC_PAGE_MONITOR
57448- default y
57449- depends on PROC_FS && MMU
57450+ default n
57451+ depends on PROC_FS && MMU && !GRKERNSEC
57452 bool "Enable /proc page monitoring" if EXPERT
57453 help
57454 Various /proc files exist to monitor process memory utilization:
57455diff --git a/fs/proc/array.c b/fs/proc/array.c
57456index cbd0f1b..adec3f0 100644
57457--- a/fs/proc/array.c
57458+++ b/fs/proc/array.c
57459@@ -60,6 +60,7 @@
57460 #include <linux/tty.h>
57461 #include <linux/string.h>
57462 #include <linux/mman.h>
57463+#include <linux/grsecurity.h>
57464 #include <linux/proc_fs.h>
57465 #include <linux/ioport.h>
57466 #include <linux/uaccess.h>
57467@@ -363,6 +364,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
57468 seq_putc(m, '\n');
57469 }
57470
57471+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57472+static inline void task_pax(struct seq_file *m, struct task_struct *p)
57473+{
57474+ if (p->mm)
57475+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
57476+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
57477+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
57478+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
57479+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
57480+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
57481+ else
57482+ seq_printf(m, "PaX:\t-----\n");
57483+}
57484+#endif
57485+
57486 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
57487 struct pid *pid, struct task_struct *task)
57488 {
57489@@ -381,9 +397,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
57490 task_cpus_allowed(m, task);
57491 cpuset_task_status_allowed(m, task);
57492 task_context_switch_counts(m, task);
57493+
57494+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57495+ task_pax(m, task);
57496+#endif
57497+
57498+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
57499+ task_grsec_rbac(m, task);
57500+#endif
57501+
57502 return 0;
57503 }
57504
57505+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57506+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
57507+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
57508+ _mm->pax_flags & MF_PAX_SEGMEXEC))
57509+#endif
57510+
57511 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
57512 struct pid *pid, struct task_struct *task, int whole)
57513 {
57514@@ -405,6 +436,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
57515 char tcomm[sizeof(task->comm)];
57516 unsigned long flags;
57517
57518+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57519+ if (current->exec_id != m->exec_id) {
57520+ gr_log_badprocpid("stat");
57521+ return 0;
57522+ }
57523+#endif
57524+
57525 state = *get_task_state(task);
57526 vsize = eip = esp = 0;
57527 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
57528@@ -476,6 +514,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
57529 gtime = task_gtime(task);
57530 }
57531
57532+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57533+ if (PAX_RAND_FLAGS(mm)) {
57534+ eip = 0;
57535+ esp = 0;
57536+ wchan = 0;
57537+ }
57538+#endif
57539+#ifdef CONFIG_GRKERNSEC_HIDESYM
57540+ wchan = 0;
57541+	eip = 0;
57542+	esp = 0;
57543+#endif
57544+
57545 /* scale priority and nice values from timeslices to -20..20 */
57546 /* to make it look like a "normal" Unix priority/nice value */
57547 priority = task_prio(task);
57548@@ -512,9 +563,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
57549 seq_put_decimal_ull(m, ' ', vsize);
57550 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
57551 seq_put_decimal_ull(m, ' ', rsslim);
57552+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57553+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
57554+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
57555+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
57556+#else
57557 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
57558 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
57559 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
57560+#endif
57561 seq_put_decimal_ull(m, ' ', esp);
57562 seq_put_decimal_ull(m, ' ', eip);
57563 /* The signal information here is obsolete.
57564@@ -536,7 +593,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
57565 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
57566 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
57567
57568- if (mm && permitted) {
57569+ if (mm && permitted
57570+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57571+ && !PAX_RAND_FLAGS(mm)
57572+#endif
57573+ ) {
57574 seq_put_decimal_ull(m, ' ', mm->start_data);
57575 seq_put_decimal_ull(m, ' ', mm->end_data);
57576 seq_put_decimal_ull(m, ' ', mm->start_brk);
57577@@ -574,8 +635,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
57578 struct pid *pid, struct task_struct *task)
57579 {
57580 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
57581- struct mm_struct *mm = get_task_mm(task);
57582+ struct mm_struct *mm;
57583
57584+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57585+ if (current->exec_id != m->exec_id) {
57586+ gr_log_badprocpid("statm");
57587+ return 0;
57588+ }
57589+#endif
57590+ mm = get_task_mm(task);
57591 if (mm) {
57592 size = task_statm(mm, &shared, &text, &data, &resident);
57593 mmput(mm);
57594@@ -598,6 +666,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
57595 return 0;
57596 }
57597
57598+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
57599+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
57600+{
57601+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
57602+}
57603+#endif
57604+
57605 #ifdef CONFIG_CHECKPOINT_RESTORE
57606 static struct pid *
57607 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
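In array.c, the new task_pax() emits one extra line into /proc/<pid>/status describing the PaX flags of the task's mm, and the exec_id comparison makes stat and statm read as empty if the reading process has exec()ed a new image since opening the file, so a descriptor opened before, say, a setuid exec can no longer be used afterwards to read the new image's memory layout. PAX_RAND_FLAGS() then zeroes eip/esp/wchan and the code/stack addresses, but only for randomized mms other than the reader's own. The status line looks like this (illustrative output, one letter per flag, capital = enabled):

/*   $ grep ^PaX: /proc/self/status
 *   PaX:	PeMRs
 *
 *   P/p PAGEEXEC   E/e EMUTRAMP   M/m MPROTECT
 *   R/r RANDMMAP   S/s SEGMEXEC   ("-----" if the task has no mm)
 */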
57608diff --git a/fs/proc/base.c b/fs/proc/base.c
57609index c3834da..b402b2b 100644
57610--- a/fs/proc/base.c
57611+++ b/fs/proc/base.c
57612@@ -113,6 +113,14 @@ struct pid_entry {
57613 union proc_op op;
57614 };
57615
57616+struct getdents_callback {
57617+ struct linux_dirent __user * current_dir;
57618+ struct linux_dirent __user * previous;
57619+ struct file * file;
57620+ int count;
57621+ int error;
57622+};
57623+
57624 #define NOD(NAME, MODE, IOP, FOP, OP) { \
57625 .name = (NAME), \
57626 .len = sizeof(NAME) - 1, \
57627@@ -210,6 +218,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
57628 if (!mm->arg_end)
57629 goto out_mm; /* Shh! No looking before we're done */
57630
57631+ if (gr_acl_handle_procpidmem(task))
57632+ goto out_mm;
57633+
57634 len = mm->arg_end - mm->arg_start;
57635
57636 if (len > PAGE_SIZE)
57637@@ -237,12 +248,28 @@ out:
57638 return res;
57639 }
57640
57641+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57642+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
57643+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
57644+ _mm->pax_flags & MF_PAX_SEGMEXEC))
57645+#endif
57646+
57647 static int proc_pid_auxv(struct task_struct *task, char *buffer)
57648 {
57649 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
57650 int res = PTR_ERR(mm);
57651 if (mm && !IS_ERR(mm)) {
57652 unsigned int nwords = 0;
57653+
57654+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57655+ /* allow if we're currently ptracing this task */
57656+ if (PAX_RAND_FLAGS(mm) &&
57657+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
57658+ mmput(mm);
57659+ return 0;
57660+ }
57661+#endif
57662+
57663 do {
57664 nwords += 2;
57665 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
57666@@ -256,7 +283,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
57667 }
57668
57669
57670-#ifdef CONFIG_KALLSYMS
57671+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57672 /*
57673 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
57674 * Returns the resolved symbol. If that fails, simply return the address.
57675@@ -295,7 +322,7 @@ static void unlock_trace(struct task_struct *task)
57676 mutex_unlock(&task->signal->cred_guard_mutex);
57677 }
57678
57679-#ifdef CONFIG_STACKTRACE
57680+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57681
57682 #define MAX_STACK_TRACE_DEPTH 64
57683
57684@@ -518,7 +545,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
57685 return count;
57686 }
57687
57688-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
57689+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
57690 static int proc_pid_syscall(struct task_struct *task, char *buffer)
57691 {
57692 long nr;
57693@@ -547,7 +574,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
57694 /************************************************************************/
57695
57696 /* permission checks */
57697-static int proc_fd_access_allowed(struct inode *inode)
57698+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
57699 {
57700 struct task_struct *task;
57701 int allowed = 0;
57702@@ -557,7 +584,10 @@ static int proc_fd_access_allowed(struct inode *inode)
57703 */
57704 task = get_proc_task(inode);
57705 if (task) {
57706- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
57707+ if (log)
57708+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
57709+ else
57710+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
57711 put_task_struct(task);
57712 }
57713 return allowed;
57714@@ -588,10 +618,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
57715 struct task_struct *task,
57716 int hide_pid_min)
57717 {
57718+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
57719+ return false;
57720+
57721+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57722+ rcu_read_lock();
57723+ {
57724+ const struct cred *tmpcred = current_cred();
57725+ const struct cred *cred = __task_cred(task);
57726+
57727+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
57728+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
57729+ || in_group_p(grsec_proc_gid)
57730+#endif
57731+ ) {
57732+ rcu_read_unlock();
57733+ return true;
57734+ }
57735+ }
57736+ rcu_read_unlock();
57737+
57738+ if (!pid->hide_pid)
57739+ return false;
57740+#endif
57741+
57742 if (pid->hide_pid < hide_pid_min)
57743 return true;
57744 if (in_group_p(pid->pid_gid))
57745 return true;
57746+
57747 return ptrace_may_access(task, PTRACE_MODE_READ);
57748 }
57749
57750@@ -609,7 +664,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
57751 put_task_struct(task);
57752
57753 if (!has_perms) {
57754+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57755+ {
57756+#else
57757 if (pid->hide_pid == 2) {
57758+#endif
57759 /*
57760 * Let's make getdents(), stat(), and open()
57761 * consistent with each other. If a process
57762@@ -707,6 +766,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
57763 if (!task)
57764 return -ESRCH;
57765
57766+ if (gr_acl_handle_procpidmem(task)) {
57767+ put_task_struct(task);
57768+ return -EPERM;
57769+ }
57770+
57771 mm = mm_access(task, mode);
57772 put_task_struct(task);
57773
57774@@ -722,6 +786,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
57775
57776 file->private_data = mm;
57777
57778+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57779+ file->f_version = current->exec_id;
57780+#endif
57781+
57782 return 0;
57783 }
57784
57785@@ -743,6 +811,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
57786 ssize_t copied;
57787 char *page;
57788
57789+#ifdef CONFIG_GRKERNSEC
57790+ if (write)
57791+ return -EPERM;
57792+#endif
57793+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57794+ if (file->f_version != current->exec_id) {
57795+ gr_log_badprocpid("mem");
57796+ return 0;
57797+ }
57798+#endif
57799+
57800 if (!mm)
57801 return 0;
57802
57803@@ -755,7 +834,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
57804 goto free;
57805
57806 while (count > 0) {
57807- int this_len = min_t(int, count, PAGE_SIZE);
57808+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
57809
57810 if (write && copy_from_user(page, buf, this_len)) {
57811 copied = -EFAULT;
57812@@ -847,6 +926,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
57813 if (!mm)
57814 return 0;
57815
57816+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57817+ if (file->f_version != current->exec_id) {
57818+ gr_log_badprocpid("environ");
57819+ return 0;
57820+ }
57821+#endif
57822+
57823 page = (char *)__get_free_page(GFP_TEMPORARY);
57824 if (!page)
57825 return -ENOMEM;
57826@@ -856,7 +942,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
57827 goto free;
57828 while (count > 0) {
57829 size_t this_len, max_len;
57830- int retval;
57831+ ssize_t retval;
57832
57833 if (src >= (mm->env_end - mm->env_start))
57834 break;
57835@@ -1461,7 +1547,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
57836 int error = -EACCES;
57837
57838 /* Are we allowed to snoop on the tasks file descriptors? */
57839- if (!proc_fd_access_allowed(inode))
57840+ if (!proc_fd_access_allowed(inode, 0))
57841 goto out;
57842
57843 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
57844@@ -1505,8 +1591,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
57845 struct path path;
57846
57847 /* Are we allowed to snoop on the tasks file descriptors? */
57848- if (!proc_fd_access_allowed(inode))
57849- goto out;
57850+	/* Logging here is needed for grsec learning mode to work properly
57851+	   on chromium, but we don't want 'ps' (which readlink()s
57852+	   /proc/<pid>/fd/2 for every task in the listing) to flood the
57853+	   logs, nor to have learning grant it CAP_SYS_PTRACE, which it
57854+	   does not need for basic operation */
57855+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
57856+ if (!proc_fd_access_allowed(inode,0))
57857+ goto out;
57858+ } else {
57859+ if (!proc_fd_access_allowed(inode,1))
57860+ goto out;
57861+ }
57862
57863 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
57864 if (error)
57865@@ -1556,7 +1652,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
57866 rcu_read_lock();
57867 cred = __task_cred(task);
57868 inode->i_uid = cred->euid;
57869+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
57870+ inode->i_gid = grsec_proc_gid;
57871+#else
57872 inode->i_gid = cred->egid;
57873+#endif
57874 rcu_read_unlock();
57875 }
57876 security_task_to_inode(task, inode);
57877@@ -1592,10 +1692,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
57878 return -ENOENT;
57879 }
57880 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
57881+#ifdef CONFIG_GRKERNSEC_PROC_USER
57882+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
57883+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57884+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
57885+#endif
57886 task_dumpable(task)) {
57887 cred = __task_cred(task);
57888 stat->uid = cred->euid;
57889+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
57890+ stat->gid = grsec_proc_gid;
57891+#else
57892 stat->gid = cred->egid;
57893+#endif
57894 }
57895 }
57896 rcu_read_unlock();
57897@@ -1633,11 +1742,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
57898
57899 if (task) {
57900 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
57901+#ifdef CONFIG_GRKERNSEC_PROC_USER
57902+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
57903+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57904+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
57905+#endif
57906 task_dumpable(task)) {
57907 rcu_read_lock();
57908 cred = __task_cred(task);
57909 inode->i_uid = cred->euid;
57910+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
57911+ inode->i_gid = grsec_proc_gid;
57912+#else
57913 inode->i_gid = cred->egid;
57914+#endif
57915 rcu_read_unlock();
57916 } else {
57917 inode->i_uid = GLOBAL_ROOT_UID;
57918@@ -2196,6 +2314,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
57919 if (!task)
57920 goto out_no_task;
57921
57922+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
57923+ goto out;
57924+
57925 /*
57926 * Yes, it does not scale. And it should not. Don't add
57927 * new entries into /proc/<tgid>/ without very good reasons.
57928@@ -2240,6 +2361,9 @@ static int proc_pident_readdir(struct file *filp,
57929 if (!task)
57930 goto out_no_task;
57931
57932+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
57933+ goto out;
57934+
57935 ret = 0;
57936 i = filp->f_pos;
57937 switch (i) {
57938@@ -2653,7 +2777,7 @@ static const struct pid_entry tgid_base_stuff[] = {
57939 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
57940 #endif
57941 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
57942-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
57943+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
57944 INF("syscall", S_IRUGO, proc_pid_syscall),
57945 #endif
57946 INF("cmdline", S_IRUGO, proc_pid_cmdline),
57947@@ -2678,10 +2802,10 @@ static const struct pid_entry tgid_base_stuff[] = {
57948 #ifdef CONFIG_SECURITY
57949 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
57950 #endif
57951-#ifdef CONFIG_KALLSYMS
57952+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57953 INF("wchan", S_IRUGO, proc_pid_wchan),
57954 #endif
57955-#ifdef CONFIG_STACKTRACE
57956+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57957 ONE("stack", S_IRUGO, proc_pid_stack),
57958 #endif
57959 #ifdef CONFIG_SCHEDSTATS
57960@@ -2715,6 +2839,9 @@ static const struct pid_entry tgid_base_stuff[] = {
57961 #ifdef CONFIG_HARDWALL
57962 INF("hardwall", S_IRUGO, proc_pid_hardwall),
57963 #endif
57964+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
57965+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
57966+#endif
57967 #ifdef CONFIG_USER_NS
57968 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
57969 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
57970@@ -2847,7 +2974,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
57971 if (!inode)
57972 goto out;
57973
57974+#ifdef CONFIG_GRKERNSEC_PROC_USER
57975+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
57976+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57977+ inode->i_gid = grsec_proc_gid;
57978+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
57979+#else
57980 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
57981+#endif
57982 inode->i_op = &proc_tgid_base_inode_operations;
57983 inode->i_fop = &proc_tgid_base_operations;
57984 inode->i_flags|=S_IMMUTABLE;
57985@@ -2885,7 +3019,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
57986 if (!task)
57987 goto out;
57988
57989+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
57990+ goto out_put_task;
57991+
57992 result = proc_pid_instantiate(dir, dentry, task, NULL);
57993+out_put_task:
57994 put_task_struct(task);
57995 out:
57996 return result;
57997@@ -2948,6 +3086,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
57998 static int fake_filldir(void *buf, const char *name, int namelen,
57999 loff_t offset, u64 ino, unsigned d_type)
58000 {
58001+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
58002+ __buf->error = -EINVAL;
58003 return 0;
58004 }
58005
58006@@ -3007,7 +3147,7 @@ static const struct pid_entry tid_base_stuff[] = {
58007 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
58008 #endif
58009 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
58010-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
58011+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
58012 INF("syscall", S_IRUGO, proc_pid_syscall),
58013 #endif
58014 INF("cmdline", S_IRUGO, proc_pid_cmdline),
58015@@ -3034,10 +3174,10 @@ static const struct pid_entry tid_base_stuff[] = {
58016 #ifdef CONFIG_SECURITY
58017 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
58018 #endif
58019-#ifdef CONFIG_KALLSYMS
58020+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
58021 INF("wchan", S_IRUGO, proc_pid_wchan),
58022 #endif
58023-#ifdef CONFIG_STACKTRACE
58024+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
58025 ONE("stack", S_IRUGO, proc_pid_stack),
58026 #endif
58027 #ifdef CONFIG_SCHEDSTATS
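The base.c changes cluster around visibility: the wchan, stack and syscall entries are compiled out under HIDESYM or PROC_MEMMAP; per-pid inodes can be owned by the configured grsec_proc_gid; has_pid_permissions() and the lookup/readdir paths refuse chrooted or hidden tasks outright; and proc_fd_access_allowed() grows a log flag so routine probes do not pollute the RBAC learning logs. The flag is just a ptrace access-mode toggle, equivalent to the one-liner below (a condensed restatement of the if/else above, relying on PTRACE_MODE_NOAUDIT suppressing the audit/LSM denial record):

	allowed = ptrace_may_access(task, PTRACE_MODE_READ |
					  (log ? 0 : PTRACE_MODE_NOAUDIT));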
58028diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
58029index 82676e3..5f8518a 100644
58030--- a/fs/proc/cmdline.c
58031+++ b/fs/proc/cmdline.c
58032@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
58033
58034 static int __init proc_cmdline_init(void)
58035 {
58036+#ifdef CONFIG_GRKERNSEC_PROC_ADD
58037+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
58038+#else
58039 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
58040+#endif
58041 return 0;
58042 }
58043 module_init(proc_cmdline_init);
58044diff --git a/fs/proc/devices.c b/fs/proc/devices.c
58045index b143471..bb105e5 100644
58046--- a/fs/proc/devices.c
58047+++ b/fs/proc/devices.c
58048@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
58049
58050 static int __init proc_devices_init(void)
58051 {
58052+#ifdef CONFIG_GRKERNSEC_PROC_ADD
58053+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
58054+#else
58055 proc_create("devices", 0, NULL, &proc_devinfo_operations);
58056+#endif
58057 return 0;
58058 }
58059 module_init(proc_devices_init);
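cmdline.c and devices.c show the recurring GRKERNSEC_PROC_ADD pattern: the entry is registered through proc_create_grsec() instead of proc_create(). The helper is defined elsewhere in this patch; the sketch below is only a plausible reading of its intent (visibility restricted to root, plus the configured proc group under GRKERNSEC_PROC_USERGROUP), not its actual body:

/* Sketch of the intent only; the real proc_create_grsec() lives
 * elsewhere in this patch and also handles the group-readable case. */
static inline struct proc_dir_entry *
proc_create_grsec_sketch(const char *name, umode_t mode,
			 struct proc_dir_entry *parent,
			 const struct file_operations *fops)
{
	return proc_create(name, S_IRUSR, parent, fops); /* root-only */
}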
58060diff --git a/fs/proc/fd.c b/fs/proc/fd.c
58061index d7a4a28..0201742 100644
58062--- a/fs/proc/fd.c
58063+++ b/fs/proc/fd.c
58064@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
58065 if (!task)
58066 return -ENOENT;
58067
58068- files = get_files_struct(task);
58069+ if (!gr_acl_handle_procpidmem(task))
58070+ files = get_files_struct(task);
58071 put_task_struct(task);
58072
58073 if (files) {
58074@@ -302,11 +303,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
58075 */
58076 int proc_fd_permission(struct inode *inode, int mask)
58077 {
58078+ struct task_struct *task;
58079 int rv = generic_permission(inode, mask);
58080- if (rv == 0)
58081- return 0;
58082+
58083 if (task_pid(current) == proc_pid(inode))
58084 rv = 0;
58085+
58086+ task = get_proc_task(inode);
58087+ if (task == NULL)
58088+ return rv;
58089+
58090+ if (gr_acl_handle_procpidmem(task))
58091+ rv = -EACCES;
58092+
58093+ put_task_struct(task);
58094+
58095 return rv;
58096 }
58097
58098diff --git a/fs/proc/inode.c b/fs/proc/inode.c
58099index 073aea6..0630370 100644
58100--- a/fs/proc/inode.c
58101+++ b/fs/proc/inode.c
58102@@ -23,11 +23,17 @@
58103 #include <linux/slab.h>
58104 #include <linux/mount.h>
58105 #include <linux/magic.h>
58106+#include <linux/grsecurity.h>
58107
58108 #include <asm/uaccess.h>
58109
58110 #include "internal.h"
58111
58112+#ifdef CONFIG_PROC_SYSCTL
58113+extern const struct inode_operations proc_sys_inode_operations;
58114+extern const struct inode_operations proc_sys_dir_operations;
58115+#endif
58116+
58117 static void proc_evict_inode(struct inode *inode)
58118 {
58119 struct proc_dir_entry *de;
58120@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
58121 ns = PROC_I(inode)->ns.ns;
58122 if (ns_ops && ns)
58123 ns_ops->put(ns);
58124+
58125+#ifdef CONFIG_PROC_SYSCTL
58126+ if (inode->i_op == &proc_sys_inode_operations ||
58127+ inode->i_op == &proc_sys_dir_operations)
58128+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
58129+#endif
58130+
58131 }
58132
58133 static struct kmem_cache * proc_inode_cachep;
58134@@ -385,7 +398,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
58135 if (de->mode) {
58136 inode->i_mode = de->mode;
58137 inode->i_uid = de->uid;
58138+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
58139+ inode->i_gid = grsec_proc_gid;
58140+#else
58141 inode->i_gid = de->gid;
58142+#endif
58143 }
58144 if (de->size)
58145 inode->i_size = de->size;
58146diff --git a/fs/proc/internal.h b/fs/proc/internal.h
58147index d600fb0..3b495fe 100644
58148--- a/fs/proc/internal.h
58149+++ b/fs/proc/internal.h
58150@@ -155,6 +155,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
58151 struct pid *, struct task_struct *);
58152 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
58153 struct pid *, struct task_struct *);
58154+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
58155+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
58156+#endif
58157
58158 /*
58159 * base.c
58160diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
58161index 0a22194..a9fc8c1 100644
58162--- a/fs/proc/kcore.c
58163+++ b/fs/proc/kcore.c
58164@@ -484,9 +484,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
58165 * the addresses in the elf_phdr on our list.
58166 */
58167 start = kc_offset_to_vaddr(*fpos - elf_buflen);
58168- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
58169+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
58170+ if (tsz > buflen)
58171 tsz = buflen;
58172-
58173+
58174 while (buflen) {
58175 struct kcore_list *m;
58176
58177@@ -515,20 +516,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
58178 kfree(elf_buf);
58179 } else {
58180 if (kern_addr_valid(start)) {
58181- unsigned long n;
58182+ char *elf_buf;
58183+ mm_segment_t oldfs;
58184
58185- n = copy_to_user(buffer, (char *)start, tsz);
58186- /*
58187- * We cannot distinguish between fault on source
58188- * and fault on destination. When this happens
58189- * we clear too and hope it will trigger the
58190- * EFAULT again.
58191- */
58192- if (n) {
58193- if (clear_user(buffer + tsz - n,
58194- n))
58195+ elf_buf = kmalloc(tsz, GFP_KERNEL);
58196+ if (!elf_buf)
58197+ return -ENOMEM;
58198+ oldfs = get_fs();
58199+ set_fs(KERNEL_DS);
58200+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
58201+ set_fs(oldfs);
58202+ if (copy_to_user(buffer, elf_buf, tsz)) {
58203+ kfree(elf_buf);
58204 return -EFAULT;
58205+ }
58206 }
58207+ set_fs(oldfs);
58208+ kfree(elf_buf);
58209 } else {
58210 if (clear_user(buffer, tsz))
58211 return -EFAULT;
58212@@ -548,6 +552,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
58213
58214 static int open_kcore(struct inode *inode, struct file *filp)
58215 {
58216+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
58217+ return -EPERM;
58218+#endif
58219 if (!capable(CAP_SYS_RAWIO))
58220 return -EPERM;
58221 if (kcore_need_update)
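Two things happen in kcore.c. open_kcore() now fails unconditionally with -EPERM when GRKERNSEC_PROC_ADD or HIDESYM is set (the early return deliberately makes the rest of the function unreachable under those configs), and read_kcore() stops calling copy_to_user() directly on a kernel virtual address. The replacement bounces through a kmalloc buffer under set_fs(KERNEL_DS), which separates faults on the kernel-side source from faults on the user-side destination. The hunk itself silently skips the copy on a source fault; the simplified, self-contained variant below reports it instead:

/* Bounce-buffer read of a kernel virtual range, 3.10-era API:
 * set_fs(KERNEL_DS) lets __copy_from_user() accept a kernel address. */
static long copy_kernel_range_to_user(char __user *dst,
				      unsigned long src, size_t len)
{
	mm_segment_t oldfs;
	char *buf;
	long ret = 0;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	if (__copy_from_user(buf, (const void __user *)src, len))
		ret = -EFAULT;		/* fault on the kernel source */
	set_fs(oldfs);
	if (!ret && copy_to_user(dst, buf, len))
		ret = -EFAULT;		/* fault on the user destination */
	kfree(buf);
	return ret;
}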
58222diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
58223index 5aa847a..f77c8d4 100644
58224--- a/fs/proc/meminfo.c
58225+++ b/fs/proc/meminfo.c
58226@@ -159,7 +159,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
58227 vmi.used >> 10,
58228 vmi.largest_chunk >> 10
58229 #ifdef CONFIG_MEMORY_FAILURE
58230- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
58231+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
58232 #endif
58233 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
58234 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
58235diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
58236index ccfd99b..1b7e255 100644
58237--- a/fs/proc/nommu.c
58238+++ b/fs/proc/nommu.c
58239@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
58240 if (len < 1)
58241 len = 1;
58242 seq_printf(m, "%*c", len, ' ');
58243- seq_path(m, &file->f_path, "");
58244+ seq_path(m, &file->f_path, "\n\\");
58245 }
58246
58247 seq_putc(m, '\n');
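The nommu.c one-liner (repeated for task_mmu.c below) widens seq_path()'s escape set from the empty string to "\n\\". seq_path() octal-escapes any character found in its third argument, so a mapped file whose name embeds a newline can no longer inject what looks like an additional, attacker-chosen line into /proc/<pid>/maps-style output; escaping the backslash as well keeps the escaping itself unambiguous:

/* seq_path(m, &file->f_path, "\n\\");
 *
 * A file named  "x\n08048000-... [stack]"  is now rendered with the
 * newline as \012 on a single line, instead of forging a second
 * entry in the listing. */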
58248diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
58249index 986e832..6e8e859 100644
58250--- a/fs/proc/proc_net.c
58251+++ b/fs/proc/proc_net.c
58252@@ -23,6 +23,7 @@
58253 #include <linux/nsproxy.h>
58254 #include <net/net_namespace.h>
58255 #include <linux/seq_file.h>
58256+#include <linux/grsecurity.h>
58257
58258 #include "internal.h"
58259
58260@@ -109,6 +110,17 @@ static struct net *get_proc_task_net(struct inode *dir)
58261 struct task_struct *task;
58262 struct nsproxy *ns;
58263 struct net *net = NULL;
58264+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58265+ const struct cred *cred = current_cred();
58266+#endif
58267+
58268+#ifdef CONFIG_GRKERNSEC_PROC_USER
58269+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
58270+ return net;
58271+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58272+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
58273+ return net;
58274+#endif
58275
58276 rcu_read_lock();
58277 task = pid_task(proc_pid(dir), PIDTYPE_PID);
58278diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
58279index ac05f33..1e6dc7e 100644
58280--- a/fs/proc/proc_sysctl.c
58281+++ b/fs/proc/proc_sysctl.c
58282@@ -13,11 +13,15 @@
58283 #include <linux/module.h>
58284 #include "internal.h"
58285
58286+extern int gr_handle_chroot_sysctl(const int op);
58287+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
58288+ const int op);
58289+
58290 static const struct dentry_operations proc_sys_dentry_operations;
58291 static const struct file_operations proc_sys_file_operations;
58292-static const struct inode_operations proc_sys_inode_operations;
58293+const struct inode_operations proc_sys_inode_operations;
58294 static const struct file_operations proc_sys_dir_file_operations;
58295-static const struct inode_operations proc_sys_dir_operations;
58296+const struct inode_operations proc_sys_dir_operations;
58297
58298 void proc_sys_poll_notify(struct ctl_table_poll *poll)
58299 {
58300@@ -467,6 +471,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
58301
58302 err = NULL;
58303 d_set_d_op(dentry, &proc_sys_dentry_operations);
58304+
58305+ gr_handle_proc_create(dentry, inode);
58306+
58307 d_add(dentry, inode);
58308
58309 out:
58310@@ -482,6 +489,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
58311 struct inode *inode = file_inode(filp);
58312 struct ctl_table_header *head = grab_header(inode);
58313 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
58314+ int op = write ? MAY_WRITE : MAY_READ;
58315 ssize_t error;
58316 size_t res;
58317
58318@@ -493,7 +501,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
58319 * and won't be until we finish.
58320 */
58321 error = -EPERM;
58322- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
58323+ if (sysctl_perm(head, table, op))
58324 goto out;
58325
58326 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
58327@@ -501,6 +509,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
58328 if (!table->proc_handler)
58329 goto out;
58330
58331+#ifdef CONFIG_GRKERNSEC
58332+ error = -EPERM;
58333+ if (gr_handle_chroot_sysctl(op))
58334+ goto out;
58335+ dget(filp->f_path.dentry);
58336+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
58337+ dput(filp->f_path.dentry);
58338+ goto out;
58339+ }
58340+ dput(filp->f_path.dentry);
58341+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
58342+ goto out;
58343+ if (write && !capable(CAP_SYS_ADMIN))
58344+ goto out;
58345+#endif
58346+
58347 /* careful: calling conventions are nasty here */
58348 res = count;
58349 error = table->proc_handler(table, write, buf, &res, ppos);
58350@@ -598,6 +622,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
58351 return -ENOMEM;
58352 } else {
58353 d_set_d_op(child, &proc_sys_dentry_operations);
58354+
58355+ gr_handle_proc_create(child, inode);
58356+
58357 d_add(child, inode);
58358 }
58359 } else {
58360@@ -641,6 +668,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
58361 if ((*pos)++ < file->f_pos)
58362 return 0;
58363
58364+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
58365+ return 0;
58366+
58367 if (unlikely(S_ISLNK(table->mode)))
58368 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
58369 else
58370@@ -751,6 +781,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
58371 if (IS_ERR(head))
58372 return PTR_ERR(head);
58373
58374+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
58375+ return -ENOENT;
58376+
58377 generic_fillattr(inode, stat);
58378 if (table)
58379 stat->mode = (stat->mode & S_IFMT) | table->mode;
58380@@ -773,13 +806,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
58381 .llseek = generic_file_llseek,
58382 };
58383
58384-static const struct inode_operations proc_sys_inode_operations = {
58385+const struct inode_operations proc_sys_inode_operations = {
58386 .permission = proc_sys_permission,
58387 .setattr = proc_sys_setattr,
58388 .getattr = proc_sys_getattr,
58389 };
58390
58391-static const struct inode_operations proc_sys_dir_operations = {
58392+const struct inode_operations proc_sys_dir_operations = {
58393 .lookup = proc_sys_lookup,
58394 .permission = proc_sys_permission,
58395 .setattr = proc_sys_setattr,
58396@@ -855,7 +888,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
58397 static struct ctl_dir *new_dir(struct ctl_table_set *set,
58398 const char *name, int namelen)
58399 {
58400- struct ctl_table *table;
58401+ ctl_table_no_const *table;
58402 struct ctl_dir *new;
58403 struct ctl_node *node;
58404 char *new_name;
58405@@ -867,7 +900,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
58406 return NULL;
58407
58408 node = (struct ctl_node *)(new + 1);
58409- table = (struct ctl_table *)(node + 1);
58410+ table = (ctl_table_no_const *)(node + 1);
58411 new_name = (char *)(table + 2);
58412 memcpy(new_name, name, namelen);
58413 new_name[namelen] = '\0';
58414@@ -1036,7 +1069,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
58415 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
58416 struct ctl_table_root *link_root)
58417 {
58418- struct ctl_table *link_table, *entry, *link;
58419+ ctl_table_no_const *link_table, *link;
58420+ struct ctl_table *entry;
58421 struct ctl_table_header *links;
58422 struct ctl_node *node;
58423 char *link_name;
58424@@ -1059,7 +1093,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
58425 return NULL;
58426
58427 node = (struct ctl_node *)(links + 1);
58428- link_table = (struct ctl_table *)(node + nr_entries);
58429+ link_table = (ctl_table_no_const *)(node + nr_entries);
58430 link_name = (char *)&link_table[nr_entries + 1];
58431
58432 for (link = link_table, entry = table; entry->procname; link++, entry++) {
58433@@ -1307,8 +1341,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
58434 struct ctl_table_header ***subheader, struct ctl_table_set *set,
58435 struct ctl_table *table)
58436 {
58437- struct ctl_table *ctl_table_arg = NULL;
58438- struct ctl_table *entry, *files;
58439+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
58440+ struct ctl_table *entry;
58441 int nr_files = 0;
58442 int nr_dirs = 0;
58443 int err = -ENOMEM;
58444@@ -1320,10 +1354,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
58445 nr_files++;
58446 }
58447
58448- files = table;
58449 /* If there are mixed files and directories we need a new table */
58450 if (nr_dirs && nr_files) {
58451- struct ctl_table *new;
58452+ ctl_table_no_const *new;
58453 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
58454 GFP_KERNEL);
58455 if (!files)
58456@@ -1341,7 +1374,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
58457 /* Register everything except a directory full of subdirectories */
58458 if (nr_files || !nr_dirs) {
58459 struct ctl_table_header *header;
58460- header = __register_sysctl_table(set, path, files);
58461+ header = __register_sysctl_table(set, path, files ? files : table);
58462 if (!header) {
58463 kfree(ctl_table_arg);
58464 goto out;
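proc_sysctl.c picks up two themes. First, sysctl reads and writes pass through chroot and RBAC gates, and any write now additionally requires CAP_SYS_ADMIN under GRKERNSEC. Second, the ctl_table_no_const churn supports the constify gcc plugin: with the plugin active, structures like ctl_table are forced const at compile time, so code that legitimately builds tables at runtime (new_dir(), new_links(), register_leaf_sysctl_tables()) must use a writable alias. The alias is defined elsewhere in this patch along these lines:

/* Shape as used above: __no_const expands to the constify plugin's
 * "leave this writable" attribute when the plugin is active, and to
 * nothing otherwise, so the alias is harmless on plain compilers. */
typedef struct ctl_table __no_const ctl_table_no_const;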
58465diff --git a/fs/proc/root.c b/fs/proc/root.c
58466index 41a6ea9..23eaa92 100644
58467--- a/fs/proc/root.c
58468+++ b/fs/proc/root.c
58469@@ -182,7 +182,15 @@ void __init proc_root_init(void)
58470 #ifdef CONFIG_PROC_DEVICETREE
58471 proc_device_tree_init();
58472 #endif
58473+#ifdef CONFIG_GRKERNSEC_PROC_ADD
58474+#ifdef CONFIG_GRKERNSEC_PROC_USER
58475+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
58476+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58477+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
58478+#endif
58479+#else
58480 proc_mkdir("bus", NULL);
58481+#endif
58482 proc_sys_init();
58483 }
58484
58485diff --git a/fs/proc/self.c b/fs/proc/self.c
58486index 6b6a993..807cccc 100644
58487--- a/fs/proc/self.c
58488+++ b/fs/proc/self.c
58489@@ -39,7 +39,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
58490 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
58491 void *cookie)
58492 {
58493- char *s = nd_get_link(nd);
58494+ const char *s = nd_get_link(nd);
58495 if (!IS_ERR(s))
58496 kfree(s);
58497 }
58498diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
58499index 3e636d8..350cc48 100644
58500--- a/fs/proc/task_mmu.c
58501+++ b/fs/proc/task_mmu.c
58502@@ -11,12 +11,19 @@
58503 #include <linux/rmap.h>
58504 #include <linux/swap.h>
58505 #include <linux/swapops.h>
58506+#include <linux/grsecurity.h>
58507
58508 #include <asm/elf.h>
58509 #include <asm/uaccess.h>
58510 #include <asm/tlbflush.h>
58511 #include "internal.h"
58512
58513+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58514+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
58515+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
58516+ _mm->pax_flags & MF_PAX_SEGMEXEC))
58517+#endif
58518+
58519 void task_mem(struct seq_file *m, struct mm_struct *mm)
58520 {
58521 unsigned long data, text, lib, swap;
58522@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
58523 "VmExe:\t%8lu kB\n"
58524 "VmLib:\t%8lu kB\n"
58525 "VmPTE:\t%8lu kB\n"
58526- "VmSwap:\t%8lu kB\n",
58527- hiwater_vm << (PAGE_SHIFT-10),
58528+ "VmSwap:\t%8lu kB\n"
58529+
58530+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
58531+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
58532+#endif
58533+
58534+ ,hiwater_vm << (PAGE_SHIFT-10),
58535 total_vm << (PAGE_SHIFT-10),
58536 mm->locked_vm << (PAGE_SHIFT-10),
58537 mm->pinned_vm << (PAGE_SHIFT-10),
58538@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
58539 data << (PAGE_SHIFT-10),
58540 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
58541 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
58542- swap << (PAGE_SHIFT-10));
58543+ swap << (PAGE_SHIFT-10)
58544+
58545+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
58546+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58547+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
58548+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
58549+#else
58550+ , mm->context.user_cs_base
58551+ , mm->context.user_cs_limit
58552+#endif
58553+#endif
58554+
58555+ );
58556 }
58557
58558 unsigned long task_vsize(struct mm_struct *mm)
58559@@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
58560 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
58561 }
58562
58563- /* We don't show the stack guard page in /proc/maps */
58564+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58565+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
58566+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
58567+#else
58568 start = vma->vm_start;
58569- if (stack_guard_page_start(vma, start))
58570- start += PAGE_SIZE;
58571 end = vma->vm_end;
58572- if (stack_guard_page_end(vma, end))
58573- end -= PAGE_SIZE;
58574+#endif
58575
58576 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
58577 start,
58578@@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
58579 flags & VM_WRITE ? 'w' : '-',
58580 flags & VM_EXEC ? 'x' : '-',
58581 flags & VM_MAYSHARE ? 's' : 'p',
58582+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58583+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
58584+#else
58585 pgoff,
58586+#endif
58587 MAJOR(dev), MINOR(dev), ino, &len);
58588
58589 /*
58590@@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
58591 */
58592 if (file) {
58593 pad_len_spaces(m, len);
58594- seq_path(m, &file->f_path, "\n");
58595+ seq_path(m, &file->f_path, "\n\\");
58596 goto done;
58597 }
58598
58599@@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
58600 * Thread stack in /proc/PID/task/TID/maps or
58601 * the main process stack.
58602 */
58603- if (!is_pid || (vma->vm_start <= mm->start_stack &&
58604- vma->vm_end >= mm->start_stack)) {
58605+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
58606+ (vma->vm_start <= mm->start_stack &&
58607+ vma->vm_end >= mm->start_stack)) {
58608 name = "[stack]";
58609 } else {
58610 /* Thread stack in /proc/PID/maps */
58611@@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
58612 struct proc_maps_private *priv = m->private;
58613 struct task_struct *task = priv->task;
58614
58615+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58616+ if (current->exec_id != m->exec_id) {
58617+ gr_log_badprocpid("maps");
58618+ return 0;
58619+ }
58620+#endif
58621+
58622 show_map_vma(m, vma, is_pid);
58623
58624 if (m->count < m->size) /* vma is copied successfully */
58625@@ -589,12 +625,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
58626 .private = &mss,
58627 };
58628
58629+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58630+ if (current->exec_id != m->exec_id) {
58631+ gr_log_badprocpid("smaps");
58632+ return 0;
58633+ }
58634+#endif
58635 memset(&mss, 0, sizeof mss);
58636- mss.vma = vma;
58637- /* mmap_sem is held in m_start */
58638- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
58639- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
58640-
58641+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58642+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
58643+#endif
58644+ mss.vma = vma;
58645+ /* mmap_sem is held in m_start */
58646+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
58647+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
58648+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58649+ }
58650+#endif
58651 show_map_vma(m, vma, is_pid);
58652
58653 seq_printf(m,
58654@@ -612,7 +659,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
58655 "KernelPageSize: %8lu kB\n"
58656 "MMUPageSize: %8lu kB\n"
58657 "Locked: %8lu kB\n",
58658+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58659+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
58660+#else
58661 (vma->vm_end - vma->vm_start) >> 10,
58662+#endif
58663 mss.resident >> 10,
58664 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
58665 mss.shared_clean >> 10,
58666@@ -792,14 +843,14 @@ typedef struct {
58667 } pagemap_entry_t;
58668
58669 struct pagemapread {
58670- int pos, len;
58671+ int pos, len; /* units: PM_ENTRY_BYTES, not bytes */
58672 pagemap_entry_t *buffer;
58673 };
58674
58675 #define PAGEMAP_WALK_SIZE (PMD_SIZE)
58676 #define PAGEMAP_WALK_MASK (PMD_MASK)
58677
58678-#define PM_ENTRY_BYTES sizeof(u64)
58679+#define PM_ENTRY_BYTES sizeof(pagemap_entry_t)
58680 #define PM_STATUS_BITS 3
58681 #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS)
58682 #define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
58683@@ -1038,8 +1089,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
58684 if (!count)
58685 goto out_task;
58686
58687- pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
58688- pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
58689+ pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
58690+ pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
58691 ret = -ENOMEM;
58692 if (!pm.buffer)
58693 goto out_task;
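The sizing fix above changes pm.len from a byte count to an entry count: with PAGEMAP_WALK_SIZE equal to PMD_SIZE (2 MiB with 4 KiB pages), PAGEMAP_WALK_SIZE >> PAGE_SHIFT is 512 entries, and the allocation becomes 512 * sizeof(pagemap_entry_t) = 4096 bytes, the same total as before but now expressed in the units the page walker indexes by. A minimal userspace sketch of the resulting ABI follows; it assumes only the documented /proc/<pid>/pagemap layout of one 64-bit entry per virtual page (i.e. PM_ENTRY_BYTES == sizeof(u64)), not anything specific to this patch.

/* pagemap_peek.c - read the pagemap entry for one of our own pages. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        long psize = sysconf(_SC_PAGESIZE);
        char *page = malloc(psize);
        uint64_t entry;
        int fd;

        if (!page)
                return 1;
        page[0] = 1;    /* touch the page so it is present */

        fd = open("/proc/self/pagemap", O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* the entry for virtual page N lives at byte offset N * 8 */
        if (pread(fd, &entry, sizeof(entry),
                  ((uintptr_t)page / psize) * sizeof(uint64_t)) != sizeof(entry)) {
                perror("pread");
                return 1;
        }
        printf("pagemap entry: %#llx (present bit 63 = %llu)\n",
               (unsigned long long)entry, (unsigned long long)(entry >> 63));
        close(fd);
        return 0;
}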
58694@@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
58695 int n;
58696 char buffer[50];
58697
58698+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58699+ if (current->exec_id != m->exec_id) {
58700+ gr_log_badprocpid("numa_maps");
58701+ return 0;
58702+ }
58703+#endif
58704+
58705 if (!mm)
58706 return 0;
58707
58708@@ -1281,11 +1339,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
58709 mpol_to_str(buffer, sizeof(buffer), pol);
58710 mpol_cond_put(pol);
58711
58712+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58713+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
58714+#else
58715 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
58716+#endif
58717
58718 if (file) {
58719 seq_printf(m, " file=");
58720- seq_path(m, &file->f_path, "\n\t= ");
58721+ seq_path(m, &file->f_path, "\n\t\\= ");
58722 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
58723 seq_printf(m, " heap");
58724 } else {
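Note that PAX_RAND_FLAGS() only fires when the mm being shown is not the reader's own (_mm != current->mm), so a task still sees real addresses in /proc/self/maps while cross-task reads of an ASLR-protected target come back zeroed. A small sketch to observe the difference; this is a hypothetical demo, and the zeroing is only visible on a kernel with CONFIG_GRKERNSEC_PROC_MEMMAP and PaX RANDMMAP or SEGMEXEC active on the target task.

/* maps_peek.c - dump /proc/<pid>/maps (defaults to self). */
#include <stdio.h>

int main(int argc, char **argv)
{
        char path[64], line[256];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%s/maps",
                 argc > 1 ? argv[1] : "self");
        f = fopen(path, "r");
        if (!f) {
                perror(path);
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}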
58725diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
58726index 56123a6..5a2f6ec 100644
58727--- a/fs/proc/task_nommu.c
58728+++ b/fs/proc/task_nommu.c
58729@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
58730 else
58731 bytes += kobjsize(mm);
58732
58733- if (current->fs && current->fs->users > 1)
58734+ if (current->fs && atomic_read(&current->fs->users) > 1)
58735 sbytes += kobjsize(current->fs);
58736 else
58737 bytes += kobjsize(current->fs);
58738@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
58739
58740 if (file) {
58741 pad_len_spaces(m, len);
58742- seq_path(m, &file->f_path, "");
58743+ seq_path(m, &file->f_path, "\n\\");
58744 } else if (mm) {
58745 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
58746
58747diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
58748index 17f7e08..e4b1529 100644
58749--- a/fs/proc/vmcore.c
58750+++ b/fs/proc/vmcore.c
58751@@ -99,9 +99,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
58752 nr_bytes = count;
58753
58754 /* If pfn is not ram, return zeros for sparse dump files */
58755- if (pfn_is_ram(pfn) == 0)
58756- memset(buf, 0, nr_bytes);
58757- else {
58758+ if (pfn_is_ram(pfn) == 0) {
58759+ if (userbuf) {
58760+ if (clear_user((char __force_user *)buf, nr_bytes))
58761+ return -EFAULT;
58762+ } else
58763+ memset(buf, 0, nr_bytes);
58764+ } else {
58765 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
58766 offset, userbuf);
58767 if (tmp < 0)
58768@@ -186,7 +190,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
58769 if (tsz > nr_bytes)
58770 tsz = nr_bytes;
58771
58772- tmp = read_from_oldmem(buffer, tsz, &start, 1);
58773+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, 1);
58774 if (tmp < 0)
58775 return tmp;
58776 buflen -= tsz;
58777diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
58778index b00fcc9..e0c6381 100644
58779--- a/fs/qnx6/qnx6.h
58780+++ b/fs/qnx6/qnx6.h
58781@@ -74,7 +74,7 @@ enum {
58782 BYTESEX_BE,
58783 };
58784
58785-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
58786+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
58787 {
58788 if (sbi->s_bytesex == BYTESEX_LE)
58789 return le64_to_cpu((__force __le64)n);
58790@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
58791 return (__force __fs64)cpu_to_be64(n);
58792 }
58793
58794-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
58795+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
58796 {
58797 if (sbi->s_bytesex == BYTESEX_LE)
58798 return le32_to_cpu((__force __le32)n);
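The __intentional_overflow(-1) annotations added to these endianness helpers (and to similar ones in fs/sysv, fs/ubifs, and fs/ufs below) are consumed by PaX's size_overflow GCC plugin: they whitelist functions whose integer arithmetic may legitimately wrap so the plugin does not instrument or report them. The stub below is only an assumption about the annotation's shape; the real definition lives in the grsecurity compiler headers, and the only safe guess is that it must expand to nothing when the plugin is absent.

/* Compile-checkable stub of the annotation's expected shape. */
#ifndef __intentional_overflow
#define __intentional_overflow(...)    /* no plugin: no-op */
#endif

static inline unsigned int __intentional_overflow(-1)
demo_swab32(unsigned int x)
{
        /* byte swap: the shifts here may "overflow" by design */
        return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
               ((x << 8) & 0x00ff0000u) | (x << 24);
}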
58799diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
58800index 16e8abb..2dcf914 100644
58801--- a/fs/quota/netlink.c
58802+++ b/fs/quota/netlink.c
58803@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
58804 void quota_send_warning(struct kqid qid, dev_t dev,
58805 const char warntype)
58806 {
58807- static atomic_t seq;
58808+ static atomic_unchecked_t seq;
58809 struct sk_buff *skb;
58810 void *msg_head;
58811 int ret;
58812@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
58813 "VFS: Not enough memory to send quota warning.\n");
58814 return;
58815 }
58816- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
58817+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
58818 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
58819 if (!msg_head) {
58820 printk(KERN_ERR
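The sequence number handed to genlmsg_put() is allowed to wrap around; under PaX REFCOUNT, plain atomic_t increments are instrumented against overflow, so deliberately wrapping counters like this one are switched to atomic_unchecked_t. A userspace C11 analogy of the distinction follows; it is a hypothetical sketch of the concept, not the kernel API.

/* seq_demo.c - a deliberately wrapping sequence counter. */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq = UINT_MAX - 1;  /* start near the wrap point */

static unsigned int next_seq(void)
{
        /* unsigned wrap is well-defined and harmless for a message
         * sequence number, so no overflow check is wanted here */
        return atomic_fetch_add(&seq, 1) + 1;
}

int main(void)
{
        for (int i = 0; i < 4; i++)
                printf("%u\n", next_seq());     /* 4294967295, 0, 1, 2 */
        return 0;
}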
58821diff --git a/fs/read_write.c b/fs/read_write.c
58822index 2cefa41..c7e2fe0 100644
58823--- a/fs/read_write.c
58824+++ b/fs/read_write.c
58825@@ -411,7 +411,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
58826
58827 old_fs = get_fs();
58828 set_fs(get_ds());
58829- p = (__force const char __user *)buf;
58830+ p = (const char __force_user *)buf;
58831 if (count > MAX_RW_COUNT)
58832 count = MAX_RW_COUNT;
58833 if (file->f_op->write)
58834diff --git a/fs/readdir.c b/fs/readdir.c
58835index fee38e0..12fdf47 100644
58836--- a/fs/readdir.c
58837+++ b/fs/readdir.c
58838@@ -17,6 +17,7 @@
58839 #include <linux/security.h>
58840 #include <linux/syscalls.h>
58841 #include <linux/unistd.h>
58842+#include <linux/namei.h>
58843
58844 #include <asm/uaccess.h>
58845
58846@@ -67,6 +68,7 @@ struct old_linux_dirent {
58847
58848 struct readdir_callback {
58849 struct old_linux_dirent __user * dirent;
58850+ struct file * file;
58851 int result;
58852 };
58853
58854@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
58855 buf->result = -EOVERFLOW;
58856 return -EOVERFLOW;
58857 }
58858+
58859+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
58860+ return 0;
58861+
58862 buf->result++;
58863 dirent = buf->dirent;
58864 if (!access_ok(VERIFY_WRITE, dirent,
58865@@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
58866
58867 buf.result = 0;
58868 buf.dirent = dirent;
58869+ buf.file = f.file;
58870
58871 error = vfs_readdir(f.file, fillonedir, &buf);
58872 if (buf.result)
58873@@ -139,6 +146,7 @@ struct linux_dirent {
58874 struct getdents_callback {
58875 struct linux_dirent __user * current_dir;
58876 struct linux_dirent __user * previous;
58877+ struct file * file;
58878 int count;
58879 int error;
58880 };
58881@@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
58882 buf->error = -EOVERFLOW;
58883 return -EOVERFLOW;
58884 }
58885+
58886+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
58887+ return 0;
58888+
58889 dirent = buf->previous;
58890 if (dirent) {
58891 if (__put_user(offset, &dirent->d_off))
58892@@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
58893 buf.previous = NULL;
58894 buf.count = count;
58895 buf.error = 0;
58896+ buf.file = f.file;
58897
58898 error = vfs_readdir(f.file, filldir, &buf);
58899 if (error >= 0)
58900@@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
58901 struct getdents_callback64 {
58902 struct linux_dirent64 __user * current_dir;
58903 struct linux_dirent64 __user * previous;
58904+ struct file *file;
58905 int count;
58906 int error;
58907 };
58908@@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
58909 buf->error = -EINVAL; /* only used if we fail.. */
58910 if (reclen > buf->count)
58911 return -EINVAL;
58912+
58913+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
58914+ return 0;
58915+
58916 dirent = buf->previous;
58917 if (dirent) {
58918 if (__put_user(offset, &dirent->d_off))
58919@@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
58920
58921 buf.current_dir = dirent;
58922 buf.previous = NULL;
58923+ buf.file = f.file;
58924 buf.count = count;
58925 buf.error = 0;
58926
58927@@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
58928 error = buf.error;
58929 lastdirent = buf.previous;
58930 if (lastdirent) {
58931- typeof(lastdirent->d_off) d_off = f.file->f_pos;
58932+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
58933 if (__put_user(d_off, &lastdirent->d_off))
58934 error = -EFAULT;
58935 else
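Each callback struct above gains a struct file pointer so gr_acl_handle_filldir() can consult the RBAC policy per directory entry and silently skip names the policy hides; the syscall ABI itself is untouched, and suppressed entries simply never appear in the returned buffer. A sketch of that ABI via the raw getdents64 syscall; the struct layout follows the getdents64(2) man page.

/* getdents_demo.c - list a directory via raw getdents64. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

struct linux_dirent64 {
        unsigned long long d_ino;
        long long          d_off;
        unsigned short     d_reclen;
        unsigned char      d_type;
        char               d_name[];
};

int main(int argc, char **argv)
{
        char buf[4096];
        int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY | O_DIRECTORY);
        long n;

        if (fd < 0)
                return 1;
        while ((n = syscall(SYS_getdents64, fd, buf, sizeof(buf))) > 0) {
                for (long off = 0; off < n; ) {
                        struct linux_dirent64 *d =
                                (struct linux_dirent64 *)(buf + off);
                        puts(d->d_name);
                        off += d->d_reclen;
                }
        }
        close(fd);
        return 0;
}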
58936diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
58937index 2b7882b..1c5ef48 100644
58938--- a/fs/reiserfs/do_balan.c
58939+++ b/fs/reiserfs/do_balan.c
58940@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
58941 return;
58942 }
58943
58944- atomic_inc(&(fs_generation(tb->tb_sb)));
58945+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
58946 do_balance_starts(tb);
58947
58948 /* balance leaf returns 0 except if combining L R and S into
58949diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
58950index 1d48974..2f8f4e0 100644
58951--- a/fs/reiserfs/procfs.c
58952+++ b/fs/reiserfs/procfs.c
58953@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
58954 "SMALL_TAILS " : "NO_TAILS ",
58955 replay_only(sb) ? "REPLAY_ONLY " : "",
58956 convert_reiserfs(sb) ? "CONV " : "",
58957- atomic_read(&r->s_generation_counter),
58958+ atomic_read_unchecked(&r->s_generation_counter),
58959 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
58960 SF(s_do_balance), SF(s_unneeded_left_neighbor),
58961 SF(s_good_search_by_key_reada), SF(s_bmaps),
58962diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
58963index 157e474..65a6114 100644
58964--- a/fs/reiserfs/reiserfs.h
58965+++ b/fs/reiserfs/reiserfs.h
58966@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
58967 /* Comment? -Hans */
58968 wait_queue_head_t s_wait;
58969 /* To be obsoleted soon by per buffer seals.. -Hans */
58970- atomic_t s_generation_counter; // increased by one every time the
58971+ atomic_unchecked_t s_generation_counter; // increased by one every time the
58972 // tree gets re-balanced
58973 unsigned long s_properties; /* File system properties. Currently holds
58974 on-disk FS format */
58975@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
58976 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
58977
58978 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
58979-#define get_generation(s) atomic_read (&fs_generation(s))
58980+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
58981 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
58982 #define __fs_changed(gen,s) (gen != get_generation (s))
58983 #define fs_changed(gen,s) \
58984diff --git a/fs/select.c b/fs/select.c
58985index 8c1c96c..a0f9b6d 100644
58986--- a/fs/select.c
58987+++ b/fs/select.c
58988@@ -20,6 +20,7 @@
58989 #include <linux/export.h>
58990 #include <linux/slab.h>
58991 #include <linux/poll.h>
58992+#include <linux/security.h>
58993 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
58994 #include <linux/file.h>
58995 #include <linux/fdtable.h>
58996@@ -827,6 +828,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
58997 struct poll_list *walk = head;
58998 unsigned long todo = nfds;
58999
59000+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
59001 if (nfds > rlimit(RLIMIT_NOFILE))
59002 return -EINVAL;
59003
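do_sys_poll() already rejects nfds above RLIMIT_NOFILE with -EINVAL; the inserted gr_learn_resource() call records the attempted value for grsecurity's resource-learning mode before that check fires. The boundary is easy to observe from userspace; a minimal sketch:

/* poll_limit_demo.c - the RLIMIT_NOFILE bound on poll(). */
#include <errno.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl;
        struct pollfd *fds;

        if (getrlimit(RLIMIT_NOFILE, &rl) < 0)
                return 1;
        fds = calloc(rl.rlim_cur + 1, sizeof(*fds));
        if (!fds)
                return 1;
        /* one descriptor past the limit: rejected before fds is read */
        if (poll(fds, rl.rlim_cur + 1, 0) < 0)
                printf("poll(nfds=%llu): %s\n",
                       (unsigned long long)rl.rlim_cur + 1,
                       strerror(errno));
        free(fds);
        return 0;
}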
59004diff --git a/fs/seq_file.c b/fs/seq_file.c
59005index 774c1eb..b67582a 100644
59006--- a/fs/seq_file.c
59007+++ b/fs/seq_file.c
59008@@ -10,6 +10,7 @@
59009 #include <linux/seq_file.h>
59010 #include <linux/slab.h>
59011 #include <linux/cred.h>
59012+#include <linux/sched.h>
59013
59014 #include <asm/uaccess.h>
59015 #include <asm/page.h>
59016@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
59017 #ifdef CONFIG_USER_NS
59018 p->user_ns = file->f_cred->user_ns;
59019 #endif
59020+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59021+ p->exec_id = current->exec_id;
59022+#endif
59023
59024 /*
59025 * Wrappers around seq_open(e.g. swaps_open) need to be
59026@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
59027 return 0;
59028 }
59029 if (!m->buf) {
59030- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
59031+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
59032 if (!m->buf)
59033 return -ENOMEM;
59034 }
59035@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
59036 Eoverflow:
59037 m->op->stop(m, p);
59038 kfree(m->buf);
59039- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
59040+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
59041 return !m->buf ? -ENOMEM : -EAGAIN;
59042 }
59043
59044@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
59045
59046 /* grab buffer if we didn't have one */
59047 if (!m->buf) {
59048- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
59049+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
59050 if (!m->buf)
59051 goto Enomem;
59052 }
59053@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
59054 goto Fill;
59055 m->op->stop(m, p);
59056 kfree(m->buf);
59057- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
59058+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
59059 if (!m->buf)
59060 goto Enomem;
59061 m->count = 0;
59062@@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
59063 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
59064 void *data)
59065 {
59066- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
59067+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
59068 int res = -ENOMEM;
59069
59070 if (op) {
59071diff --git a/fs/splice.c b/fs/splice.c
59072index d37431d..81c3044 100644
59073--- a/fs/splice.c
59074+++ b/fs/splice.c
59075@@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
59076 pipe_lock(pipe);
59077
59078 for (;;) {
59079- if (!pipe->readers) {
59080+ if (!atomic_read(&pipe->readers)) {
59081 send_sig(SIGPIPE, current, 0);
59082 if (!ret)
59083 ret = -EPIPE;
59084@@ -219,7 +219,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
59085 page_nr++;
59086 ret += buf->len;
59087
59088- if (pipe->files)
59089+ if (atomic_read(&pipe->files))
59090 do_wakeup = 1;
59091
59092 if (!--spd->nr_pages)
59093@@ -250,9 +250,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
59094 do_wakeup = 0;
59095 }
59096
59097- pipe->waiting_writers++;
59098+ atomic_inc(&pipe->waiting_writers);
59099 pipe_wait(pipe);
59100- pipe->waiting_writers--;
59101+ atomic_dec(&pipe->waiting_writers);
59102 }
59103
59104 pipe_unlock(pipe);
59105@@ -565,7 +565,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
59106 old_fs = get_fs();
59107 set_fs(get_ds());
59108 /* The cast to a user pointer is valid due to the set_fs() */
59109- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
59110+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
59111 set_fs(old_fs);
59112
59113 return res;
59114@@ -580,7 +580,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
59115 old_fs = get_fs();
59116 set_fs(get_ds());
59117 /* The cast to a user pointer is valid due to the set_fs() */
59118- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
59119+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
59120 set_fs(old_fs);
59121
59122 return res;
59123@@ -633,7 +633,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
59124 goto err;
59125
59126 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
59127- vec[i].iov_base = (void __user *) page_address(page);
59128+ vec[i].iov_base = (void __force_user *) page_address(page);
59129 vec[i].iov_len = this_len;
59130 spd.pages[i] = page;
59131 spd.nr_pages++;
59132@@ -829,7 +829,7 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
59133 ops->release(pipe, buf);
59134 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
59135 pipe->nrbufs--;
59136- if (pipe->files)
59137+ if (atomic_read(&pipe->files))
59138 sd->need_wakeup = true;
59139 }
59140
59141@@ -854,10 +854,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
59142 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
59143 {
59144 while (!pipe->nrbufs) {
59145- if (!pipe->writers)
59146+ if (!atomic_read(&pipe->writers))
59147 return 0;
59148
59149- if (!pipe->waiting_writers && sd->num_spliced)
59150+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
59151 return 0;
59152
59153 if (sd->flags & SPLICE_F_NONBLOCK)
59154@@ -1193,7 +1193,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
59155 * out of the pipe right after the splice_to_pipe(). So set
59156 * PIPE_READERS appropriately.
59157 */
59158- pipe->readers = 1;
59159+ atomic_set(&pipe->readers, 1);
59160
59161 current->splice_pipe = pipe;
59162 }
59163@@ -1769,9 +1769,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
59164 ret = -ERESTARTSYS;
59165 break;
59166 }
59167- if (!pipe->writers)
59168+ if (!atomic_read(&pipe->writers))
59169 break;
59170- if (!pipe->waiting_writers) {
59171+ if (!atomic_read(&pipe->waiting_writers)) {
59172 if (flags & SPLICE_F_NONBLOCK) {
59173 ret = -EAGAIN;
59174 break;
59175@@ -1803,7 +1803,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
59176 pipe_lock(pipe);
59177
59178 while (pipe->nrbufs >= pipe->buffers) {
59179- if (!pipe->readers) {
59180+ if (!atomic_read(&pipe->readers)) {
59181 send_sig(SIGPIPE, current, 0);
59182 ret = -EPIPE;
59183 break;
59184@@ -1816,9 +1816,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
59185 ret = -ERESTARTSYS;
59186 break;
59187 }
59188- pipe->waiting_writers++;
59189+ atomic_inc(&pipe->waiting_writers);
59190 pipe_wait(pipe);
59191- pipe->waiting_writers--;
59192+ atomic_dec(&pipe->waiting_writers);
59193 }
59194
59195 pipe_unlock(pipe);
59196@@ -1854,14 +1854,14 @@ retry:
59197 pipe_double_lock(ipipe, opipe);
59198
59199 do {
59200- if (!opipe->readers) {
59201+ if (!atomic_read(&opipe->readers)) {
59202 send_sig(SIGPIPE, current, 0);
59203 if (!ret)
59204 ret = -EPIPE;
59205 break;
59206 }
59207
59208- if (!ipipe->nrbufs && !ipipe->writers)
59209+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
59210 break;
59211
59212 /*
59213@@ -1958,7 +1958,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
59214 pipe_double_lock(ipipe, opipe);
59215
59216 do {
59217- if (!opipe->readers) {
59218+ if (!atomic_read(&opipe->readers)) {
59219 send_sig(SIGPIPE, current, 0);
59220 if (!ret)
59221 ret = -EPIPE;
59222@@ -2003,7 +2003,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
59223 * return EAGAIN if we have the potential of some data in the
59224 * future, otherwise just return 0
59225 */
59226- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
59227+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
59228 ret = -EAGAIN;
59229
59230 pipe_unlock(ipipe);
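Routing the pipe reader/writer/waiting_writers counts through atomic_t puts them under PAX_REFCOUNT's overflow instrumentation, while counters that are expected to wrap are explicitly marked _unchecked elsewhere in this patch. A userspace C11 analogy of an overflow-checked increment follows; this is a hypothetical sketch of the idea, not the kernel implementation (which traps and logs rather than returning an error).

/* refcount_demo.c - an overflow-checked counter increment. */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int readers = INT_MAX - 1;

static int checked_inc(atomic_int *v)
{
        int old = atomic_load(v);

        do {
                if (old == INT_MAX) {
                        fputs("refcount overflow detected\n", stderr);
                        return -1;      /* refuse instead of wrapping */
                }
        } while (!atomic_compare_exchange_weak(v, &old, old + 1));
        return 0;
}

int main(void)
{
        checked_inc(&readers);  /* ok: INT_MAX - 1 -> INT_MAX */
        checked_inc(&readers);  /* trips the overflow check */
        return 0;
}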
59231diff --git a/fs/stat.c b/fs/stat.c
59232index 04ce1ac..a13dd1e 100644
59233--- a/fs/stat.c
59234+++ b/fs/stat.c
59235@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
59236 stat->gid = inode->i_gid;
59237 stat->rdev = inode->i_rdev;
59238 stat->size = i_size_read(inode);
59239- stat->atime = inode->i_atime;
59240- stat->mtime = inode->i_mtime;
59241+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
59242+ stat->atime = inode->i_ctime;
59243+ stat->mtime = inode->i_ctime;
59244+ } else {
59245+ stat->atime = inode->i_atime;
59246+ stat->mtime = inode->i_mtime;
59247+ }
59248 stat->ctime = inode->i_ctime;
59249 stat->blksize = (1 << inode->i_blkbits);
59250 stat->blocks = inode->i_blocks;
59251@@ -46,8 +51,14 @@ int vfs_getattr(struct path *path, struct kstat *stat)
59252 if (retval)
59253 return retval;
59254
59255- if (inode->i_op->getattr)
59256- return inode->i_op->getattr(path->mnt, path->dentry, stat);
59257+ if (inode->i_op->getattr) {
59258+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
59259+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
59260+ stat->atime = stat->ctime;
59261+ stat->mtime = stat->ctime;
59262+ }
59263+ return retval;
59264+ }
59265
59266 generic_fillattr(inode, stat);
59267 return 0;
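With GRKERNSEC_DEVICE_SIDECHANNEL (see the Kconfig entry added later in this patch), a stat() by a task lacking CAP_MKNOD reports the device's ctime in place of atime and mtime, denying timing side channels on shared devices such as /dev/ptmx. A quick sketch to observe it; on kernels without the feature the three fields may simply differ, so equality here is suggestive rather than proof.

/* ptmx_stat_demo.c - observe stat() time sanitization on a device. */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        struct stat st;

        if (stat("/dev/ptmx", &st) < 0) {
                perror("stat");
                return 1;
        }
        printf("atime=%ld mtime=%ld ctime=%ld%s\n",
               (long)st.st_atime, (long)st.st_mtime, (long)st.st_ctime,
               (st.st_atime == st.st_ctime && st.st_mtime == st.st_ctime)
               ? "  (sanitized, or simply never touched)" : "");
        return 0;
}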
59268diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
59269index 15c68f9..36a8b3e 100644
59270--- a/fs/sysfs/bin.c
59271+++ b/fs/sysfs/bin.c
59272@@ -235,13 +235,13 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
59273 return ret;
59274 }
59275
59276-static int bin_access(struct vm_area_struct *vma, unsigned long addr,
59277- void *buf, int len, int write)
59278+static ssize_t bin_access(struct vm_area_struct *vma, unsigned long addr,
59279+ void *buf, size_t len, int write)
59280 {
59281 struct file *file = vma->vm_file;
59282 struct bin_buffer *bb = file->private_data;
59283 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
59284- int ret;
59285+ ssize_t ret;
59286
59287 if (!bb->vm_ops)
59288 return -EINVAL;
59289diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
59290index e8e0e71..79c28ac5 100644
59291--- a/fs/sysfs/dir.c
59292+++ b/fs/sysfs/dir.c
59293@@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
59294 *
59295 * Returns 31 bit hash of ns + name (so it fits in an off_t )
59296 */
59297-static unsigned int sysfs_name_hash(const void *ns, const char *name)
59298+static unsigned int sysfs_name_hash(const void *ns, const unsigned char *name)
59299 {
59300 unsigned long hash = init_name_hash();
59301 unsigned int len = strlen(name);
59302@@ -679,6 +679,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
59303 struct sysfs_dirent *sd;
59304 int rc;
59305
59306+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
59307+ const char *parent_name = parent_sd->s_name;
59308+
59309+ mode = S_IFDIR | S_IRWXU;
59310+
59311+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
59312+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
59313+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
59314+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
59315+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
59316+#endif
59317+
59318 /* allocate */
59319 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
59320 if (!sd)
59321diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
59322index 602f56d..6853db8 100644
59323--- a/fs/sysfs/file.c
59324+++ b/fs/sysfs/file.c
59325@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
59326
59327 struct sysfs_open_dirent {
59328 atomic_t refcnt;
59329- atomic_t event;
59330+ atomic_unchecked_t event;
59331 wait_queue_head_t poll;
59332 struct list_head buffers; /* goes through sysfs_buffer.list */
59333 };
59334@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
59335 if (!sysfs_get_active(attr_sd))
59336 return -ENODEV;
59337
59338- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
59339+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
59340 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
59341
59342 sysfs_put_active(attr_sd);
59343@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
59344 return -ENOMEM;
59345
59346 atomic_set(&new_od->refcnt, 0);
59347- atomic_set(&new_od->event, 1);
59348+ atomic_set_unchecked(&new_od->event, 1);
59349 init_waitqueue_head(&new_od->poll);
59350 INIT_LIST_HEAD(&new_od->buffers);
59351 goto retry;
59352@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
59353
59354 sysfs_put_active(attr_sd);
59355
59356- if (buffer->event != atomic_read(&od->event))
59357+ if (buffer->event != atomic_read_unchecked(&od->event))
59358 goto trigger;
59359
59360 return DEFAULT_POLLMASK;
59361@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
59362
59363 od = sd->s_attr.open;
59364 if (od) {
59365- atomic_inc(&od->event);
59366+ atomic_inc_unchecked(&od->event);
59367 wake_up_interruptible(&od->poll);
59368 }
59369
59370diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
59371index 8c940df..25b733e 100644
59372--- a/fs/sysfs/symlink.c
59373+++ b/fs/sysfs/symlink.c
59374@@ -305,7 +305,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
59375
59376 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
59377 {
59378- char *page = nd_get_link(nd);
59379+ const char *page = nd_get_link(nd);
59380 if (!IS_ERR(page))
59381 free_page((unsigned long)page);
59382 }
59383diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
59384index 69d4889..a810bd4 100644
59385--- a/fs/sysv/sysv.h
59386+++ b/fs/sysv/sysv.h
59387@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
59388 #endif
59389 }
59390
59391-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
59392+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
59393 {
59394 if (sbi->s_bytesex == BYTESEX_PDP)
59395 return PDP_swab((__force __u32)n);
59396diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
59397index e18b988..f1d4ad0f 100644
59398--- a/fs/ubifs/io.c
59399+++ b/fs/ubifs/io.c
59400@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
59401 return err;
59402 }
59403
59404-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
59405+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
59406 {
59407 int err;
59408
59409diff --git a/fs/udf/misc.c b/fs/udf/misc.c
59410index c175b4d..8f36a16 100644
59411--- a/fs/udf/misc.c
59412+++ b/fs/udf/misc.c
59413@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
59414
59415 u8 udf_tag_checksum(const struct tag *t)
59416 {
59417- u8 *data = (u8 *)t;
59418+ const u8 *data = (const u8 *)t;
59419 u8 checksum = 0;
59420 int i;
59421 for (i = 0; i < sizeof(struct tag); ++i)
59422diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
59423index 8d974c4..b82f6ec 100644
59424--- a/fs/ufs/swab.h
59425+++ b/fs/ufs/swab.h
59426@@ -22,7 +22,7 @@ enum {
59427 BYTESEX_BE
59428 };
59429
59430-static inline u64
59431+static inline u64 __intentional_overflow(-1)
59432 fs64_to_cpu(struct super_block *sbp, __fs64 n)
59433 {
59434 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
59435@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
59436 return (__force __fs64)cpu_to_be64(n);
59437 }
59438
59439-static inline u32
59440+static inline u32 __intentional_overflow(-1)
59441 fs32_to_cpu(struct super_block *sbp, __fs32 n)
59442 {
59443 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
59444diff --git a/fs/utimes.c b/fs/utimes.c
59445index f4fb7ec..3fe03c0 100644
59446--- a/fs/utimes.c
59447+++ b/fs/utimes.c
59448@@ -1,6 +1,7 @@
59449 #include <linux/compiler.h>
59450 #include <linux/file.h>
59451 #include <linux/fs.h>
59452+#include <linux/security.h>
59453 #include <linux/linkage.h>
59454 #include <linux/mount.h>
59455 #include <linux/namei.h>
59456@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
59457 goto mnt_drop_write_and_out;
59458 }
59459 }
59460+
59461+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
59462+ error = -EACCES;
59463+ goto mnt_drop_write_and_out;
59464+ }
59465+
59466 mutex_lock(&inode->i_mutex);
59467 error = notify_change(path->dentry, &newattrs);
59468 mutex_unlock(&inode->i_mutex);
59469diff --git a/fs/xattr.c b/fs/xattr.c
59470index 3377dff..4d074d9 100644
59471--- a/fs/xattr.c
59472+++ b/fs/xattr.c
59473@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
59474 return rc;
59475 }
59476
59477+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
59478+ssize_t
59479+pax_getxattr(struct dentry *dentry, void *value, size_t size)
59480+{
59481+ struct inode *inode = dentry->d_inode;
59482+ ssize_t error;
59483+
59484+ error = inode_permission(inode, MAY_EXEC);
59485+ if (error)
59486+ return error;
59487+
59488+ if (inode->i_op->getxattr)
59489+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
59490+ else
59491+ error = -EOPNOTSUPP;
59492+
59493+ return error;
59494+}
59495+EXPORT_SYMBOL(pax_getxattr);
59496+#endif
59497+
59498 ssize_t
59499 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
59500 {
59501@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
59502 * Extended attribute SET operations
59503 */
59504 static long
59505-setxattr(struct dentry *d, const char __user *name, const void __user *value,
59506+setxattr(struct path *path, const char __user *name, const void __user *value,
59507 size_t size, int flags)
59508 {
59509 int error;
59510@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
59511 posix_acl_fix_xattr_from_user(kvalue, size);
59512 }
59513
59514- error = vfs_setxattr(d, kname, kvalue, size, flags);
59515+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
59516+ error = -EACCES;
59517+ goto out;
59518+ }
59519+
59520+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
59521 out:
59522 if (vvalue)
59523 vfree(vvalue);
59524@@ -377,7 +403,7 @@ retry:
59525 return error;
59526 error = mnt_want_write(path.mnt);
59527 if (!error) {
59528- error = setxattr(path.dentry, name, value, size, flags);
59529+ error = setxattr(&path, name, value, size, flags);
59530 mnt_drop_write(path.mnt);
59531 }
59532 path_put(&path);
59533@@ -401,7 +427,7 @@ retry:
59534 return error;
59535 error = mnt_want_write(path.mnt);
59536 if (!error) {
59537- error = setxattr(path.dentry, name, value, size, flags);
59538+ error = setxattr(&path, name, value, size, flags);
59539 mnt_drop_write(path.mnt);
59540 }
59541 path_put(&path);
59542@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
59543 const void __user *,value, size_t, size, int, flags)
59544 {
59545 struct fd f = fdget(fd);
59546- struct dentry *dentry;
59547 int error = -EBADF;
59548
59549 if (!f.file)
59550 return error;
59551- dentry = f.file->f_path.dentry;
59552- audit_inode(NULL, dentry, 0);
59553+ audit_inode(NULL, f.file->f_path.dentry, 0);
59554 error = mnt_want_write_file(f.file);
59555 if (!error) {
59556- error = setxattr(dentry, name, value, size, flags);
59557+ error = setxattr(&f.file->f_path, name, value, size, flags);
59558 mnt_drop_write_file(f.file);
59559 }
59560 fdput(f);
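The new pax_getxattr() reads XATTR_NAME_PAX_FLAGS after an explicit MAY_EXEC inode_permission() check, reflecting that the flags only matter for executables. Userspace can read the same attribute with getxattr(2); the sketch below assumes the conventional attribute name "user.pax.flags" used by the XATTR_PAX toolchain, and an unmarked file returns ENODATA.

/* paxflags_demo.c - read PaX flags stored as an xattr on a binary. */
#include <errno.h>
#include <stdio.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
        char value[32];
        ssize_t n;

        if (argc < 2)
                return 1;
        n = getxattr(argv[1], "user.pax.flags", value, sizeof(value) - 1);
        if (n < 0) {
                perror("getxattr");
                return 1;
        }
        value[n] = '\0';
        printf("%s: %s\n", argv[1], value);
        return 0;
}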
59561diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
59562index 9fbea87..6b19972 100644
59563--- a/fs/xattr_acl.c
59564+++ b/fs/xattr_acl.c
59565@@ -76,8 +76,8 @@ struct posix_acl *
59566 posix_acl_from_xattr(struct user_namespace *user_ns,
59567 const void *value, size_t size)
59568 {
59569- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
59570- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
59571+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
59572+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
59573 int count;
59574 struct posix_acl *acl;
59575 struct posix_acl_entry *acl_e;
59576diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
59577index 8904284..ee0e14b 100644
59578--- a/fs/xfs/xfs_bmap.c
59579+++ b/fs/xfs/xfs_bmap.c
59580@@ -765,7 +765,7 @@ xfs_bmap_validate_ret(
59581
59582 #else
59583 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
59584-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
59585+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
59586 #endif /* DEBUG */
59587
59588 /*
59589diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
59590index 6157424..ac98f6d 100644
59591--- a/fs/xfs/xfs_dir2_sf.c
59592+++ b/fs/xfs/xfs_dir2_sf.c
59593@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
59594 }
59595
59596 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
59597- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
59598+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
59599+ char name[sfep->namelen];
59600+ memcpy(name, sfep->name, sfep->namelen);
59601+ if (filldir(dirent, name, sfep->namelen,
59602+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
59603+ *offset = off & 0x7fffffff;
59604+ return 0;
59605+ }
59606+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
59607 off & 0x7fffffff, ino, DT_UNKNOWN)) {
59608 *offset = off & 0x7fffffff;
59609 return 0;
59610diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
59611index 5e99968..45bd327 100644
59612--- a/fs/xfs/xfs_ioctl.c
59613+++ b/fs/xfs/xfs_ioctl.c
59614@@ -127,7 +127,7 @@ xfs_find_handle(
59615 }
59616
59617 error = -EFAULT;
59618- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
59619+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
59620 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
59621 goto out_put;
59622
59623diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
59624index ca9ecaa..60100c7 100644
59625--- a/fs/xfs/xfs_iops.c
59626+++ b/fs/xfs/xfs_iops.c
59627@@ -395,7 +395,7 @@ xfs_vn_put_link(
59628 struct nameidata *nd,
59629 void *p)
59630 {
59631- char *s = nd_get_link(nd);
59632+ const char *s = nd_get_link(nd);
59633
59634 if (!IS_ERR(s))
59635 kfree(s);
59636diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
59637new file mode 100644
59638index 0000000..712a85d
59639--- /dev/null
59640+++ b/grsecurity/Kconfig
59641@@ -0,0 +1,1043 @@
59642+#
59643+# grsecurity configuration
59644+#
59645+menu "Memory Protections"
59646+depends on GRKERNSEC
59647+
59648+config GRKERNSEC_KMEM
59649+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
59650+ default y if GRKERNSEC_CONFIG_AUTO
59651+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
59652+ help
59653+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
59654+ be written to or read from, so they cannot be used to modify or leak the
59655+ contents of the running kernel. /dev/port will also not be allowed to be opened, and support
59656+ for /dev/cpu/*/msr will be removed. If you have module
59657+ support disabled, enabling this will close up five ways that are
59658+ currently used to insert malicious code into the running kernel.
59659+
59660+ Even with all these features enabled, we still highly recommend that
59661+ you use the RBAC system, as it is still possible for an attacker to
59662+ modify the running kernel through privileged I/O granted by ioperm/iopl.
59663+
59664+ If you are not using XFree86, you may be able to stop this additional
59665+ case by enabling the 'Disable privileged I/O' option. Though nothing
59666+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
59667+ but only to video memory, which is the only writing we allow in this
59668+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
59669+ not be allowed to mprotect it with PROT_WRITE later.
59670+ Enabling this feature will prevent the "cpupower" and "powertop" tools
59671+ from working.
59672+
59673+ It is highly recommended that you say Y here if you meet all the
59674+ conditions above.
59675+
59676+config GRKERNSEC_VM86
59677+ bool "Restrict VM86 mode"
59678+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
59679+ depends on X86_32
59680+
59681+ help
59682+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
59683+ make use of a special execution mode on 32bit x86 processors called
59684+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
59685+ video cards and will still work with this option enabled. The purpose
59686+ of the option is to prevent exploitation of emulation errors in
59687+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
59688+ Nearly all users should be able to enable this option.
59689+
59690+config GRKERNSEC_IO
59691+ bool "Disable privileged I/O"
59692+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
59693+ depends on X86
59694+ select RTC_CLASS
59695+ select RTC_INTF_DEV
59696+ select RTC_DRV_CMOS
59697+
59698+ help
59699+ If you say Y here, all ioperm and iopl calls will return an error.
59700+ Ioperm and iopl can be used to modify the running kernel.
59701+ Unfortunately, some programs need this access to operate properly,
59702+ the most notable of which are XFree86 and hwclock. hwclock can be
59703+ remedied by having RTC support in the kernel, so real-time
59704+ clock support is enabled if this option is enabled, to ensure
59705+ that hwclock operates correctly. XFree86 still will not
59706+ operate correctly with this option enabled, so DO NOT CHOOSE Y
59707+ IF YOU USE XFree86. If you use XFree86 and you still want to
59708+ protect your kernel against modification, use the RBAC system.
59709+
59710+config GRKERNSEC_JIT_HARDEN
59711+ bool "Harden BPF JIT against spray attacks"
59712+ default y if GRKERNSEC_CONFIG_AUTO
59713+ depends on BPF_JIT
59714+ help
59715+ If you say Y here, the native code generated by the kernel's Berkeley
59716+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
59717+ attacks that attempt to fit attacker-beneficial instructions in
59718+ 32bit immediate fields of JIT-generated native instructions. The
59719+ attacker will generally aim to cause an unintended instruction sequence
59720+ of JIT-generated native code to execute by jumping into the middle of
59721+ a generated instruction. This feature effectively randomizes the 32bit
59722+ immediate constants present in the generated code to thwart such attacks.
59723+
59724+ If you're using KERNEXEC, it's recommended that you enable this option
59725+ to supplement the hardening of the kernel.
59726+
59727+config GRKERNSEC_PERF_HARDEN
59728+ bool "Disable unprivileged PERF_EVENTS usage by default"
59729+ default y if GRKERNSEC_CONFIG_AUTO
59730+ depends on PERF_EVENTS
59731+ help
59732+ If you say Y here, the range of acceptable values for the
59733+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
59734+ default to a new value: 3. When the sysctl is set to this value, no
59735+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
59736+
59737+ Though PERF_EVENTS can be used legitimately for performance monitoring
59738+ and low-level application profiling, it is forced on regardless of
59739+ configuration, has been at fault for several vulnerabilities, and
59740+ creates new opportunities for side channels and other information leaks.
59741+
59742+ This feature puts PERF_EVENTS into a secure default state and permits
59743+ the administrator to change out of it temporarily if unprivileged
59744+ application profiling is needed.
59745+
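With the hardened default of 3 described above, an unprivileged perf_event_open() fails with EACCES; a root shell can relax it temporarily via /proc/sys/kernel/perf_event_paranoid when profiling is needed. A minimal probe of the policy (perf_event_open has no glibc wrapper, so it goes through syscall(2)):

/* perf_paranoid_demo.c - probe the perf_event_paranoid policy. */
#include <errno.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_CPU_CLOCK;
        attr.exclude_kernel = 1;        /* count our own user time only */
        attr.exclude_hv = 1;

        fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
                printf("perf_event_open: %s\n", strerror(errno));
        else {
                puts("perf_event_open succeeded");
                close(fd);
        }
        return 0;
}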
59746+config GRKERNSEC_RAND_THREADSTACK
59747+ bool "Insert random gaps between thread stacks"
59748+ default y if GRKERNSEC_CONFIG_AUTO
59749+ depends on PAX_RANDMMAP && !PPC
59750+ help
59751+ If you say Y here, a random-sized gap will be enforced between allocated
59752+ thread stacks. Glibc's NPTL and other threading libraries that
59753+ pass MAP_STACK to the kernel for thread stack allocation are supported.
59754+ The implementation currently provides 8 bits of entropy for the gap.
59755+
59756+ Many distributions do not compile threaded remote services with the
59757+ -fstack-check argument to GCC, causing the variable-sized stack-based
59758+ allocator, alloca(), to not probe the stack on allocation. This
59759+ permits an unbounded alloca() to skip over any guard page and potentially
59760+ modify another thread's stack reliably. An enforced random gap
59761+ reduces the reliability of such an attack and increases the chance
59762+ that such a read/write to another thread's stack instead lands in
59763+ an unmapped area, causing a crash and triggering grsecurity's
59764+ anti-bruteforcing logic.
59765+
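The gap is observable from userspace: with this option, the distance between successive MAP_STACK thread-stack allocations varies from run to run (about 8 bits of entropy per the help text above). A minimal sketch using pthreads; the absolute addresses printed are of course also subject to ordinary ASLR.

/* stackgap_demo.c - print the stack addresses of two threads. */
#include <pthread.h>
#include <stdio.h>

static void *probe(void *arg)
{
        int local;

        printf("thread %ld stack near %p\n", (long)arg, (void *)&local);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, probe, (void *)1L);
        pthread_create(&t2, NULL, probe, (void *)2L);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}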
59766+config GRKERNSEC_PROC_MEMMAP
59767+ bool "Harden ASLR against information leaks and entropy reduction"
59768+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
59769+ depends on PAX_NOEXEC || PAX_ASLR
59770+ help
59771+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
59772+ give no information about the addresses of its mappings if
59773+ PaX features that rely on random addresses are enabled on the task.
59774+ In addition to sanitizing this information and disabling other
59775+ dangerous sources of information, this option causes reads of sensitive
59776+ /proc/<pid> entries to be rejected where the file descriptor was opened
59777+ in a different task than the one performing the read. Such attempts are logged.
59778+ This option also limits argv/env strings for suid/sgid binaries
59779+ to 512KB to prevent a complete exhaustion of the stack entropy provided
59780+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
59781+ binaries to prevent alternative mmap layouts from being abused.
59782+
59783+ If you use PaX it is essential that you say Y here as it closes up
59784+ several holes that make full ASLR useless locally.
59785+
59786+config GRKERNSEC_BRUTE
59787+ bool "Deter exploit bruteforcing"
59788+ default y if GRKERNSEC_CONFIG_AUTO
59789+ help
59790+ If you say Y here, attempts to bruteforce exploits against forking
59791+ daemons such as apache or sshd, as well as against suid/sgid binaries
59792+ will be deterred. When a child of a forking daemon is killed by PaX
59793+ or crashes due to an illegal instruction or other suspicious signal,
59794+ the parent process will be delayed 30 seconds upon every subsequent
59795+ fork until the administrator is able to assess the situation and
59796+ restart the daemon.
59797+ In the suid/sgid case, the attempt is logged, the user has all their
59798+ existing instances of the suid/sgid binary terminated and will
59799+ be unable to execute any suid/sgid binaries for 15 minutes.
59800+
59801+ It is recommended that you also enable signal logging in the auditing
59802+ section so that logs are generated when a process triggers a suspicious
59803+ signal.
59804+ If the sysctl option is enabled, a sysctl option with name
59805+ "deter_bruteforce" is created.
59806+
59807+
59808+config GRKERNSEC_MODHARDEN
59809+ bool "Harden module auto-loading"
59810+ default y if GRKERNSEC_CONFIG_AUTO
59811+ depends on MODULES
59812+ help
59813+ If you say Y here, module auto-loading in response to use of some
59814+ feature implemented by an unloaded module will be restricted to
59815+ root users. Enabling this option helps defend against attacks
59816+ by unprivileged users who abuse the auto-loading behavior to
59817+ cause a vulnerable module to load that is then exploited.
59818+
59819+ If this option prevents a legitimate use of auto-loading for a
59820+ non-root user, the administrator can execute modprobe manually
59821+ with the exact name of the module mentioned in the alert log.
59822+ Alternatively, the administrator can add the module to the list
59823+ of modules loaded at boot by modifying init scripts.
59824+
59825+ Modification of init scripts will most likely be needed on
59826+ Ubuntu servers with encrypted home directory support enabled,
59827+ as the first non-root user logging in will cause the ecb(aes),
59828+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
59829+
59830+config GRKERNSEC_HIDESYM
59831+ bool "Hide kernel symbols"
59832+ default y if GRKERNSEC_CONFIG_AUTO
59833+ select PAX_USERCOPY_SLABS
59834+ help
59835+ If you say Y here, getting information on loaded modules and
59836+ displaying all kernel symbols through a syscall will be restricted
59837+ to users with CAP_SYS_MODULE. For software compatibility reasons,
59838+ /proc/kallsyms will be restricted to the root user. The RBAC
59839+ system can hide that entry even from root.
59840+
59841+ This option also prevents leaking of kernel addresses through
59842+ several /proc entries.
59843+
59844+ Note that this option is only effective provided the following
59845+ conditions are met:
59846+ 1) The kernel using grsecurity is not precompiled by some distribution
59847+ 2) You have also enabled GRKERNSEC_DMESG
59848+ 3) You are using the RBAC system and hiding other files such as your
59849+ kernel image and System.map. Alternatively, enabling this option
59850+ causes the permissions on /boot, /lib/modules, and the kernel
59851+ source directory to change at compile time to prevent
59852+ reading by non-root users.
59853+ If the above conditions are met, this option will aid in providing a
59854+ useful protection against local kernel exploitation of overflows
59855+ and arbitrary read/write vulnerabilities.
59856+
59857+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
59858+ in addition to this feature.
59859+
59860+config GRKERNSEC_KERN_LOCKOUT
59861+ bool "Active kernel exploit response"
59862+ default y if GRKERNSEC_CONFIG_AUTO
59863+ depends on X86 || ARM || PPC || SPARC
59864+ help
59865+ If you say Y here, when a PaX alert is triggered due to suspicious
59866+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
59867+ or an OOPS occurs due to bad memory accesses, instead of just
59868+ terminating the offending process (and potentially allowing
59869+ a subsequent exploit from the same user), we will take one of two
59870+ actions:
59871+ If the user was root, we will panic the system
59872+ If the user was non-root, we will log the attempt, terminate
59873+ all processes owned by the user, then prevent them from creating
59874+ any new processes until the system is restarted
59875+ This deters repeated kernel exploitation/bruteforcing attempts
59876+ and is useful for later forensics.
59877+
59878+endmenu
59879+menu "Role Based Access Control Options"
59880+depends on GRKERNSEC
59881+
59882+config GRKERNSEC_RBAC_DEBUG
59883+ bool
59884+
59885+config GRKERNSEC_NO_RBAC
59886+ bool "Disable RBAC system"
59887+ help
59888+ If you say Y here, the /dev/grsec device will be removed from the kernel,
59889+ preventing the RBAC system from being enabled. You should only say Y
59890+ here if you have no intention of using the RBAC system, so as to prevent
59891+ an attacker with root access from misusing the RBAC system to hide files
59892+ and processes when loadable module support and /dev/[k]mem have been
59893+ locked down.
59894+
59895+config GRKERNSEC_ACL_HIDEKERN
59896+ bool "Hide kernel processes"
59897+ help
59898+ If you say Y here, all kernel threads will be hidden from all
59899+ processes but those whose subject has the "view hidden processes"
59900+ flag.
59901+
59902+config GRKERNSEC_ACL_MAXTRIES
59903+ int "Maximum tries before password lockout"
59904+ default 3
59905+ help
59906+ This option enforces the maximum number of times a user can attempt
59907+ to authorize themselves with the grsecurity RBAC system before being
59908+ denied the ability to attempt authorization again for a specified time.
59909+ The lower the number, the harder it will be to brute-force a password.
59910+
59911+config GRKERNSEC_ACL_TIMEOUT
59912+ int "Time to wait after max password tries, in seconds"
59913+ default 30
59914+ help
59915+ This option specifies the time the user must wait after attempting to
59916+ authorize to the RBAC system with the maximum number of invalid
59917+ passwords. The higher the number, the harder it will be to brute-force
59918+ a password.
59919+
59920+endmenu
59921+menu "Filesystem Protections"
59922+depends on GRKERNSEC
59923+
59924+config GRKERNSEC_PROC
59925+ bool "Proc restrictions"
59926+ default y if GRKERNSEC_CONFIG_AUTO
59927+ help
59928+ If you say Y here, the permissions of the /proc filesystem
59929+ will be altered to enhance system security and privacy. You MUST
59930+ choose either a user-only restriction or a user and group restriction.
59931+ Depending upon the option you choose, you can either restrict users to
59932+ see only the processes they themselves run, or choose a group that can
59933+ view all processes and files normally restricted to root if you choose
59934+ the "restrict to user only" option. NOTE: If you're running identd or
59935+ ntpd as a non-root user, you will have to run it as the group you
59936+ specify here.
59937+
59938+config GRKERNSEC_PROC_USER
59939+ bool "Restrict /proc to user only"
59940+ depends on GRKERNSEC_PROC
59941+ help
59942+ If you say Y here, non-root users will only be able to view their own
59943+ processes, will be restricted from viewing network-related information,
59944+ and will be unable to view kernel symbol and module information.
59945+
59946+config GRKERNSEC_PROC_USERGROUP
59947+ bool "Allow special group"
59948+ default y if GRKERNSEC_CONFIG_AUTO
59949+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
59950+ help
59951+ If you say Y here, you will be able to select a group that will be
59952+ able to view all processes and network-related information. If you've
59953+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
59954+ remain hidden. This option is useful if you want to run identd as
59955+ a non-root user. The group you select may also be chosen at boot time
59956+ via "grsec_proc_gid=" on the kernel commandline.
59957+
59958+config GRKERNSEC_PROC_GID
59959+ int "GID for special group"
59960+ depends on GRKERNSEC_PROC_USERGROUP
59961+ default 1001
59962+
59963+config GRKERNSEC_PROC_ADD
59964+ bool "Additional restrictions"
59965+ default y if GRKERNSEC_CONFIG_AUTO
59966+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
59967+ help
59968+ If you say Y here, additional restrictions will be placed on
59969+ /proc that keep normal users from viewing device information and
59970+ slabinfo information that could be useful for exploits.
59971+
59972+config GRKERNSEC_LINK
59973+ bool "Linking restrictions"
59974+ default y if GRKERNSEC_CONFIG_AUTO
59975+ help
59976+ If you say Y here, /tmp race exploits will be prevented, since users
59977+ will no longer be able to follow symlinks owned by other users in
59978+ world-writable +t directories (e.g. /tmp), unless the owner of the
59979+ symlink is the owner of the directory. Users will also not be
59980+ able to hardlink to files they do not own. If the sysctl option is
59981+ enabled, a sysctl option with name "linking_restrictions" is created.
59982+
59983+config GRKERNSEC_SYMLINKOWN
59984+ bool "Kernel-enforced SymlinksIfOwnerMatch"
59985+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
59986+ help
59987+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
59988+ that prevents it from being used as a security feature. As Apache
59989+ verifies the symlink by performing a stat() against the target of
59990+ the symlink before it is followed, an attacker can setup a symlink
59991+ to point to a same-owned file, then replace the symlink with one
59992+ that targets another user's file just after Apache "validates" the
59993+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
59994+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
59995+ will be in place for the group you specify. If the sysctl option
59996+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
59997+ created.
59998+
59999+config GRKERNSEC_SYMLINKOWN_GID
60000+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
60001+ depends on GRKERNSEC_SYMLINKOWN
60002+ default 1006
60003+ help
60004+ Setting this GID determines what group kernel-enforced
60005+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
60006+ is enabled, a sysctl option with name "symlinkown_gid" is created.
60007+
60008+config GRKERNSEC_FIFO
60009+ bool "FIFO restrictions"
60010+ default y if GRKERNSEC_CONFIG_AUTO
60011+ help
60012+ If you say Y here, users will not be able to write to FIFOs they don't
60013+ own in world-writable +t directories (e.g. /tmp), unless the owner of
60014+ the FIFO is also the owner of the directory it's held in. If the sysctl
60015+ option is enabled, a sysctl option with name "fifo_restrictions" is
60016+ created.
60017+
60018+config GRKERNSEC_SYSFS_RESTRICT
60019+ bool "Sysfs/debugfs restriction"
60020+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
60021+ depends on SYSFS
60022+ help
60023+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
60024+ any filesystem normally mounted under it (e.g. debugfs) will be
60025+ mostly accessible only by root. These filesystems generally provide access
60026+ to hardware and debug information that isn't appropriate for unprivileged
60027+ users of the system. Sysfs and debugfs have also become a large source
60028+ of new vulnerabilities, ranging from infoleaks to local compromise.
60029+ There has been very little oversight with an eye toward security involved
60030+ in adding new exporters of information to these filesystems, so their
60031+ use is discouraged.
60032+ For reasons of compatibility, a few directories have been whitelisted
60033+ for access by non-root users:
60034+ /sys/fs/selinux
60035+ /sys/fs/fuse
60036+ /sys/devices/system/cpu
60037+
60038+config GRKERNSEC_ROFS
60039+ bool "Runtime read-only mount protection"
60040+ help
60041+ If you say Y here, a sysctl option with name "romount_protect" will
60042+ be created. By setting this option to 1 at runtime, filesystems
60043+ will be protected in the following ways:
60044+ * No new writable mounts will be allowed
60045+ * Existing read-only mounts won't be able to be remounted read/write
60046+ * Write operations will be denied on all block devices
60047+ This option acts independently of grsec_lock: once it is set to 1,
60048+ it cannot be turned off. Therefore, please be mindful of the resulting
60049+ behavior if this option is enabled in an init script on a read-only
60050+ filesystem. This feature is mainly intended for secure embedded systems.
60051+
60052+config GRKERNSEC_DEVICE_SIDECHANNEL
60053+ bool "Eliminate stat/notify-based device sidechannels"
60054+ default y if GRKERNSEC_CONFIG_AUTO
60055+ help
60056+ If you say Y here, timing analyses on block or character
60057+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
60058+ will be thwarted for unprivileged users. If a process without
60059+ CAP_MKNOD stats such a device, the last access and last modify times
60060+ will match the device's create time. No access or modify events
60061+ will be triggered through inotify/dnotify/fanotify for such devices.
60062+ This feature will prevent attacks that may at a minimum
60063+ allow an attacker to determine the administrator's password length.
60064+
60065+config GRKERNSEC_CHROOT
60066+ bool "Chroot jail restrictions"
60067+ default y if GRKERNSEC_CONFIG_AUTO
60068+ help
60069+ If you say Y here, you will be able to choose several options that will
60070+ make breaking out of a chrooted jail much more difficult. If you
60071+ encounter no software incompatibilities with the following options, it
60072+ is recommended that you enable each one.
60073+
60074+config GRKERNSEC_CHROOT_MOUNT
60075+ bool "Deny mounts"
60076+ default y if GRKERNSEC_CONFIG_AUTO
60077+ depends on GRKERNSEC_CHROOT
60078+ help
60079+ If you say Y here, processes inside a chroot will not be able to
60080+ mount or remount filesystems. If the sysctl option is enabled, a
60081+ sysctl option with name "chroot_deny_mount" is created.
60082+
60083+config GRKERNSEC_CHROOT_DOUBLE
60084+ bool "Deny double-chroots"
60085+ default y if GRKERNSEC_CONFIG_AUTO
60086+ depends on GRKERNSEC_CHROOT
60087+ help
60088+ If you say Y here, processes inside a chroot will not be able to chroot
60089+ again outside the chroot. This is a widely used method of breaking
60090+ out of a chroot jail and should not be allowed. If the sysctl
60091+ option is enabled, a sysctl option with name
60092+ "chroot_deny_chroot" is created.
60093+
60094+config GRKERNSEC_CHROOT_PIVOT
60095+ bool "Deny pivot_root in chroot"
60096+ default y if GRKERNSEC_CONFIG_AUTO
60097+ depends on GRKERNSEC_CHROOT
60098+ help
60099+ If you say Y here, processes inside a chroot will not be able to use
60100+ a function called pivot_root() that was introduced in Linux 2.3.41. It
60101+ works similarly to chroot in that it changes the root filesystem. This
60102+ function could be misused in a chrooted process to attempt to break out
60103+ of the chroot, and therefore should not be allowed. If the sysctl
60104+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
60105+ created.
60106+
60107+config GRKERNSEC_CHROOT_CHDIR
60108+ bool "Enforce chdir(\"/\") on all chroots"
60109+ default y if GRKERNSEC_CONFIG_AUTO
60110+ depends on GRKERNSEC_CHROOT
60111+ help
60112+ If you say Y here, the current working directory of all newly-chrooted
60113+ applications will be set to the root directory of the chroot.
60114+ The man page on chroot(2) states:
60115+ Note that this call does not change the current working
60116+ directory, so that `.' can be outside the tree rooted at
60117+ `/'. In particular, the super-user can escape from a
60118+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
60119+
60120+ It is recommended that you say Y here, since it's not known to break
60121+ any software. If the sysctl option is enabled, a sysctl option with
60122+ name "chroot_enforce_chdir" is created.
60123+
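
A sketch of the escape quoted from the man page, assuming the process retains CAP_SYS_CHROOT inside the jail (the directory name and loop bound are arbitrary):

	#include <sys/stat.h>
	#include <unistd.h>

	/* chroot() does not move the cwd, so after chroot("foo") the cwd
	 * sits outside the new root and chdir("..") can climb past it. */
	int escape_chroot(void)
	{
		int i;

		mkdir("foo", 0700);		/* directory inside the jail */
		if (chroot("foo") < 0)		/* root moves below the cwd  */
			return -1;
		for (i = 0; i < 64; i++)	/* climb to the real root    */
			chdir("..");
		return chroot(".");		/* re-anchor; jail escaped   */
	}

Forcing chdir("/") on every chroot() removes the dangling cwd this trick depends on.
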
60124+config GRKERNSEC_CHROOT_CHMOD
60125+ bool "Deny (f)chmod +s"
60126+ default y if GRKERNSEC_CONFIG_AUTO
60127+ depends on GRKERNSEC_CHROOT
60128+ help
60129+ If you say Y here, processes inside a chroot will not be able to chmod
60130+ or fchmod files to make them have suid or sgid bits. This protects
60131+ against another published method of breaking a chroot. If the sysctl
60132+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
60133+ created.
60134+
60135+config GRKERNSEC_CHROOT_FCHDIR
60136+ bool "Deny fchdir out of chroot"
60137+ default y if GRKERNSEC_CONFIG_AUTO
60138+ depends on GRKERNSEC_CHROOT
60139+ help
60140+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
60141+ to a file descriptor of the chrooting process that points to a directory
60142+ outside the chroot will be stopped. If the sysctl option
60143+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
60144+
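
A companion sketch of the fchdir() variant this option stops ("/jail" is a placeholder path): a directory descriptor opened before entering the jail still references the outside directory, so fchdir() steps straight back out.

	#include <fcntl.h>
	#include <unistd.h>

	int escape_via_fd(void)
	{
		/* Descriptor obtained before the jail is entered. */
		int fd = open("/", O_RDONLY);

		if (fd < 0 || chroot("/jail") < 0)
			return -1;
		if (fchdir(fd) < 0)	/* cwd becomes the real "/"  */
			return -1;
		return chroot(".");	/* re-anchor; jail escaped   */
	}
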
60145+config GRKERNSEC_CHROOT_MKNOD
60146+ bool "Deny mknod"
60147+ default y if GRKERNSEC_CONFIG_AUTO
60148+ depends on GRKERNSEC_CHROOT
60149+ help
60150+ If you say Y here, processes inside a chroot will not be allowed to
60151+ mknod. The problem with using mknod inside a chroot is that it
60152+ would allow an attacker to create a device entry that is the same
60153+ as one on the physical root of your system, which could be anything
60154+ from the console device to a device for your hard drive (which
60155+ they could then use to wipe the drive or steal data). It is recommended
60156+ that you say Y here, unless you run into software incompatibilities.
60157+ If the sysctl option is enabled, a sysctl option with name
60158+ "chroot_deny_mknod" is created.
60159+
60160+config GRKERNSEC_CHROOT_SHMAT
60161+ bool "Deny shmat() out of chroot"
60162+ default y if GRKERNSEC_CONFIG_AUTO
60163+ depends on GRKERNSEC_CHROOT
60164+ help
60165+ If you say Y here, processes inside a chroot will not be able to attach
60166+ to shared memory segments that were created outside of the chroot jail.
60167+ It is recommended that you say Y here. If the sysctl option is enabled,
60168+ a sysctl option with name "chroot_deny_shmat" is created.
60169+
60170+config GRKERNSEC_CHROOT_UNIX
60171+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
60172+ default y if GRKERNSEC_CONFIG_AUTO
60173+ depends on GRKERNSEC_CHROOT
60174+ help
60175+ If you say Y here, processes inside a chroot will not be able to
60176+ connect to abstract (meaning not belonging to a filesystem) Unix
60177+ domain sockets that were bound outside of a chroot. It is recommended
60178+ that you say Y here. If the sysctl option is enabled, a sysctl option
60179+ with name "chroot_deny_unix" is created.
60180+
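
A short sketch of why abstract sockets cross the jail boundary (the socket name "demo" is made up): the address lives in a kernel-wide namespace keyed by a leading NUL byte, so connect() performs no filesystem lookup that the chroot could constrain.

	#include <stddef.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <sys/un.h>
	#include <unistd.h>

	int connect_abstract(const char *name)	/* e.g. "demo" */
	{
		struct sockaddr_un sa;
		int fd = socket(AF_UNIX, SOCK_STREAM, 0);

		if (fd < 0)
			return -1;
		memset(&sa, 0, sizeof(sa));
		sa.sun_family = AF_UNIX;
		/* sun_path[0] stays '\0': abstract namespace, no inode */
		strncpy(sa.sun_path + 1, name, sizeof(sa.sun_path) - 2);
		if (connect(fd, (struct sockaddr *)&sa,
			    offsetof(struct sockaddr_un, sun_path) +
			    1 + strlen(name)) < 0) {
			close(fd);
			return -1;
		}
		return fd;
	}
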
60181+config GRKERNSEC_CHROOT_FINDTASK
60182+ bool "Protect outside processes"
60183+ default y if GRKERNSEC_CONFIG_AUTO
60184+ depends on GRKERNSEC_CHROOT
60185+ help
60186+ If you say Y here, processes inside a chroot will not be able to
60187+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
60188+ getsid, or view any process outside of the chroot. If the sysctl
60189+ option is enabled, a sysctl option with name "chroot_findtask" is
60190+ created.
60191+
60192+config GRKERNSEC_CHROOT_NICE
60193+ bool "Restrict priority changes"
60194+ default y if GRKERNSEC_CONFIG_AUTO
60195+ depends on GRKERNSEC_CHROOT
60196+ help
60197+ If you say Y here, processes inside a chroot will not be able to raise
60198+ the priority of processes in the chroot, or alter the priority of
60199+ processes outside the chroot. This provides more security than simply
60200+ removing CAP_SYS_NICE from the process' capability set. If the
60201+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
60202+ is created.
60203+
60204+config GRKERNSEC_CHROOT_SYSCTL
60205+ bool "Deny sysctl writes"
60206+ default y if GRKERNSEC_CONFIG_AUTO
60207+ depends on GRKERNSEC_CHROOT
60208+ help
60209+ If you say Y here, an attacker in a chroot will not be able to
60210+ write to sysctl entries, either by sysctl(2) or through a /proc
60211+ interface. It is strongly recommended that you say Y here. If the
60212+ sysctl option is enabled, a sysctl option with name
60213+ "chroot_deny_sysctl" is created.
60214+
60215+config GRKERNSEC_CHROOT_CAPS
60216+ bool "Capability restrictions"
60217+ default y if GRKERNSEC_CONFIG_AUTO
60218+ depends on GRKERNSEC_CHROOT
60219+ help
60220+ If you say Y here, the capabilities on all processes within a
60221+ chroot jail will be lowered to stop module insertion, raw I/O,
60222+ system and net admin tasks, rebooting the system, modifying immutable
60223+ files, modifying IPC owned by another, and changing the system time.
60224+ This is left as an option because it can break some apps. Disable this
60225+ if your chrooted apps are having problems performing those kinds of
60226+ tasks. If the sysctl option is enabled, a sysctl option with
60227+ name "chroot_caps" is created.
60228+
60229+config GRKERNSEC_CHROOT_INITRD
60230+ bool "Exempt initrd tasks from restrictions"
60231+ default y if GRKERNSEC_CONFIG_AUTO
60232+ depends on GRKERNSEC_CHROOT && BLK_DEV_RAM
60233+ help
60234+ If you say Y here, tasks started prior to init will be exempted from
60235+ grsecurity's chroot restrictions. This option is mainly meant to
60236+ resolve the issue of Plymouth performing privileged operations unnecessarily
60237+ in a chroot.
60238+
60239+endmenu
60240+menu "Kernel Auditing"
60241+depends on GRKERNSEC
60242+
60243+config GRKERNSEC_AUDIT_GROUP
60244+ bool "Single group for auditing"
60245+ help
60246+ If you say Y here, the exec and chdir logging features will only operate
60247+ on a group you specify. This option is recommended if you only want to
60248+ watch certain users instead of having a large volume of logs from the
60249+ entire system. If the sysctl option is enabled, a sysctl option with
60250+ name "audit_group" is created.
60251+
60252+config GRKERNSEC_AUDIT_GID
60253+ int "GID for auditing"
60254+ depends on GRKERNSEC_AUDIT_GROUP
60255+ default 1007
60256+
60257+config GRKERNSEC_EXECLOG
60258+ bool "Exec logging"
60259+ help
60260+ If you say Y here, all execve() calls will be logged (since the
60261+ other exec*() calls are frontends to execve(), all execution
60262+ will be logged). Useful for shell-servers that like to keep track
60263+ of their users. If the sysctl option is enabled, a sysctl option with
60264+ name "exec_logging" is created.
60265+ WARNING: This option when enabled will produce a LOT of logs, especially
60266+ on an active system.
60267+
60268+config GRKERNSEC_RESLOG
60269+ bool "Resource logging"
60270+ default y if GRKERNSEC_CONFIG_AUTO
60271+ help
60272+ If you say Y here, all attempts to overstep resource limits will
60273+ be logged with the resource name, the requested size, and the current
60274+ limit. It is highly recommended that you say Y here. If the sysctl
60275+ option is enabled, a sysctl option with name "resource_logging" is
60276+ created. If the RBAC system is enabled, the sysctl value is ignored.
60277+
60278+config GRKERNSEC_CHROOT_EXECLOG
60279+ bool "Log execs within chroot"
60280+ help
60281+ If you say Y here, all executions inside a chroot jail will be logged
60282+ to syslog. This can cause a large volume of logs if certain
60283+ applications (e.g. djb's daemontools) are installed on the system, and
60284+ is therefore left as an option. If the sysctl option is enabled, a
60285+ sysctl option with name "chroot_execlog" is created.
60286+
60287+config GRKERNSEC_AUDIT_PTRACE
60288+ bool "Ptrace logging"
60289+ help
60290+ If you say Y here, all attempts to attach to a process via ptrace
60291+ will be logged. If the sysctl option is enabled, a sysctl option
60292+ with name "audit_ptrace" is created.
60293+
60294+config GRKERNSEC_AUDIT_CHDIR
60295+ bool "Chdir logging"
60296+ help
60297+ If you say Y here, all chdir() calls will be logged. If the sysctl
60298+ option is enabled, a sysctl option with name "audit_chdir" is created.
60299+
60300+config GRKERNSEC_AUDIT_MOUNT
60301+ bool "(Un)Mount logging"
60302+ help
60303+ If you say Y here, all mounts and unmounts will be logged. If the
60304+ sysctl option is enabled, a sysctl option with name "audit_mount" is
60305+ created.
60306+
60307+config GRKERNSEC_SIGNAL
60308+ bool "Signal logging"
60309+ default y if GRKERNSEC_CONFIG_AUTO
60310+ help
60311+ If you say Y here, certain important signals will be logged, such as
60312+ SIGSEGV, which will as a result inform you when an error in a program
60313+ occurred, which in some cases could indicate a possible exploit attempt.
60314+ If the sysctl option is enabled, a sysctl option with name
60315+ "signal_logging" is created.
60316+
60317+config GRKERNSEC_FORKFAIL
60318+ bool "Fork failure logging"
60319+ help
60320+ If you say Y here, all failed fork() attempts will be logged.
60321+ This could suggest a fork bomb, or someone attempting to overstep
60322+ their process limit. If the sysctl option is enabled, a sysctl option
60323+ with name "forkfail_logging" is created.
60324+
60325+config GRKERNSEC_TIME
60326+ bool "Time change logging"
60327+ default y if GRKERNSEC_CONFIG_AUTO
60328+ help
60329+ If you say Y here, any changes of the system clock will be logged.
60330+ If the sysctl option is enabled, a sysctl option with name
60331+ "timechange_logging" is created.
60332+
60333+config GRKERNSEC_PROC_IPADDR
60334+ bool "/proc/<pid>/ipaddr support"
60335+ default y if GRKERNSEC_CONFIG_AUTO
60336+ help
60337+ If you say Y here, a new entry will be added to each /proc/<pid>
60338+ directory that contains the IP address of the person using the task.
60339+ The IP is carried across local TCP and AF_UNIX stream sockets.
60340+ This information can be useful for IDS/IPSes to perform remote response
60341+ to a local attack. The entry is readable by only the owner of the
60342+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
60343+ the RBAC system), and thus does not create privacy concerns.
60344+
60345+config GRKERNSEC_RWXMAP_LOG
60346+	bool "Denied RWX mmap/mprotect logging"
60347+ default y if GRKERNSEC_CONFIG_AUTO
60348+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
60349+ help
60350+ If you say Y here, calls to mmap() and mprotect() with explicit
60351+ usage of PROT_WRITE and PROT_EXEC together will be logged when
60352+ denied by the PAX_MPROTECT feature. This feature will also
60353+ log other problematic scenarios that can occur when PAX_MPROTECT
60354+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
60355+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
60356+ is created.
60357+
60358+endmenu
60359+
60360+menu "Executable Protections"
60361+depends on GRKERNSEC
60362+
60363+config GRKERNSEC_DMESG
60364+ bool "Dmesg(8) restriction"
60365+ default y if GRKERNSEC_CONFIG_AUTO
60366+ help
60367+ If you say Y here, non-root users will not be able to use dmesg(8)
60368+ to view the contents of the kernel's circular log buffer.
60369+ The kernel's log buffer often contains kernel addresses and other
60370+ identifying information useful to an attacker in fingerprinting a
60371+ system for a targeted exploit.
60372+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
60373+ created.
60374+
60375+config GRKERNSEC_HARDEN_PTRACE
60376+ bool "Deter ptrace-based process snooping"
60377+ default y if GRKERNSEC_CONFIG_AUTO
60378+ help
60379+ If you say Y here, TTY sniffers and other malicious monitoring
60380+ programs implemented through ptrace will be defeated. If you
60381+ have been using the RBAC system, this option has already been
60382+ enabled for several years for all users, with the ability to make
60383+ fine-grained exceptions.
60384+
60385+ This option only affects the ability of non-root users to ptrace
60386+ processes that are not a descendant of the ptracing process.
60387+ This means that strace ./binary and gdb ./binary will still work,
60388+ but attaching to arbitrary processes will not. If the sysctl
60389+ option is enabled, a sysctl option with name "harden_ptrace" is
60390+ created.
60391+
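
A quick way to observe the behavior from an unprivileged shell (hypothetical test program; pass the PID of a process you did not spawn):

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>

	int main(int argc, char **argv)
	{
		pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : 1;

		if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
			perror("PTRACE_ATTACH"); /* EPERM when hardened */
		else
			puts("attached");
		return 0;
	}
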
60392+config GRKERNSEC_PTRACE_READEXEC
60393+ bool "Require read access to ptrace sensitive binaries"
60394+ default y if GRKERNSEC_CONFIG_AUTO
60395+ help
60396+ If you say Y here, unprivileged users will not be able to ptrace unreadable
60397+ binaries. This option is useful in environments that
60398+ remove the read bits (e.g. file mode 4711) from suid binaries to
60399+ prevent infoleaking of their contents. This option adds
60400+ consistency to the use of that file mode, as the binary's contents
60401+ could otherwise be read out by ptracing it while it runs without privileges.
60402+
60403+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
60404+ is created.
60405+
60406+config GRKERNSEC_SETXID
60407+ bool "Enforce consistent multithreaded privileges"
60408+ default y if GRKERNSEC_CONFIG_AUTO
60409+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
60410+ help
60411+ If you say Y here, a change from a root uid to a non-root uid
60412+ in a multithreaded application will cause the resulting uids,
60413+ gids, supplementary groups, and capabilities in that thread
60414+ to be propagated to the other threads of the process. In most
60415+ cases this is unnecessary, as glibc will emulate this behavior
60416+ on behalf of the application. Other libcs do not act in the
60417+ same way, allowing the other threads of the process to continue
60418+ running with root privileges. If the sysctl option is enabled,
60419+ a sysctl option with name "consistent_setxid" is created.
60420+
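
A sketch of the inconsistency this option closes, bypassing glibc's setxid broadcast with a raw syscall (compile with -pthread, run as root; the uid value is arbitrary): the kernel changes credentials only for the calling thread, so the worker keeps uid 0 unless libc or this option intervenes.

	#include <pthread.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static void *worker(void *unused)
	{
		sleep(1);	/* let the main thread drop privileges first */
		printf("worker uid: %d\n", (int)getuid()); /* 0 without help */
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, worker, NULL);
		syscall(SYS_setuid, 65534);	/* raw syscall: this thread only */
		printf("main   uid: %d\n", (int)getuid());
		pthread_join(t, NULL);
		return 0;
	}
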
60421+config GRKERNSEC_TPE
60422+ bool "Trusted Path Execution (TPE)"
60423+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
60424+ help
60425+ If you say Y here, you will be able to choose a gid to add to the
60426+ supplementary groups of users you want to mark as "untrusted."
60427+ These users will not be able to execute any files that are not in
60428+ root-owned directories writable only by root. If the sysctl option
60429+ is enabled, a sysctl option with name "tpe" is created.
60430+
60431+config GRKERNSEC_TPE_ALL
60432+ bool "Partially restrict all non-root users"
60433+ depends on GRKERNSEC_TPE
60434+ help
60435+ If you say Y here, all non-root users will be covered under
60436+ a weaker TPE restriction. This is separate from, and in addition to,
60437+ the main TPE options that you have selected elsewhere. Thus, if a
60438+ "trusted" GID is chosen, this restriction applies to even that GID.
60439+ Under this restriction, all non-root users will only be allowed to
60440+ execute files in directories they own that are not group or
60441+ world-writable, or in directories owned by root and writable only by
60442+ root. If the sysctl option is enabled, a sysctl option with name
60443+ "tpe_restrict_all" is created.
60444+
60445+config GRKERNSEC_TPE_INVERT
60446+ bool "Invert GID option"
60447+ depends on GRKERNSEC_TPE
60448+ help
60449+ If you say Y here, the group you specify in the TPE configuration will
60450+ decide what group TPE restrictions will be *disabled* for. This
60451+ option is useful if you want TPE restrictions to be applied to most
60452+ users on the system. If the sysctl option is enabled, a sysctl option
60453+ with name "tpe_invert" is created. Unlike other sysctl options, this
60454+ entry will default to on for backward-compatibility.
60455+
60456+config GRKERNSEC_TPE_GID
60457+ int
60458+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
60459+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
60460+
60461+config GRKERNSEC_TPE_UNTRUSTED_GID
60462+ int "GID for TPE-untrusted users"
60463+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
60464+ default 1005
60465+ help
60466+ Setting this GID determines what group TPE restrictions will be
60467+ *enabled* for. If the sysctl option is enabled, a sysctl option
60468+ with name "tpe_gid" is created.
60469+
60470+config GRKERNSEC_TPE_TRUSTED_GID
60471+ int "GID for TPE-trusted users"
60472+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
60473+ default 1005
60474+ help
60475+ Setting this GID determines what group TPE restrictions will be
60476+ *disabled* for. If the sysctl option is enabled, a sysctl option
60477+ with name "tpe_gid" is created.
60478+
60479+endmenu
60480+menu "Network Protections"
60481+depends on GRKERNSEC
60482+
60483+config GRKERNSEC_RANDNET
60484+ bool "Larger entropy pools"
60485+ default y if GRKERNSEC_CONFIG_AUTO
60486+ help
60487+ If you say Y here, the entropy pools used for many features of Linux
60488+ and grsecurity will be doubled in size. Since several grsecurity
60489+ features use additional randomness, it is recommended that you say Y
60490+ here. Saying Y here has a similar effect to modifying
60491+ /proc/sys/kernel/random/poolsize.
60492+
60493+config GRKERNSEC_BLACKHOLE
60494+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
60495+ default y if GRKERNSEC_CONFIG_AUTO
60496+ depends on NET
60497+ help
60498+ If you say Y here, neither TCP resets nor ICMP
60499+ destination-unreachable packets will be sent in response to packets
60500+ sent to ports for which no associated listening process exists.
60501+ This feature supports both IPv4 and IPv6 and exempts the
60502+ loopback interface from blackholing. Enabling this feature
60503+ makes a host more resilient to DoS attacks and reduces network
60504+ visibility against scanners.
60505+
60506+ The blackhole feature as-implemented is equivalent to the FreeBSD
60507+ blackhole feature, as it prevents RST responses to all packets, not
60508+ just SYNs. Under most application behavior this causes no
60509+ problems, but applications (like haproxy) may not close certain
60510+ connections in a way that cleanly terminates them on the remote
60511+ end, leaving the remote host in LAST_ACK state. Because of this
60512+ side-effect and to prevent intentional LAST_ACK DoSes, this
60513+ feature also adds automatic mitigation against such attacks.
60514+ The mitigation drastically reduces the amount of time a socket
60515+ can spend in LAST_ACK state. If you're using haproxy and not
60516+ all servers it connects to have this option enabled, consider
60517+ disabling this feature on the haproxy host.
60518+
60519+ If the sysctl option is enabled, two sysctl options with names
60520+ "ip_blackhole" and "lastack_retries" will be created.
60521+ While "ip_blackhole" takes the standard zero/non-zero on/off
60522+ toggle, "lastack_retries" uses the same kinds of values as
60523+ "tcp_retries1" and "tcp_retries2". The default value of 4
60524+ prevents a socket from lasting more than 45 seconds in LAST_ACK
60525+ state.
60526+
60527+config GRKERNSEC_NO_SIMULT_CONNECT
60528+ bool "Disable TCP Simultaneous Connect"
60529+ default y if GRKERNSEC_CONFIG_AUTO
60530+ depends on NET
60531+ help
60532+ If you say Y here, a feature by Willy Tarreau will be enabled that
60533+ removes a weakness in Linux's strict implementation of TCP that
60534+ allows two clients to connect to each other without either entering
60535+ a listening state. The weakness allows an attacker to easily prevent
60536+ a client from connecting to a known server provided the source port
60537+ for the connection is guessed correctly.
60538+
60539+ As the weakness could be used to prevent an antivirus or IPS from
60540+ fetching updates, or prevent an SSL gateway from fetching a CRL,
60541+ it should be eliminated by enabling this option. Though Linux is
60542+ one of few operating systems supporting simultaneous connect, it
60543+ has no legitimate use in practice and is rarely supported by firewalls.
60544+
60545+config GRKERNSEC_SOCKET
60546+ bool "Socket restrictions"
60547+ depends on NET
60548+ help
60549+ If you say Y here, you will be able to choose from several options.
60550+ If you assign a GID on your system and add it to the supplementary
60551+ groups of users you want to restrict socket access to, this patch
60552+ will perform up to three things, based on the option(s) you choose.
60553+
60554+config GRKERNSEC_SOCKET_ALL
60555+ bool "Deny any sockets to group"
60556+ depends on GRKERNSEC_SOCKET
60557+ help
60558+ If you say Y here, you will be able to choose a GID whose users will
60559+ be unable to connect to other hosts from your machine or run server
60560+ applications from your machine. If the sysctl option is enabled, a
60561+ sysctl option with name "socket_all" is created.
60562+
60563+config GRKERNSEC_SOCKET_ALL_GID
60564+ int "GID to deny all sockets for"
60565+ depends on GRKERNSEC_SOCKET_ALL
60566+ default 1004
60567+ help
60568+ Here you can choose the GID to disable socket access for. Remember to
60569+ add the users you want socket access disabled for to the GID
60570+ specified here. If the sysctl option is enabled, a sysctl option
60571+ with name "socket_all_gid" is created.
60572+
60573+config GRKERNSEC_SOCKET_CLIENT
60574+ bool "Deny client sockets to group"
60575+ depends on GRKERNSEC_SOCKET
60576+ help
60577+ If you say Y here, you will be able to choose a GID whose users will
60578+ be unable to connect to other hosts from your machine, but will be
60579+ able to run servers. If this option is enabled, all users in the group
60580+ you specify will have to use passive mode when initiating ftp transfers
60581+ from the shell on your machine. If the sysctl option is enabled, a
60582+ sysctl option with name "socket_client" is created.
60583+
60584+config GRKERNSEC_SOCKET_CLIENT_GID
60585+ int "GID to deny client sockets for"
60586+ depends on GRKERNSEC_SOCKET_CLIENT
60587+ default 1003
60588+ help
60589+ Here you can choose the GID to disable client socket access for.
60590+ Remember to add the users you want client socket access disabled for to
60591+ the GID specified here. If the sysctl option is enabled, a sysctl
60592+ option with name "socket_client_gid" is created.
60593+
60594+config GRKERNSEC_SOCKET_SERVER
60595+ bool "Deny server sockets to group"
60596+ depends on GRKERNSEC_SOCKET
60597+ help
60598+ If you say Y here, you will be able to choose a GID whose users will
60599+ be unable to run server applications from your machine. If the sysctl
60600+ option is enabled, a sysctl option with name "socket_server" is created.
60601+
60602+config GRKERNSEC_SOCKET_SERVER_GID
60603+ int "GID to deny server sockets for"
60604+ depends on GRKERNSEC_SOCKET_SERVER
60605+ default 1002
60606+ help
60607+ Here you can choose the GID to disable server socket access for.
60608+ Remember to add the users you want server socket access disabled for to
60609+ the GID specified here. If the sysctl option is enabled, a sysctl
60610+ option with name "socket_server_gid" is created.
60611+
60612+endmenu
60613+menu "Sysctl Support"
60614+depends on GRKERNSEC && SYSCTL
60615+
60616+config GRKERNSEC_SYSCTL
60617+ bool "Sysctl support"
60618+ default y if GRKERNSEC_CONFIG_AUTO
60619+ help
60620+ If you say Y here, you will be able to change the options that
60621+ grsecurity runs with at bootup, without having to recompile your
60622+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
60623+ to enable (1) or disable (0) various features. All the sysctl entries
60624+ are mutable until the "grsec_lock" entry is set to a non-zero value.
60625+ All features enabled in the kernel configuration are disabled at boot
60626+ if you do not say Y to the "Turn on features by default" option.
60627+ All options should be set at startup, and the grsec_lock entry should
60628+ be set to a non-zero value after all the options are set.
60629+ *THIS IS EXTREMELY IMPORTANT*
60630+
60631+config GRKERNSEC_SYSCTL_DISTRO
60632+ bool "Extra sysctl support for distro makers (READ HELP)"
60633+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
60634+ help
60635+ If you say Y here, additional sysctl options will be created
60636+ for features that affect processes running as root. Therefore,
60637+ it is critical when using this option that the grsec_lock entry be
60638+ enabled after boot. Only distros that ship prebuilt kernel packages
60639+ with this option enabled and that can ensure grsec_lock is set
60640+ after boot should use this option.
60641+ *Failure to set grsec_lock after boot makes all grsec features
60642+ this option covers useless*
60643+
60644+ Currently this option creates the following sysctl entries:
60645+ "Disable Privileged I/O": "disable_priv_io"
60646+
60647+config GRKERNSEC_SYSCTL_ON
60648+ bool "Turn on features by default"
60649+ default y if GRKERNSEC_CONFIG_AUTO
60650+ depends on GRKERNSEC_SYSCTL
60651+ help
60652+ If you say Y here, instead of having all features enabled in the
60653+ kernel configuration disabled at boot time, the features will be
60654+ enabled at boot time. It is recommended you say Y here unless
60655+ there is some reason you would want all sysctl-tunable features to
60656+ be disabled by default. As mentioned elsewhere, it is important
60657+ to enable the grsec_lock entry once you have finished modifying
60658+ the sysctl entries.
60659+
60660+endmenu
60661+menu "Logging Options"
60662+depends on GRKERNSEC
60663+
60664+config GRKERNSEC_FLOODTIME
60665+ int "Seconds in between log messages (minimum)"
60666+ default 10
60667+ help
60668+ This option allows you to enforce a minimum number of seconds between
60669+ grsecurity log messages. The default should be suitable for most
60670+ people; however, if you choose to change it, choose a value small enough
60671+ to allow informative logs to be produced, but large enough to
60672+ prevent flooding.
60673+
60674+config GRKERNSEC_FLOODBURST
60675+ int "Number of messages in a burst (maximum)"
60676+ default 6
60677+ help
60678+ This option allows you to choose the maximum number of messages allowed
60679+ within the flood time interval you chose in a separate option. The
60680+ default should be suitable for most people; however, if you find that
60681+ many of your logs are being interpreted as flooding, you may want to
60682+ raise this value.
60683+
60684+endmenu
60685diff --git a/grsecurity/Makefile b/grsecurity/Makefile
60686new file mode 100644
60687index 0000000..36845aa
60688--- /dev/null
60689+++ b/grsecurity/Makefile
60690@@ -0,0 +1,42 @@
60691+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
60692+# during 2001-2009 it was completely redesigned by Brad Spengler
60693+# into an RBAC system
60694+#
60695+# All code in this directory and various hooks inserted throughout the kernel
60696+# are copyright Brad Spengler - Open Source Security, Inc., and released
60697+# under the GPL v2 or higher
60698+
60699+KBUILD_CFLAGS += -Werror
60700+
60701+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
60702+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
60703+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
60704+
60705+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
60706+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
60707+ gracl_learn.o grsec_log.o
60708+ifdef CONFIG_COMPAT
60709+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
60710+endif
60711+
60712+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
60713+
60714+ifdef CONFIG_NET
60715+obj-y += grsec_sock.o
60716+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
60717+endif
60718+
60719+ifndef CONFIG_GRKERNSEC
60720+obj-y += grsec_disabled.o
60721+endif
60722+
60723+ifdef CONFIG_GRKERNSEC_HIDESYM
60724+extra-y := grsec_hidesym.o
60725+$(obj)/grsec_hidesym.o:
60726+ @-chmod -f 500 /boot
60727+ @-chmod -f 500 /lib/modules
60728+ @-chmod -f 500 /lib64/modules
60729+ @-chmod -f 500 /lib32/modules
60730+ @-chmod -f 700 .
60731+ @echo ' grsec: protected kernel image paths'
60732+endif
60733diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
60734new file mode 100644
60735index 0000000..c0793fd
60736--- /dev/null
60737+++ b/grsecurity/gracl.c
60738@@ -0,0 +1,4178 @@
60739+#include <linux/kernel.h>
60740+#include <linux/module.h>
60741+#include <linux/sched.h>
60742+#include <linux/mm.h>
60743+#include <linux/file.h>
60744+#include <linux/fs.h>
60745+#include <linux/namei.h>
60746+#include <linux/mount.h>
60747+#include <linux/tty.h>
60748+#include <linux/proc_fs.h>
60749+#include <linux/lglock.h>
60750+#include <linux/slab.h>
60751+#include <linux/vmalloc.h>
60752+#include <linux/types.h>
60753+#include <linux/sysctl.h>
60754+#include <linux/netdevice.h>
60755+#include <linux/ptrace.h>
60756+#include <linux/gracl.h>
60757+#include <linux/gralloc.h>
60758+#include <linux/security.h>
60759+#include <linux/grinternal.h>
60760+#include <linux/pid_namespace.h>
60761+#include <linux/stop_machine.h>
60762+#include <linux/fdtable.h>
60763+#include <linux/percpu.h>
60765+#include <linux/hugetlb.h>
60766+#include <linux/posix-timers.h>
60767+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
60768+#include <linux/magic.h>
60769+#include <linux/pagemap.h>
60770+#include "../fs/btrfs/async-thread.h"
60771+#include "../fs/btrfs/ctree.h"
60772+#include "../fs/btrfs/btrfs_inode.h"
60773+#endif
60774+#include "../fs/mount.h"
60775+
60776+#include <asm/uaccess.h>
60777+#include <asm/errno.h>
60778+#include <asm/mman.h>
60779+
60780+extern struct lglock vfsmount_lock;
60781+
60782+static struct acl_role_db acl_role_set;
60783+static struct name_db name_set;
60784+static struct inodev_db inodev_set;
60785+
60786+/* for keeping track of userspace pointers used for subjects, so we
60787+ can share references in the kernel as well
60788+*/
60789+
60790+static struct path real_root;
60791+
60792+static struct acl_subj_map_db subj_map_set;
60793+
60794+static struct acl_role_label *default_role;
60795+
60796+static struct acl_role_label *role_list;
60797+
60798+static u16 acl_sp_role_value;
60799+
60800+extern char *gr_shared_page[4];
60801+static DEFINE_MUTEX(gr_dev_mutex);
60802+DEFINE_RWLOCK(gr_inode_lock);
60803+
60804+struct gr_arg *gr_usermode;
60805+
60806+static unsigned int gr_status __read_only = GR_STATUS_INIT;
60807+
60808+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
60809+extern void gr_clear_learn_entries(void);
60810+
60811+unsigned char *gr_system_salt;
60812+unsigned char *gr_system_sum;
60813+
60814+static struct sprole_pw **acl_special_roles = NULL;
60815+static __u16 num_sprole_pws = 0;
60816+
60817+static struct acl_role_label *kernel_role = NULL;
60818+
60819+static unsigned int gr_auth_attempts = 0;
60820+static unsigned long gr_auth_expires = 0UL;
60821+
60822+#ifdef CONFIG_NET
60823+extern struct vfsmount *sock_mnt;
60824+#endif
60825+
60826+extern struct vfsmount *pipe_mnt;
60827+extern struct vfsmount *shm_mnt;
60828+
60829+#ifdef CONFIG_HUGETLBFS
60830+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
60831+#endif
60832+
60833+static struct acl_object_label *fakefs_obj_rw;
60834+static struct acl_object_label *fakefs_obj_rwx;
60835+
60836+extern int gr_init_uidset(void);
60837+extern void gr_free_uidset(void);
60838+extern void gr_remove_uid(uid_t uid);
60839+extern int gr_find_uid(uid_t uid);
60840+
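+/* Each copy_*_normal helper below is a thin copy_from_user() wrapper for
+ * one userland policy structure.  Under CONFIG_COMPAT the copy_* names
+ * are function pointers (declared further down) that can be pointed at
+ * either these or their 32-bit compat counterparts; without COMPAT the
+ * #define block aliases the generic names directly to the _normal
+ * variants. */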
60841+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
60842+{
60843+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
60844+ return -EFAULT;
60845+
60846+ return 0;
60847+}
60848+
60849+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
60850+{
60851+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
60852+ return -EFAULT;
60853+
60854+ return 0;
60855+}
60856+
60857+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
60858+{
60859+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
60860+ return -EFAULT;
60861+
60862+ return 0;
60863+}
60864+
60865+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
60866+{
60867+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
60868+ return -EFAULT;
60869+
60870+ return 0;
60871+}
60872+
60873+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
60874+{
60875+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
60876+ return -EFAULT;
60877+
60878+ return 0;
60879+}
60880+
60881+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
60882+{
60883+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
60884+ return -EFAULT;
60885+
60886+ return 0;
60887+}
60888+
60889+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
60890+{
60891+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
60892+ return -EFAULT;
60893+
60894+ return 0;
60895+}
60896+
60897+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
60898+{
60899+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
60900+ return -EFAULT;
60901+
60902+ return 0;
60903+}
60904+
60905+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
60906+{
60907+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
60908+ return -EFAULT;
60909+
60910+ return 0;
60911+}
60912+
60913+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
60914+{
60915+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
60916+ return -EFAULT;
60917+
60918+ if ((uwrap->version != GRSECURITY_VERSION) || (uwrap->size != sizeof(struct gr_arg)))
60919+ return -EINVAL;
60920+
60921+ return 0;
60922+}
60923+
60924+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
60925+{
60926+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
60927+ return -EFAULT;
60928+
60929+ return 0;
60930+}
60931+
60932+static size_t get_gr_arg_wrapper_size_normal(void)
60933+{
60934+ return sizeof(struct gr_arg_wrapper);
60935+}
60936+
60937+#ifdef CONFIG_COMPAT
60938+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
60939+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
60940+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
60941+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
60942+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
60943+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
60944+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
60945+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
60946+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
60947+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
60948+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
60949+extern size_t get_gr_arg_wrapper_size_compat(void);
60950+
60951+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
60952+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
60953+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
60954+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
60955+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
60956+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
60957+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
60958+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
60959+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
60960+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
60961+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
60962+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
60963+
60964+#else
60965+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
60966+#define copy_gr_arg copy_gr_arg_normal
60967+#define copy_gr_hash_struct copy_gr_hash_struct_normal
60968+#define copy_acl_object_label copy_acl_object_label_normal
60969+#define copy_acl_subject_label copy_acl_subject_label_normal
60970+#define copy_acl_role_label copy_acl_role_label_normal
60971+#define copy_acl_ip_label copy_acl_ip_label_normal
60972+#define copy_pointer_from_array copy_pointer_from_array_normal
60973+#define copy_sprole_pw copy_sprole_pw_normal
60974+#define copy_role_transition copy_role_transition_normal
60975+#define copy_role_allowed_ip copy_role_allowed_ip_normal
60976+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
60977+#endif
60978+
60979+__inline__ int
60980+gr_acl_is_enabled(void)
60981+{
60982+ return (gr_status & GR_READY);
60983+}
60984+
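+/* btrfs reports a per-subvolume anonymous device number to userspace in
+ * stat()'s st_dev, while d_sb->s_dev is shared by every subvolume; use
+ * the anonymous device so inode/device matching lines up with the
+ * numbers userland policy tools record. */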
60985+static inline dev_t __get_dev(const struct dentry *dentry)
60986+{
60987+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
60988+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
60989+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
60990+ else
60991+#endif
60992+ return dentry->d_sb->s_dev;
60993+}
60994+
60995+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
60996+{
60997+ return __get_dev(dentry);
60998+}
60999+
61000+static char gr_task_roletype_to_char(struct task_struct *task)
61001+{
61002+ switch (task->role->roletype &
61003+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
61004+ GR_ROLE_SPECIAL)) {
61005+ case GR_ROLE_DEFAULT:
61006+ return 'D';
61007+ case GR_ROLE_USER:
61008+ return 'U';
61009+ case GR_ROLE_GROUP:
61010+ return 'G';
61011+ case GR_ROLE_SPECIAL:
61012+ return 'S';
61013+ }
61014+
61015+ return 'X';
61016+}
61017+
61018+char gr_roletype_to_char(void)
61019+{
61020+ return gr_task_roletype_to_char(current);
61021+}
61022+
61023+__inline__ int
61024+gr_acl_tpe_check(void)
61025+{
61026+ if (unlikely(!(gr_status & GR_READY)))
61027+ return 0;
61028+ if (current->role->roletype & GR_ROLE_TPE)
61029+ return 1;
61030+ else
61031+ return 0;
61032+}
61033+
61034+int
61035+gr_handle_rawio(const struct inode *inode)
61036+{
61037+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61038+ if (inode && S_ISBLK(inode->i_mode) &&
61039+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
61040+ !capable(CAP_SYS_RAWIO))
61041+ return 1;
61042+#endif
61043+ return 0;
61044+}
61045+
61046+static int
61047+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
61048+{
61049+ if (likely(lena != lenb))
61050+ return 0;
61051+
61052+ return !memcmp(a, b, lena);
61053+}
61054+
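+/* Path strings are assembled backwards: the buffer is consumed from its
+ * end, each call moving *buffer down by namelen before copying, so the
+ * finished string grows leftwards toward the front of the buffer. */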
61055+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
61056+{
61057+ *buflen -= namelen;
61058+ if (*buflen < 0)
61059+ return -ENAMETOOLONG;
61060+ *buffer -= namelen;
61061+ memcpy(*buffer, str, namelen);
61062+ return 0;
61063+}
61064+
61065+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
61066+{
61067+ return prepend(buffer, buflen, name->name, name->len);
61068+}
61069+
61070+static int prepend_path(const struct path *path, struct path *root,
61071+ char **buffer, int *buflen)
61072+{
61073+ struct dentry *dentry = path->dentry;
61074+ struct vfsmount *vfsmnt = path->mnt;
61075+ struct mount *mnt = real_mount(vfsmnt);
61076+ bool slash = false;
61077+ int error = 0;
61078+
61079+ while (dentry != root->dentry || vfsmnt != root->mnt) {
61080+ struct dentry * parent;
61081+
61082+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
61083+ /* Global root? */
61084+ if (!mnt_has_parent(mnt)) {
61085+ goto out;
61086+ }
61087+ dentry = mnt->mnt_mountpoint;
61088+ mnt = mnt->mnt_parent;
61089+ vfsmnt = &mnt->mnt;
61090+ continue;
61091+ }
61092+ parent = dentry->d_parent;
61093+ prefetch(parent);
61094+ spin_lock(&dentry->d_lock);
61095+ error = prepend_name(buffer, buflen, &dentry->d_name);
61096+ spin_unlock(&dentry->d_lock);
61097+ if (!error)
61098+ error = prepend(buffer, buflen, "/", 1);
61099+ if (error)
61100+ break;
61101+
61102+ slash = true;
61103+ dentry = parent;
61104+ }
61105+
61106+out:
61107+ if (!error && !slash)
61108+ error = prepend(buffer, buflen, "/", 1);
61109+
61110+ return error;
61111+}
61112+
61113+/* this must be called with vfsmount_lock and rename_lock held */
61114+
61115+static char *__our_d_path(const struct path *path, struct path *root,
61116+ char *buf, int buflen)
61117+{
61118+ char *res = buf + buflen;
61119+ int error;
61120+
61121+ prepend(&res, &buflen, "\0", 1);
61122+ error = prepend_path(path, root, &res, &buflen);
61123+ if (error)
61124+ return ERR_PTR(error);
61125+
61126+ return res;
61127+}
61128+
61129+static char *
61130+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
61131+{
61132+ char *retval;
61133+
61134+ retval = __our_d_path(path, root, buf, buflen);
61135+ if (unlikely(IS_ERR(retval)))
61136+ retval = strcpy(buf, "<path too long>");
61137+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
61138+ retval[1] = '\0';
61139+
61140+ return retval;
61141+}
61142+
61143+static char *
61144+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
61145+ char *buf, int buflen)
61146+{
61147+ struct path path;
61148+ char *res;
61149+
61150+ path.dentry = (struct dentry *)dentry;
61151+ path.mnt = (struct vfsmount *)vfsmnt;
61152+
61153+ /* we can use real_root.dentry, real_root.mnt, because this is only called
61154+ by the RBAC system */
61155+ res = gen_full_path(&path, &real_root, buf, buflen);
61156+
61157+ return res;
61158+}
61159+
61160+static char *
61161+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
61162+ char *buf, int buflen)
61163+{
61164+ char *res;
61165+ struct path path;
61166+ struct path root;
61167+ struct task_struct *reaper = init_pid_ns.child_reaper;
61168+
61169+ path.dentry = (struct dentry *)dentry;
61170+ path.mnt = (struct vfsmount *)vfsmnt;
61171+
61172+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
61173+ get_fs_root(reaper->fs, &root);
61174+
61175+ br_read_lock(&vfsmount_lock);
61176+ write_seqlock(&rename_lock);
61177+ res = gen_full_path(&path, &root, buf, buflen);
61178+ write_sequnlock(&rename_lock);
61179+ br_read_unlock(&vfsmount_lock);
61180+
61181+ path_put(&root);
61182+ return res;
61183+}
61184+
61185+static char *
61186+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
61187+{
61188+ char *ret;
61189+ br_read_lock(&vfsmount_lock);
61190+ write_seqlock(&rename_lock);
61191+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
61192+ PAGE_SIZE);
61193+ write_sequnlock(&rename_lock);
61194+ br_read_unlock(&vfsmount_lock);
61195+ return ret;
61196+}
61197+
61198+static char *
61199+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
61200+{
61201+ char *ret;
61202+ char *buf;
61203+ int buflen;
61204+
61205+ br_read_lock(&vfsmount_lock);
61206+ write_seqlock(&rename_lock);
61207+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
61208+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
61209+ buflen = (int)(ret - buf);
61210+ if (buflen >= 5)
61211+ prepend(&ret, &buflen, "/proc", 5);
61212+ else
61213+ ret = strcpy(buf, "<path too long>");
61214+ write_sequnlock(&rename_lock);
61215+ br_read_unlock(&vfsmount_lock);
61216+ return ret;
61217+}
61218+
61219+char *
61220+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
61221+{
61222+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
61223+ PAGE_SIZE);
61224+}
61225+
61226+char *
61227+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
61228+{
61229+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
61230+ PAGE_SIZE);
61231+}
61232+
61233+char *
61234+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
61235+{
61236+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
61237+ PAGE_SIZE);
61238+}
61239+
61240+char *
61241+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
61242+{
61243+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
61244+ PAGE_SIZE);
61245+}
61246+
61247+char *
61248+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
61249+{
61250+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
61251+ PAGE_SIZE);
61252+}
61253+
61254+__inline__ __u32
61255+to_gr_audit(const __u32 reqmode)
61256+{
61257+ /* masks off auditable permission flags, then shifts them to create
61258+ auditing flags, and adds the special case of append auditing if
61259+ we're requesting write */
61260+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
61261+}
61262+
61263+struct acl_subject_label *
61264+lookup_subject_map(const struct acl_subject_label *userp)
61265+{
61266+ unsigned int index = gr_shash(userp, subj_map_set.s_size);
61267+ struct subject_map *match;
61268+
61269+ match = subj_map_set.s_hash[index];
61270+
61271+ while (match && match->user != userp)
61272+ match = match->next;
61273+
61274+ if (match != NULL)
61275+ return match->kernel;
61276+ else
61277+ return NULL;
61278+}
61279+
61280+static void
61281+insert_subj_map_entry(struct subject_map *subjmap)
61282+{
61283+ unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
61284+ struct subject_map **curr;
61285+
61286+ subjmap->prev = NULL;
61287+
61288+ curr = &subj_map_set.s_hash[index];
61289+ if (*curr != NULL)
61290+ (*curr)->prev = subjmap;
61291+
61292+ subjmap->next = *curr;
61293+ *curr = subjmap;
61294+
61295+ return;
61296+}
61297+
61298+static struct acl_role_label *
61299+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
61300+ const gid_t gid)
61301+{
61302+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
61303+ struct acl_role_label *match;
61304+ struct role_allowed_ip *ipp;
61305+ unsigned int x;
61306+ u32 curr_ip = task->signal->curr_ip;
61307+
61308+ task->signal->saved_ip = curr_ip;
61309+
61310+ match = acl_role_set.r_hash[index];
61311+
61312+ while (match) {
61313+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
61314+ for (x = 0; x < match->domain_child_num; x++) {
61315+ if (match->domain_children[x] == uid)
61316+ goto found;
61317+ }
61318+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
61319+ break;
61320+ match = match->next;
61321+ }
61322+found:
61323+ if (match == NULL) {
61324+ try_group:
61325+ index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
61326+ match = acl_role_set.r_hash[index];
61327+
61328+ while (match) {
61329+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
61330+ for (x = 0; x < match->domain_child_num; x++) {
61331+ if (match->domain_children[x] == gid)
61332+ goto found2;
61333+ }
61334+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
61335+ break;
61336+ match = match->next;
61337+ }
61338+found2:
61339+ if (match == NULL)
61340+ match = default_role;
61341+ if (match->allowed_ips == NULL)
61342+ return match;
61343+ else {
61344+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
61345+ if (likely
61346+ ((ntohl(curr_ip) & ipp->netmask) ==
61347+ (ntohl(ipp->addr) & ipp->netmask)))
61348+ return match;
61349+ }
61350+ match = default_role;
61351+ }
61352+ } else if (match->allowed_ips == NULL) {
61353+ return match;
61354+ } else {
61355+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
61356+ if (likely
61357+ ((ntohl(curr_ip) & ipp->netmask) ==
61358+ (ntohl(ipp->addr) & ipp->netmask)))
61359+ return match;
61360+ }
61361+ goto try_group;
61362+ }
61363+
61364+ return match;
61365+}
61366+
61367+struct acl_subject_label *
61368+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
61369+ const struct acl_role_label *role)
61370+{
61371+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
61372+ struct acl_subject_label *match;
61373+
61374+ match = role->subj_hash[index];
61375+
61376+ while (match && (match->inode != ino || match->device != dev ||
61377+ (match->mode & GR_DELETED))) {
61378+ match = match->next;
61379+ }
61380+
61381+ if (match && !(match->mode & GR_DELETED))
61382+ return match;
61383+ else
61384+ return NULL;
61385+}
61386+
61387+struct acl_subject_label *
61388+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
61389+ const struct acl_role_label *role)
61390+{
61391+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
61392+ struct acl_subject_label *match;
61393+
61394+ match = role->subj_hash[index];
61395+
61396+ while (match && (match->inode != ino || match->device != dev ||
61397+ !(match->mode & GR_DELETED))) {
61398+ match = match->next;
61399+ }
61400+
61401+ if (match && (match->mode & GR_DELETED))
61402+ return match;
61403+ else
61404+ return NULL;
61405+}
61406+
61407+static struct acl_object_label *
61408+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
61409+ const struct acl_subject_label *subj)
61410+{
61411+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
61412+ struct acl_object_label *match;
61413+
61414+ match = subj->obj_hash[index];
61415+
61416+ while (match && (match->inode != ino || match->device != dev ||
61417+ (match->mode & GR_DELETED))) {
61418+ match = match->next;
61419+ }
61420+
61421+ if (match && !(match->mode & GR_DELETED))
61422+ return match;
61423+ else
61424+ return NULL;
61425+}
61426+
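+/* Create-path variant: the first scan prefers an entry for this
+ * inode/device pair marked GR_DELETED (so a label for a deleted file can
+ * be re-used by its replacement); only if none exists does the second
+ * scan fall back to a live, non-deleted entry. */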
61427+static struct acl_object_label *
61428+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
61429+ const struct acl_subject_label *subj)
61430+{
61431+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
61432+ struct acl_object_label *match;
61433+
61434+ match = subj->obj_hash[index];
61435+
61436+ while (match && (match->inode != ino || match->device != dev ||
61437+ !(match->mode & GR_DELETED))) {
61438+ match = match->next;
61439+ }
61440+
61441+ if (match && (match->mode & GR_DELETED))
61442+ return match;
61443+
61444+ match = subj->obj_hash[index];
61445+
61446+ while (match && (match->inode != ino || match->device != dev ||
61447+ (match->mode & GR_DELETED))) {
61448+ match = match->next;
61449+ }
61450+
61451+ if (match && !(match->mode & GR_DELETED))
61452+ return match;
61453+ else
61454+ return NULL;
61455+}
61456+
61457+static struct name_entry *
61458+lookup_name_entry(const char *name)
61459+{
61460+ unsigned int len = strlen(name);
61461+ unsigned int key = full_name_hash(name, len);
61462+ unsigned int index = key % name_set.n_size;
61463+ struct name_entry *match;
61464+
61465+ match = name_set.n_hash[index];
61466+
61467+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
61468+ match = match->next;
61469+
61470+ return match;
61471+}
61472+
61473+static struct name_entry *
61474+lookup_name_entry_create(const char *name)
61475+{
61476+ unsigned int len = strlen(name);
61477+ unsigned int key = full_name_hash(name, len);
61478+ unsigned int index = key % name_set.n_size;
61479+ struct name_entry *match;
61480+
61481+ match = name_set.n_hash[index];
61482+
61483+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
61484+ !match->deleted))
61485+ match = match->next;
61486+
61487+ if (match && match->deleted)
61488+ return match;
61489+
61490+ match = name_set.n_hash[index];
61491+
61492+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
61493+ match->deleted))
61494+ match = match->next;
61495+
61496+ if (match && !match->deleted)
61497+ return match;
61498+ else
61499+ return NULL;
61500+}
61501+
61502+static struct inodev_entry *
61503+lookup_inodev_entry(const ino_t ino, const dev_t dev)
61504+{
61505+ unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
61506+ struct inodev_entry *match;
61507+
61508+ match = inodev_set.i_hash[index];
61509+
61510+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
61511+ match = match->next;
61512+
61513+ return match;
61514+}
61515+
61516+static void
61517+insert_inodev_entry(struct inodev_entry *entry)
61518+{
61519+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
61520+ inodev_set.i_size);
61521+ struct inodev_entry **curr;
61522+
61523+ entry->prev = NULL;
61524+
61525+ curr = &inodev_set.i_hash[index];
61526+ if (*curr != NULL)
61527+ (*curr)->prev = entry;
61528+
61529+ entry->next = *curr;
61530+ *curr = entry;
61531+
61532+ return;
61533+}
61534+
61535+static void
61536+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
61537+{
61538+ unsigned int index =
61539+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
61540+ struct acl_role_label **curr;
61541+ struct acl_role_label *tmp, *tmp2;
61542+
61543+ curr = &acl_role_set.r_hash[index];
61544+
61545+ /* simple case, slot is empty, just set it to our role */
61546+ if (*curr == NULL) {
61547+ *curr = role;
61548+ } else {
61549+ /* example:
61550+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
61551+ 2 -> 3
61552+ */
61553+ /* first check to see if we can already be reached via this slot */
61554+ tmp = *curr;
61555+ while (tmp && tmp != role)
61556+ tmp = tmp->next;
61557+ if (tmp == role) {
61558+ /* we don't need to add ourselves to this slot's chain */
61559+ return;
61560+ }
61561+ /* we need to add ourselves to this chain, two cases */
61562+ if (role->next == NULL) {
61563+ /* simple case, append the current chain to our role */
61564+ role->next = *curr;
61565+ *curr = role;
61566+ } else {
61567+ /* 1 -> 2 -> 3 -> 4
61568+ 2 -> 3 -> 4
61569+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
61570+ */
61571+ /* trickier case: walk our role's chain until we find
61572+ the role for the start of the current slot's chain */
61573+ tmp = role;
61574+ tmp2 = *curr;
61575+ while (tmp->next && tmp->next != tmp2)
61576+ tmp = tmp->next;
61577+ if (tmp->next == tmp2) {
61578+ /* from example above, we found 3, so just
61579+ replace this slot's chain with ours */
61580+ *curr = role;
61581+ } else {
61582+ /* we didn't find a subset of our role's chain
61583+ in the current slot's chain, so append their
61584+ chain to ours, and set us as the first role in
61585+ the slot's chain
61586+
 61587+ we could fold this case into the case above,
 61588+ but we keep it explicit for clarity
61589+ */
61590+ tmp->next = tmp2;
61591+ *curr = role;
61592+ }
61593+ }
61594+ }
61595+
61596+ return;
61597+}
61598+
61599+static void
61600+insert_acl_role_label(struct acl_role_label *role)
61601+{
61602+ int i;
61603+
61604+ if (role_list == NULL) {
61605+ role_list = role;
61606+ role->prev = NULL;
61607+ } else {
61608+ role->prev = role_list;
61609+ role_list = role;
61610+ }
61611+
61612+ /* used for hash chains */
61613+ role->next = NULL;
61614+
61615+ if (role->roletype & GR_ROLE_DOMAIN) {
61616+ for (i = 0; i < role->domain_child_num; i++)
61617+ __insert_acl_role_label(role, role->domain_children[i]);
61618+ } else
61619+ __insert_acl_role_label(role, role->uidgid);
61620+}
61621+
61622+static int
61623+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
61624+{
61625+ struct name_entry **curr, *nentry;
61626+ struct inodev_entry *ientry;
61627+ unsigned int len = strlen(name);
61628+ unsigned int key = full_name_hash(name, len);
61629+ unsigned int index = key % name_set.n_size;
61630+
61631+ curr = &name_set.n_hash[index];
61632+
61633+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
61634+ curr = &((*curr)->next);
61635+
61636+ if (*curr != NULL)
61637+ return 1;
61638+
61639+ nentry = acl_alloc(sizeof (struct name_entry));
61640+ if (nentry == NULL)
61641+ return 0;
61642+ ientry = acl_alloc(sizeof (struct inodev_entry));
61643+ if (ientry == NULL)
61644+ return 0;
61645+ ientry->nentry = nentry;
61646+
61647+ nentry->key = key;
61648+ nentry->name = name;
61649+ nentry->inode = inode;
61650+ nentry->device = device;
61651+ nentry->len = len;
61652+ nentry->deleted = deleted;
61653+
61654+ nentry->prev = NULL;
61655+ curr = &name_set.n_hash[index];
61656+ if (*curr != NULL)
61657+ (*curr)->prev = nentry;
61658+ nentry->next = *curr;
61659+ *curr = nentry;
61660+
61661+ /* insert us into the table searchable by inode/dev */
61662+ insert_inodev_entry(ientry);
61663+
61664+ return 1;
61665+}
61666+
61667+static void
61668+insert_acl_obj_label(struct acl_object_label *obj,
61669+ struct acl_subject_label *subj)
61670+{
61671+ unsigned int index =
61672+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
61673+ struct acl_object_label **curr;
61674+
61675+
61676+ obj->prev = NULL;
61677+
61678+ curr = &subj->obj_hash[index];
61679+ if (*curr != NULL)
61680+ (*curr)->prev = obj;
61681+
61682+ obj->next = *curr;
61683+ *curr = obj;
61684+
61685+ return;
61686+}
61687+
61688+static void
61689+insert_acl_subj_label(struct acl_subject_label *obj,
61690+ struct acl_role_label *role)
61691+{
61692+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
61693+ struct acl_subject_label **curr;
61694+
61695+ obj->prev = NULL;
61696+
61697+ curr = &role->subj_hash[index];
61698+ if (*curr != NULL)
61699+ (*curr)->prev = obj;
61700+
61701+ obj->next = *curr;
61702+ *curr = obj;
61703+
61704+ return;
61705+}
61706+
 61707+/* allocating chained hash tables, so the optimal size is where the load factor (lambda) is ~1 */
61708+
61709+static void *
61710+create_table(__u32 * len, int elementsize)
61711+{
61712+ unsigned int table_sizes[] = {
61713+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
61714+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
61715+ 4194301, 8388593, 16777213, 33554393, 67108859
61716+ };
61717+ void *newtable = NULL;
61718+ unsigned int pwr = 0;
61719+
61720+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
61721+ table_sizes[pwr] <= *len)
61722+ pwr++;
61723+
61724+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
61725+ return newtable;
61726+
61727+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
61728+ newtable =
61729+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
61730+ else
61731+ newtable = vmalloc(table_sizes[pwr] * elementsize);
61732+
61733+ *len = table_sizes[pwr];
61734+
61735+ return newtable;
61736+}
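+
+/* [editor's note: illustrative comment, not part of the original patch]
+ create_table() picks the first prime in table_sizes[] strictly greater
+ than the requested element count, so with chaining the expected load
+ factor stays near 1. For example, a request of *len == 1000 allocates
+ 1021 buckets and writes 1021 back through *len; a request at or beyond
+ the largest prime, or one whose allocation size would overflow
+ ULONG_MAX, returns NULL. Tables of a page or less come from kmalloc(),
+ larger ones fall back to vmalloc().
+*/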
61737+
61738+static int
61739+init_variables(const struct gr_arg *arg)
61740+{
61741+ struct task_struct *reaper = init_pid_ns.child_reaper;
61742+ unsigned int stacksize;
61743+
61744+ subj_map_set.s_size = arg->role_db.num_subjects;
61745+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
61746+ name_set.n_size = arg->role_db.num_objects;
61747+ inodev_set.i_size = arg->role_db.num_objects;
61748+
61749+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
61750+ !name_set.n_size || !inodev_set.i_size)
61751+ return 1;
61752+
61753+ if (!gr_init_uidset())
61754+ return 1;
61755+
61756+ /* set up the stack that holds allocation info */
61757+
61758+ stacksize = arg->role_db.num_pointers + 5;
61759+
61760+ if (!acl_alloc_stack_init(stacksize))
61761+ return 1;
61762+
61763+ /* grab reference for the real root dentry and vfsmount */
61764+ get_fs_root(reaper->fs, &real_root);
61765+
61766+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
61767+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
61768+#endif
61769+
61770+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
61771+ if (fakefs_obj_rw == NULL)
61772+ return 1;
61773+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
61774+
61775+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
61776+ if (fakefs_obj_rwx == NULL)
61777+ return 1;
61778+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
61779+
61780+ subj_map_set.s_hash =
61781+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
61782+ acl_role_set.r_hash =
61783+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
61784+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
61785+ inodev_set.i_hash =
61786+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
61787+
61788+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
61789+ !name_set.n_hash || !inodev_set.i_hash)
61790+ return 1;
61791+
61792+ memset(subj_map_set.s_hash, 0,
61793+ sizeof(struct subject_map *) * subj_map_set.s_size);
61794+ memset(acl_role_set.r_hash, 0,
61795+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
61796+ memset(name_set.n_hash, 0,
61797+ sizeof (struct name_entry *) * name_set.n_size);
61798+ memset(inodev_set.i_hash, 0,
61799+ sizeof (struct inodev_entry *) * inodev_set.i_size);
61800+
61801+ return 0;
61802+}
61803+
 61804+/* free information not needed after startup;
 61805+ currently this is the user->kernel pointer mappings for subjects
 61806+*/
61807+
61808+static void
61809+free_init_variables(void)
61810+{
61811+ __u32 i;
61812+
61813+ if (subj_map_set.s_hash) {
61814+ for (i = 0; i < subj_map_set.s_size; i++) {
61815+ if (subj_map_set.s_hash[i]) {
61816+ kfree(subj_map_set.s_hash[i]);
61817+ subj_map_set.s_hash[i] = NULL;
61818+ }
61819+ }
61820+
61821+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
61822+ PAGE_SIZE)
61823+ kfree(subj_map_set.s_hash);
61824+ else
61825+ vfree(subj_map_set.s_hash);
61826+ }
61827+
61828+ return;
61829+}
61830+
61831+static void
61832+free_variables(void)
61833+{
61834+ struct acl_subject_label *s;
61835+ struct acl_role_label *r;
61836+ struct task_struct *task, *task2;
61837+ unsigned int x;
61838+
61839+ gr_clear_learn_entries();
61840+
61841+ read_lock(&tasklist_lock);
61842+ do_each_thread(task2, task) {
61843+ task->acl_sp_role = 0;
61844+ task->acl_role_id = 0;
61845+ task->acl = NULL;
61846+ task->role = NULL;
61847+ } while_each_thread(task2, task);
61848+ read_unlock(&tasklist_lock);
61849+
61850+ /* release the reference to the real root dentry and vfsmount */
61851+ path_put(&real_root);
61852+ memset(&real_root, 0, sizeof(real_root));
61853+
61854+ /* free all object hash tables */
61855+
61856+ FOR_EACH_ROLE_START(r)
61857+ if (r->subj_hash == NULL)
61858+ goto next_role;
61859+ FOR_EACH_SUBJECT_START(r, s, x)
61860+ if (s->obj_hash == NULL)
61861+ break;
61862+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
61863+ kfree(s->obj_hash);
61864+ else
61865+ vfree(s->obj_hash);
61866+ FOR_EACH_SUBJECT_END(s, x)
61867+ FOR_EACH_NESTED_SUBJECT_START(r, s)
61868+ if (s->obj_hash == NULL)
61869+ break;
61870+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
61871+ kfree(s->obj_hash);
61872+ else
61873+ vfree(s->obj_hash);
61874+ FOR_EACH_NESTED_SUBJECT_END(s)
61875+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
61876+ kfree(r->subj_hash);
61877+ else
61878+ vfree(r->subj_hash);
61879+ r->subj_hash = NULL;
61880+next_role:
61881+ FOR_EACH_ROLE_END(r)
61882+
61883+ acl_free_all();
61884+
61885+ if (acl_role_set.r_hash) {
61886+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
61887+ PAGE_SIZE)
61888+ kfree(acl_role_set.r_hash);
61889+ else
61890+ vfree(acl_role_set.r_hash);
61891+ }
61892+ if (name_set.n_hash) {
61893+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
61894+ PAGE_SIZE)
61895+ kfree(name_set.n_hash);
61896+ else
61897+ vfree(name_set.n_hash);
61898+ }
61899+
61900+ if (inodev_set.i_hash) {
61901+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
61902+ PAGE_SIZE)
61903+ kfree(inodev_set.i_hash);
61904+ else
61905+ vfree(inodev_set.i_hash);
61906+ }
61907+
61908+ gr_free_uidset();
61909+
61910+ memset(&name_set, 0, sizeof (struct name_db));
61911+ memset(&inodev_set, 0, sizeof (struct inodev_db));
61912+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
61913+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
61914+
61915+ default_role = NULL;
61916+ kernel_role = NULL;
61917+ role_list = NULL;
61918+
61919+ return;
61920+}
61921+
61922+static struct acl_subject_label *
61923+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
61924+
61925+static int alloc_and_copy_string(char **name, unsigned int maxlen)
61926+{
61927+ unsigned int len = strnlen_user(*name, maxlen);
61928+ char *tmp;
61929+
61930+ if (!len || len >= maxlen)
61931+ return -EINVAL;
61932+
61933+ if ((tmp = (char *) acl_alloc(len)) == NULL)
61934+ return -ENOMEM;
61935+
61936+ if (copy_from_user(tmp, *name, len))
61937+ return -EFAULT;
61938+
61939+ tmp[len-1] = '\0';
61940+ *name = tmp;
61941+
61942+ return 0;
61943+}
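+
+/* [editor's note: illustrative comment, not part of the original patch]
+ alloc_and_copy_string() replaces a userland string pointer with an
+ acl_alloc()'d kernel copy in place. strnlen_user() counts the
+ terminating NUL, so forcing tmp[len-1] = '\0' only matters if userland
+ raced and rewrote the string between the length check and
+ copy_from_user(). A hedged usage sketch:
+
+ char *name = some_user_ptr;
+ err = alloc_and_copy_string(&name, PATH_MAX);
+ if (err)
+ return err; // -EINVAL, -ENOMEM or -EFAULT from above
+ // name now points at a NUL-terminated kernel copy
+*/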
61944+
61945+static int
61946+copy_user_glob(struct acl_object_label *obj)
61947+{
61948+ struct acl_object_label *g_tmp, **guser;
61949+ int error;
61950+
61951+ if (obj->globbed == NULL)
61952+ return 0;
61953+
61954+ guser = &obj->globbed;
61955+ while (*guser) {
61956+ g_tmp = (struct acl_object_label *)
61957+ acl_alloc(sizeof (struct acl_object_label));
61958+ if (g_tmp == NULL)
61959+ return -ENOMEM;
61960+
61961+ if (copy_acl_object_label(g_tmp, *guser))
61962+ return -EFAULT;
61963+
61964+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
61965+ if (error)
61966+ return error;
61967+
61968+ *guser = g_tmp;
61969+ guser = &(g_tmp->next);
61970+ }
61971+
61972+ return 0;
61973+}
61974+
61975+static int
61976+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
61977+ struct acl_role_label *role)
61978+{
61979+ struct acl_object_label *o_tmp;
61980+ int ret;
61981+
61982+ while (userp) {
61983+ if ((o_tmp = (struct acl_object_label *)
61984+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
61985+ return -ENOMEM;
61986+
61987+ if (copy_acl_object_label(o_tmp, userp))
61988+ return -EFAULT;
61989+
61990+ userp = o_tmp->prev;
61991+
61992+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
61993+ if (ret)
61994+ return ret;
61995+
61996+ insert_acl_obj_label(o_tmp, subj);
61997+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
61998+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
61999+ return -ENOMEM;
62000+
62001+ ret = copy_user_glob(o_tmp);
62002+ if (ret)
62003+ return ret;
62004+
62005+ if (o_tmp->nested) {
62006+ int already_copied;
62007+
62008+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
62009+ if (IS_ERR(o_tmp->nested))
62010+ return PTR_ERR(o_tmp->nested);
62011+
62012+ /* insert into nested subject list if we haven't copied this one yet
62013+ to prevent duplicate entries */
62014+ if (!already_copied) {
62015+ o_tmp->nested->next = role->hash->first;
62016+ role->hash->first = o_tmp->nested;
62017+ }
62018+ }
62019+ }
62020+
62021+ return 0;
62022+}
62023+
62024+static __u32
62025+count_user_subjs(struct acl_subject_label *userp)
62026+{
62027+ struct acl_subject_label s_tmp;
62028+ __u32 num = 0;
62029+
62030+ while (userp) {
62031+ if (copy_acl_subject_label(&s_tmp, userp))
62032+ break;
62033+
 62034+ userp = s_tmp.prev;
+ num++; /* editor's fix: the original patch never incremented num here,
+ which would leave every role's subj_hash_size at 0; this mirrors
+ count_user_objs() below */
 62035+ }
62036+
62037+ return num;
62038+}
62039+
62040+static int
62041+copy_user_allowedips(struct acl_role_label *rolep)
62042+{
62043+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
62044+
62045+ ruserip = rolep->allowed_ips;
62046+
62047+ while (ruserip) {
62048+ rlast = rtmp;
62049+
62050+ if ((rtmp = (struct role_allowed_ip *)
62051+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
62052+ return -ENOMEM;
62053+
62054+ if (copy_role_allowed_ip(rtmp, ruserip))
62055+ return -EFAULT;
62056+
62057+ ruserip = rtmp->prev;
62058+
62059+ if (!rlast) {
62060+ rtmp->prev = NULL;
62061+ rolep->allowed_ips = rtmp;
62062+ } else {
62063+ rlast->next = rtmp;
62064+ rtmp->prev = rlast;
62065+ }
62066+
62067+ if (!ruserip)
62068+ rtmp->next = NULL;
62069+ }
62070+
62071+ return 0;
62072+}
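+
+/* [editor's note: illustrative comment, not part of the original patch]
+ the userland policy lists are linked through ->prev (they were built
+ head-first), so the copy routines above and below walk ->prev while
+ rebuilding the kernel-side list in forward order: the first node copied
+ becomes the list head, every later node is appended through
+ rlast->next, and the last node gets ->next = NULL once ->prev runs out.
+*/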
62073+
62074+static int
62075+copy_user_transitions(struct acl_role_label *rolep)
62076+{
62077+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
62078+ int error;
62079+
62080+ rusertp = rolep->transitions;
62081+
62082+ while (rusertp) {
62083+ rlast = rtmp;
62084+
62085+ if ((rtmp = (struct role_transition *)
62086+ acl_alloc(sizeof (struct role_transition))) == NULL)
62087+ return -ENOMEM;
62088+
62089+ if (copy_role_transition(rtmp, rusertp))
62090+ return -EFAULT;
62091+
62092+ rusertp = rtmp->prev;
62093+
62094+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
62095+ if (error)
62096+ return error;
62097+
62098+ if (!rlast) {
62099+ rtmp->prev = NULL;
62100+ rolep->transitions = rtmp;
62101+ } else {
62102+ rlast->next = rtmp;
62103+ rtmp->prev = rlast;
62104+ }
62105+
62106+ if (!rusertp)
62107+ rtmp->next = NULL;
62108+ }
62109+
62110+ return 0;
62111+}
62112+
62113+static __u32 count_user_objs(const struct acl_object_label __user *userp)
62114+{
62115+ struct acl_object_label o_tmp;
62116+ __u32 num = 0;
62117+
62118+ while (userp) {
62119+ if (copy_acl_object_label(&o_tmp, userp))
62120+ break;
62121+
62122+ userp = o_tmp.prev;
62123+ num++;
62124+ }
62125+
62126+ return num;
62127+}
62128+
62129+static struct acl_subject_label *
62130+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
62131+{
62132+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
62133+ __u32 num_objs;
62134+ struct acl_ip_label **i_tmp, *i_utmp2;
62135+ struct gr_hash_struct ghash;
62136+ struct subject_map *subjmap;
62137+ unsigned int i_num;
62138+ int err;
62139+
62140+ if (already_copied != NULL)
62141+ *already_copied = 0;
62142+
62143+ s_tmp = lookup_subject_map(userp);
62144+
62145+ /* we've already copied this subject into the kernel, just return
62146+ the reference to it, and don't copy it over again
62147+ */
62148+ if (s_tmp) {
62149+ if (already_copied != NULL)
62150+ *already_copied = 1;
62151+ return(s_tmp);
62152+ }
62153+
62154+ if ((s_tmp = (struct acl_subject_label *)
62155+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
62156+ return ERR_PTR(-ENOMEM);
62157+
62158+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
62159+ if (subjmap == NULL)
62160+ return ERR_PTR(-ENOMEM);
62161+
62162+ subjmap->user = userp;
62163+ subjmap->kernel = s_tmp;
62164+ insert_subj_map_entry(subjmap);
62165+
62166+ if (copy_acl_subject_label(s_tmp, userp))
62167+ return ERR_PTR(-EFAULT);
62168+
62169+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
62170+ if (err)
62171+ return ERR_PTR(err);
62172+
62173+ if (!strcmp(s_tmp->filename, "/"))
62174+ role->root_label = s_tmp;
62175+
62176+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
62177+ return ERR_PTR(-EFAULT);
62178+
62179+ /* copy user and group transition tables */
62180+
62181+ if (s_tmp->user_trans_num) {
62182+ uid_t *uidlist;
62183+
62184+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
62185+ if (uidlist == NULL)
62186+ return ERR_PTR(-ENOMEM);
62187+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
62188+ return ERR_PTR(-EFAULT);
62189+
62190+ s_tmp->user_transitions = uidlist;
62191+ }
62192+
62193+ if (s_tmp->group_trans_num) {
62194+ gid_t *gidlist;
62195+
62196+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
62197+ if (gidlist == NULL)
62198+ return ERR_PTR(-ENOMEM);
62199+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
62200+ return ERR_PTR(-EFAULT);
62201+
62202+ s_tmp->group_transitions = gidlist;
62203+ }
62204+
62205+ /* set up object hash table */
62206+ num_objs = count_user_objs(ghash.first);
62207+
62208+ s_tmp->obj_hash_size = num_objs;
62209+ s_tmp->obj_hash =
62210+ (struct acl_object_label **)
62211+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
62212+
62213+ if (!s_tmp->obj_hash)
62214+ return ERR_PTR(-ENOMEM);
62215+
62216+ memset(s_tmp->obj_hash, 0,
62217+ s_tmp->obj_hash_size *
62218+ sizeof (struct acl_object_label *));
62219+
62220+ /* add in objects */
62221+ err = copy_user_objs(ghash.first, s_tmp, role);
62222+
62223+ if (err)
62224+ return ERR_PTR(err);
62225+
62226+ /* set pointer for parent subject */
62227+ if (s_tmp->parent_subject) {
62228+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
62229+
62230+ if (IS_ERR(s_tmp2))
62231+ return s_tmp2;
62232+
62233+ s_tmp->parent_subject = s_tmp2;
62234+ }
62235+
62236+ /* add in ip acls */
62237+
62238+ if (!s_tmp->ip_num) {
62239+ s_tmp->ips = NULL;
62240+ goto insert;
62241+ }
62242+
62243+ i_tmp =
62244+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
62245+ sizeof (struct acl_ip_label *));
62246+
62247+ if (!i_tmp)
62248+ return ERR_PTR(-ENOMEM);
62249+
62250+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
62251+ *(i_tmp + i_num) =
62252+ (struct acl_ip_label *)
62253+ acl_alloc(sizeof (struct acl_ip_label));
62254+ if (!*(i_tmp + i_num))
62255+ return ERR_PTR(-ENOMEM);
62256+
62257+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
62258+ return ERR_PTR(-EFAULT);
62259+
62260+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
62261+ return ERR_PTR(-EFAULT);
62262+
62263+ if ((*(i_tmp + i_num))->iface == NULL)
62264+ continue;
62265+
62266+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
62267+ if (err)
62268+ return ERR_PTR(err);
62269+ }
62270+
62271+ s_tmp->ips = i_tmp;
62272+
62273+insert:
62274+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
62275+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
62276+ return ERR_PTR(-ENOMEM);
62277+
62278+ return s_tmp;
62279+}
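+
+/* [editor's note: illustrative comment, not part of the original patch]
+ do_copy_user_subj() memoizes on the userland pointer: the subject_map
+ entry is inserted before any recursive copies (nested objects, the
+ parent_subject chain), so a subject reachable through several objects
+ is copied exactly once and any later request for the same userland
+ pointer short-circuits via lookup_subject_map() with
+ *already_copied = 1. Inserting the map entry before recursing also
+ keeps cyclic parent/nested references from looping forever.
+*/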
62280+
62281+static int
62282+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
62283+{
62284+ struct acl_subject_label s_pre;
62285+ struct acl_subject_label * ret;
62286+ int err;
62287+
62288+ while (userp) {
62289+ if (copy_acl_subject_label(&s_pre, userp))
62290+ return -EFAULT;
62291+
62292+ ret = do_copy_user_subj(userp, role, NULL);
62293+
62294+ err = PTR_ERR(ret);
62295+ if (IS_ERR(ret))
62296+ return err;
62297+
62298+ insert_acl_subj_label(ret, role);
62299+
62300+ userp = s_pre.prev;
62301+ }
62302+
62303+ return 0;
62304+}
62305+
62306+static int
62307+copy_user_acl(struct gr_arg *arg)
62308+{
62309+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
62310+ struct acl_subject_label *subj_list;
62311+ struct sprole_pw *sptmp;
62312+ struct gr_hash_struct *ghash;
62313+ uid_t *domainlist;
62314+ unsigned int r_num;
62315+ int err = 0;
62316+ __u16 i;
62317+ __u32 num_subjs;
62318+
62319+ /* we need a default and kernel role */
62320+ if (arg->role_db.num_roles < 2)
62321+ return -EINVAL;
62322+
62323+ /* copy special role authentication info from userspace */
62324+
62325+ num_sprole_pws = arg->num_sprole_pws;
62326+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
62327+
62328+ if (!acl_special_roles && num_sprole_pws)
62329+ return -ENOMEM;
62330+
62331+ for (i = 0; i < num_sprole_pws; i++) {
62332+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
62333+ if (!sptmp)
62334+ return -ENOMEM;
62335+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
62336+ return -EFAULT;
62337+
62338+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
62339+ if (err)
62340+ return err;
62341+
62342+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
62343+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
62344+#endif
62345+
62346+ acl_special_roles[i] = sptmp;
62347+ }
62348+
62349+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
62350+
62351+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
62352+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
62353+
62354+ if (!r_tmp)
62355+ return -ENOMEM;
62356+
62357+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
62358+ return -EFAULT;
62359+
62360+ if (copy_acl_role_label(r_tmp, r_utmp2))
62361+ return -EFAULT;
62362+
62363+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
62364+ if (err)
62365+ return err;
62366+
62367+ if (!strcmp(r_tmp->rolename, "default")
62368+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
62369+ default_role = r_tmp;
62370+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
62371+ kernel_role = r_tmp;
62372+ }
62373+
62374+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
62375+ return -ENOMEM;
62376+
62377+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
62378+ return -EFAULT;
62379+
62380+ r_tmp->hash = ghash;
62381+
62382+ num_subjs = count_user_subjs(r_tmp->hash->first);
62383+
62384+ r_tmp->subj_hash_size = num_subjs;
62385+ r_tmp->subj_hash =
62386+ (struct acl_subject_label **)
62387+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
62388+
62389+ if (!r_tmp->subj_hash)
62390+ return -ENOMEM;
62391+
62392+ err = copy_user_allowedips(r_tmp);
62393+ if (err)
62394+ return err;
62395+
62396+ /* copy domain info */
62397+ if (r_tmp->domain_children != NULL) {
62398+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
62399+ if (domainlist == NULL)
62400+ return -ENOMEM;
62401+
62402+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
62403+ return -EFAULT;
62404+
62405+ r_tmp->domain_children = domainlist;
62406+ }
62407+
62408+ err = copy_user_transitions(r_tmp);
62409+ if (err)
62410+ return err;
62411+
62412+ memset(r_tmp->subj_hash, 0,
62413+ r_tmp->subj_hash_size *
62414+ sizeof (struct acl_subject_label *));
62415+
62416+ /* acquire the list of subjects, then NULL out
62417+ the list prior to parsing the subjects for this role,
62418+ as during this parsing the list is replaced with a list
62419+ of *nested* subjects for the role
62420+ */
62421+ subj_list = r_tmp->hash->first;
62422+
62423+ /* set nested subject list to null */
62424+ r_tmp->hash->first = NULL;
62425+
62426+ err = copy_user_subjs(subj_list, r_tmp);
62427+
62428+ if (err)
62429+ return err;
62430+
62431+ insert_acl_role_label(r_tmp);
62432+ }
62433+
62434+ if (default_role == NULL || kernel_role == NULL)
62435+ return -EINVAL;
62436+
62437+ return err;
62438+}
62439+
62440+static int
62441+gracl_init(struct gr_arg *args)
62442+{
62443+ int error = 0;
62444+
62445+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
62446+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
62447+
62448+ if (init_variables(args)) {
62449+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
62450+ error = -ENOMEM;
62451+ free_variables();
62452+ goto out;
62453+ }
62454+
62455+ error = copy_user_acl(args);
62456+ free_init_variables();
62457+ if (error) {
62458+ free_variables();
62459+ goto out;
62460+ }
62461+
62462+ if ((error = gr_set_acls(0))) {
62463+ free_variables();
62464+ goto out;
62465+ }
62466+
62467+ pax_open_kernel();
62468+ gr_status |= GR_READY;
62469+ pax_close_kernel();
62470+
62471+ out:
62472+ return error;
62473+}
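+
+/* [editor's note: illustrative comment, not part of the original patch]
+ enable sequence: init_variables() sizes and allocates the hash tables,
+ copy_user_acl() pulls the policy in from userland,
+ free_init_variables() drops the now-unneeded user->kernel subject map,
+ gr_set_acls(0) relabels existing tasks, and only then is GR_READY set;
+ the pax_open_kernel()/pax_close_kernel() pair is presumably needed
+ because gr_status is kept write-protected. Any failure unwinds through
+ free_variables().
+*/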
62474+
 62475+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
62476+
62477+static int
62478+glob_match(const char *p, const char *n)
62479+{
62480+ char c;
62481+
62482+ while ((c = *p++) != '\0') {
62483+ switch (c) {
62484+ case '?':
62485+ if (*n == '\0')
62486+ return 1;
62487+ else if (*n == '/')
62488+ return 1;
62489+ break;
62490+ case '\\':
62491+ if (*n != c)
62492+ return 1;
62493+ break;
62494+ case '*':
62495+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
62496+ if (*n == '/')
62497+ return 1;
62498+ else if (c == '?') {
62499+ if (*n == '\0')
62500+ return 1;
62501+ else
62502+ ++n;
62503+ }
62504+ }
62505+ if (c == '\0') {
62506+ return 0;
62507+ } else {
62508+ const char *endp;
62509+
62510+ if ((endp = strchr(n, '/')) == NULL)
62511+ endp = n + strlen(n);
62512+
62513+ if (c == '[') {
62514+ for (--p; n < endp; ++n)
62515+ if (!glob_match(p, n))
62516+ return 0;
62517+ } else if (c == '/') {
62518+ while (*n != '\0' && *n != '/')
62519+ ++n;
62520+ if (*n == '/' && !glob_match(p, n + 1))
62521+ return 0;
62522+ } else {
62523+ for (--p; n < endp; ++n)
62524+ if (*n == c && !glob_match(p, n))
62525+ return 0;
62526+ }
62527+
62528+ return 1;
62529+ }
62530+ case '[':
62531+ {
62532+ int not;
62533+ char cold;
62534+
62535+ if (*n == '\0' || *n == '/')
62536+ return 1;
62537+
62538+ not = (*p == '!' || *p == '^');
62539+ if (not)
62540+ ++p;
62541+
62542+ c = *p++;
62543+ for (;;) {
62544+ unsigned char fn = (unsigned char)*n;
62545+
62546+ if (c == '\0')
62547+ return 1;
62548+ else {
62549+ if (c == fn)
62550+ goto matched;
62551+ cold = c;
62552+ c = *p++;
62553+
62554+ if (c == '-' && *p != ']') {
62555+ unsigned char cend = *p++;
62556+
62557+ if (cend == '\0')
62558+ return 1;
62559+
62560+ if (cold <= fn && fn <= cend)
62561+ goto matched;
62562+
62563+ c = *p++;
62564+ }
62565+ }
62566+
62567+ if (c == ']')
62568+ break;
62569+ }
62570+ if (!not)
62571+ return 1;
62572+ break;
62573+ matched:
62574+ while (c != ']') {
62575+ if (c == '\0')
62576+ return 1;
62577+
62578+ c = *p++;
62579+ }
62580+ if (not)
62581+ return 1;
62582+ }
62583+ break;
62584+ default:
62585+ if (c != *n)
62586+ return 1;
62587+ }
62588+
62589+ ++n;
62590+ }
62591+
62592+ if (*n == '\0')
62593+ return 0;
62594+
62595+ if (*n == '/')
62596+ return 0;
62597+
62598+ return 1;
62599+}
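+
+/* [editor's note: illustrative comment, not part of the original patch]
+ hedged examples of the matcher above (0 = match, 1 = no match), traced
+ from the code as written:
+
+ glob_match("/home/*/.ssh", "/home/u/.ssh") -> 0 (match)
+ glob_match("/home/*/.ssh", "/home/a/b/.ssh") -> 1 ('*' won't cross '/')
+ glob_match("/tmp/f?le", "/tmp/file") -> 0 (match)
+ glob_match("/dev/tty[0-9]", "/dev/tty5") -> 0 (match)
+ glob_match("/home/*", "/home/u/x") -> 0 (a trailing '*' spans '/')
+*/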
62600+
62601+static struct acl_object_label *
62602+chk_glob_label(struct acl_object_label *globbed,
62603+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
62604+{
62605+ struct acl_object_label *tmp;
62606+
62607+ if (*path == NULL)
62608+ *path = gr_to_filename_nolock(dentry, mnt);
62609+
62610+ tmp = globbed;
62611+
62612+ while (tmp) {
62613+ if (!glob_match(tmp->filename, *path))
62614+ return tmp;
62615+ tmp = tmp->next;
62616+ }
62617+
62618+ return NULL;
62619+}
62620+
62621+static struct acl_object_label *
62622+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
62623+ const ino_t curr_ino, const dev_t curr_dev,
62624+ const struct acl_subject_label *subj, char **path, const int checkglob)
62625+{
62626+ struct acl_subject_label *tmpsubj;
62627+ struct acl_object_label *retval;
62628+ struct acl_object_label *retval2;
62629+
62630+ tmpsubj = (struct acl_subject_label *) subj;
62631+ read_lock(&gr_inode_lock);
62632+ do {
62633+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
62634+ if (retval) {
62635+ if (checkglob && retval->globbed) {
62636+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
62637+ if (retval2)
62638+ retval = retval2;
62639+ }
62640+ break;
62641+ }
62642+ } while ((tmpsubj = tmpsubj->parent_subject));
62643+ read_unlock(&gr_inode_lock);
62644+
62645+ return retval;
62646+}
62647+
62648+static __inline__ struct acl_object_label *
62649+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
62650+ struct dentry *curr_dentry,
62651+ const struct acl_subject_label *subj, char **path, const int checkglob)
62652+{
62653+ int newglob = checkglob;
62654+ ino_t inode;
62655+ dev_t device;
62656+
 62657+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking,
 62658+ as we don't want a "/ *" rule to match instead of the "/" object.
 62659+ don't do this for create lookups that call this function, though, since they're looking
 62660+ up on the parent and thus need globbing checks on all paths
 62661+ */
62662+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
62663+ newglob = GR_NO_GLOB;
62664+
62665+ spin_lock(&curr_dentry->d_lock);
62666+ inode = curr_dentry->d_inode->i_ino;
62667+ device = __get_dev(curr_dentry);
62668+ spin_unlock(&curr_dentry->d_lock);
62669+
62670+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
62671+}
62672+
62673+#ifdef CONFIG_HUGETLBFS
62674+static inline bool
62675+is_hugetlbfs_mnt(const struct vfsmount *mnt)
62676+{
62677+ int i;
62678+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
62679+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
62680+ return true;
62681+ }
62682+
62683+ return false;
62684+}
62685+#endif
62686+
62687+static struct acl_object_label *
62688+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
62689+ const struct acl_subject_label *subj, char *path, const int checkglob)
62690+{
62691+ struct dentry *dentry = (struct dentry *) l_dentry;
62692+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
62693+ struct mount *real_mnt = real_mount(mnt);
62694+ struct acl_object_label *retval;
62695+ struct dentry *parent;
62696+
62697+ br_read_lock(&vfsmount_lock);
62698+ write_seqlock(&rename_lock);
62699+
62700+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
62701+#ifdef CONFIG_NET
62702+ mnt == sock_mnt ||
62703+#endif
62704+#ifdef CONFIG_HUGETLBFS
62705+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
62706+#endif
62707+ /* ignore Eric Biederman */
62708+ IS_PRIVATE(l_dentry->d_inode))) {
62709+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
62710+ goto out;
62711+ }
62712+
62713+ for (;;) {
62714+ if (dentry == real_root.dentry && mnt == real_root.mnt)
62715+ break;
62716+
62717+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
62718+ if (!mnt_has_parent(real_mnt))
62719+ break;
62720+
62721+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
62722+ if (retval != NULL)
62723+ goto out;
62724+
62725+ dentry = real_mnt->mnt_mountpoint;
62726+ real_mnt = real_mnt->mnt_parent;
62727+ mnt = &real_mnt->mnt;
62728+ continue;
62729+ }
62730+
62731+ parent = dentry->d_parent;
62732+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
62733+ if (retval != NULL)
62734+ goto out;
62735+
62736+ dentry = parent;
62737+ }
62738+
62739+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
62740+
62741+ /* real_root is pinned so we don't have to hold a reference */
62742+ if (retval == NULL)
62743+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
62744+out:
62745+ write_sequnlock(&rename_lock);
62746+ br_read_unlock(&vfsmount_lock);
62747+
62748+ BUG_ON(retval == NULL);
62749+
62750+ return retval;
62751+}
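+
+/* [editor's note: illustrative comment, not part of the original patch]
+ resolution order, e.g. for /var/log/messages: the target itself is
+ looked up first, then each ancestor (/var/log, /var, /), with mount
+ boundaries crossed via mnt_mountpoint/mnt_parent, and real_root as the
+ final fallback. At every step full_lookup() also climbs the subject's
+ parent_subject chain, so the most specific label reachable wins. The
+ BUG_ON() holds because a usable policy is expected to always supply an
+ object label for "/".
+*/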
62752+
62753+static __inline__ struct acl_object_label *
62754+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
62755+ const struct acl_subject_label *subj)
62756+{
62757+ char *path = NULL;
62758+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
62759+}
62760+
62761+static __inline__ struct acl_object_label *
62762+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
62763+ const struct acl_subject_label *subj)
62764+{
62765+ char *path = NULL;
62766+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
62767+}
62768+
62769+static __inline__ struct acl_object_label *
62770+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
62771+ const struct acl_subject_label *subj, char *path)
62772+{
62773+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
62774+}
62775+
62776+static struct acl_subject_label *
62777+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
62778+ const struct acl_role_label *role)
62779+{
62780+ struct dentry *dentry = (struct dentry *) l_dentry;
62781+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
62782+ struct mount *real_mnt = real_mount(mnt);
62783+ struct acl_subject_label *retval;
62784+ struct dentry *parent;
62785+
62786+ br_read_lock(&vfsmount_lock);
62787+ write_seqlock(&rename_lock);
62788+
62789+ for (;;) {
62790+ if (dentry == real_root.dentry && mnt == real_root.mnt)
62791+ break;
62792+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
62793+ if (!mnt_has_parent(real_mnt))
62794+ break;
62795+
62796+ spin_lock(&dentry->d_lock);
62797+ read_lock(&gr_inode_lock);
62798+ retval =
62799+ lookup_acl_subj_label(dentry->d_inode->i_ino,
62800+ __get_dev(dentry), role);
62801+ read_unlock(&gr_inode_lock);
62802+ spin_unlock(&dentry->d_lock);
62803+ if (retval != NULL)
62804+ goto out;
62805+
62806+ dentry = real_mnt->mnt_mountpoint;
62807+ real_mnt = real_mnt->mnt_parent;
62808+ mnt = &real_mnt->mnt;
62809+ continue;
62810+ }
62811+
62812+ spin_lock(&dentry->d_lock);
62813+ read_lock(&gr_inode_lock);
62814+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
62815+ __get_dev(dentry), role);
62816+ read_unlock(&gr_inode_lock);
62817+ parent = dentry->d_parent;
62818+ spin_unlock(&dentry->d_lock);
62819+
62820+ if (retval != NULL)
62821+ goto out;
62822+
62823+ dentry = parent;
62824+ }
62825+
62826+ spin_lock(&dentry->d_lock);
62827+ read_lock(&gr_inode_lock);
62828+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
62829+ __get_dev(dentry), role);
62830+ read_unlock(&gr_inode_lock);
62831+ spin_unlock(&dentry->d_lock);
62832+
62833+ if (unlikely(retval == NULL)) {
62834+ /* real_root is pinned, we don't need to hold a reference */
62835+ read_lock(&gr_inode_lock);
62836+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
62837+ __get_dev(real_root.dentry), role);
62838+ read_unlock(&gr_inode_lock);
62839+ }
62840+out:
62841+ write_sequnlock(&rename_lock);
62842+ br_read_unlock(&vfsmount_lock);
62843+
62844+ BUG_ON(retval == NULL);
62845+
62846+ return retval;
62847+}
62848+
62849+static void
62850+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
62851+{
62852+ struct task_struct *task = current;
62853+ const struct cred *cred = current_cred();
62854+
62855+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
62856+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
62857+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
62858+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
62859+
62860+ return;
62861+}
62862+
62863+static void
62864+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
62865+{
62866+ struct task_struct *task = current;
62867+ const struct cred *cred = current_cred();
62868+
62869+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
62870+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
62871+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
62872+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
62873+
62874+ return;
62875+}
62876+
62877+static void
62878+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
62879+{
62880+ struct task_struct *task = current;
62881+ const struct cred *cred = current_cred();
62882+
62883+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
62884+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
62885+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
62886+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
62887+
62888+ return;
62889+}
62890+
62891+__u32
62892+gr_search_file(const struct dentry * dentry, const __u32 mode,
62893+ const struct vfsmount * mnt)
62894+{
62895+ __u32 retval = mode;
62896+ struct acl_subject_label *curracl;
62897+ struct acl_object_label *currobj;
62898+
62899+ if (unlikely(!(gr_status & GR_READY)))
62900+ return (mode & ~GR_AUDITS);
62901+
62902+ curracl = current->acl;
62903+
62904+ currobj = chk_obj_label(dentry, mnt, curracl);
62905+ retval = currobj->mode & mode;
62906+
62907+ /* if we're opening a specified transfer file for writing
62908+ (e.g. /dev/initctl), then transfer our role to init
62909+ */
62910+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
62911+ current->role->roletype & GR_ROLE_PERSIST)) {
62912+ struct task_struct *task = init_pid_ns.child_reaper;
62913+
62914+ if (task->role != current->role) {
62915+ task->acl_sp_role = 0;
62916+ task->acl_role_id = current->acl_role_id;
62917+ task->role = current->role;
62918+ rcu_read_lock();
62919+ read_lock(&grsec_exec_file_lock);
62920+ gr_apply_subject_to_task(task);
62921+ read_unlock(&grsec_exec_file_lock);
62922+ rcu_read_unlock();
62923+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
62924+ }
62925+ }
62926+
62927+ if (unlikely
62928+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
62929+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
62930+ __u32 new_mode = mode;
62931+
62932+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
62933+
62934+ retval = new_mode;
62935+
62936+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
62937+ new_mode |= GR_INHERIT;
62938+
62939+ if (!(mode & GR_NOLEARN))
62940+ gr_log_learn(dentry, mnt, new_mode);
62941+ }
62942+
62943+ return retval;
62944+}
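+
+/* [editor's note: illustrative comment, not part of the original patch]
+ gr_search_file() is a mask intersection: the caller passes the access
+ bits it needs and gets back the subset the matched object grants, e.g.
+
+ requested: GR_READ | GR_WRITE
+ object: GR_READ | GR_FIND
+ returned: GR_READ (the write request is refused)
+
+ learning mode instead returns the requested bits (minus the
+ audit/suppress flags), logging the access unless GR_NOLEARN was
+ passed, so the run can proceed while a policy is being generated.
+*/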
62945+
62946+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
62947+ const struct dentry *parent,
62948+ const struct vfsmount *mnt)
62949+{
62950+ struct name_entry *match;
62951+ struct acl_object_label *matchpo;
62952+ struct acl_subject_label *curracl;
62953+ char *path;
62954+
62955+ if (unlikely(!(gr_status & GR_READY)))
62956+ return NULL;
62957+
62958+ preempt_disable();
62959+ path = gr_to_filename_rbac(new_dentry, mnt);
62960+ match = lookup_name_entry_create(path);
62961+
62962+ curracl = current->acl;
62963+
62964+ if (match) {
62965+ read_lock(&gr_inode_lock);
62966+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
62967+ read_unlock(&gr_inode_lock);
62968+
62969+ if (matchpo) {
62970+ preempt_enable();
62971+ return matchpo;
62972+ }
62973+ }
62974+
62975+ // lookup parent
62976+
62977+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
62978+
62979+ preempt_enable();
62980+ return matchpo;
62981+}
62982+
62983+__u32
62984+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
62985+ const struct vfsmount * mnt, const __u32 mode)
62986+{
62987+ struct acl_object_label *matchpo;
62988+ __u32 retval;
62989+
62990+ if (unlikely(!(gr_status & GR_READY)))
62991+ return (mode & ~GR_AUDITS);
62992+
62993+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
62994+
62995+ retval = matchpo->mode & mode;
62996+
62997+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
62998+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
62999+ __u32 new_mode = mode;
63000+
63001+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
63002+
63003+ gr_log_learn(new_dentry, mnt, new_mode);
63004+ return new_mode;
63005+ }
63006+
63007+ return retval;
63008+}
63009+
63010+__u32
63011+gr_check_link(const struct dentry * new_dentry,
63012+ const struct dentry * parent_dentry,
63013+ const struct vfsmount * parent_mnt,
63014+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
63015+{
63016+ struct acl_object_label *obj;
63017+ __u32 oldmode, newmode;
63018+ __u32 needmode;
63019+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
63020+ GR_DELETE | GR_INHERIT;
63021+
63022+ if (unlikely(!(gr_status & GR_READY)))
63023+ return (GR_CREATE | GR_LINK);
63024+
63025+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
63026+ oldmode = obj->mode;
63027+
63028+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
63029+ newmode = obj->mode;
63030+
63031+ needmode = newmode & checkmodes;
63032+
63033+ // old name for hardlink must have at least the permissions of the new name
63034+ if ((oldmode & needmode) != needmode)
63035+ goto bad;
63036+
63037+ // if old name had restrictions/auditing, make sure the new name does as well
63038+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
63039+
63040+ // don't allow hardlinking of suid/sgid/fcapped files without permission
63041+ if (is_privileged_binary(old_dentry))
63042+ needmode |= GR_SETID;
63043+
63044+ if ((newmode & needmode) != needmode)
63045+ goto bad;
63046+
63047+ // enforce minimum permissions
63048+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
63049+ return newmode;
63050+bad:
63051+ needmode = oldmode;
63052+ if (is_privileged_binary(old_dentry))
63053+ needmode |= GR_SETID;
63054+
63055+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
63056+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
63057+ return (GR_CREATE | GR_LINK);
63058+ } else if (newmode & GR_SUPPRESS)
63059+ return GR_SUPPRESS;
63060+ else
63061+ return 0;
63062+}
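+
+/* [editor's note: illustrative comment, not part of the original patch]
+ worked example for the hardlink check above: linking new name N to an
+ existing file O. If N's label grants GR_WRITE | GR_CREATE | GR_LINK
+ while O's label only grants GR_READ, then needmode = newmode &
+ checkmodes contains GR_WRITE, (oldmode & needmode) != needmode, and
+ the link is refused: a subject must not gain, via a new name,
+ permissions it lacks on the existing file. The reverse check keeps
+ restriction and audit bits (plus GR_SETID for suid/sgid/fcapped
+ targets) from being laundered away through the new name.
+*/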
63063+
63064+int
63065+gr_check_hidden_task(const struct task_struct *task)
63066+{
63067+ if (unlikely(!(gr_status & GR_READY)))
63068+ return 0;
63069+
63070+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
63071+ return 1;
63072+
63073+ return 0;
63074+}
63075+
63076+int
63077+gr_check_protected_task(const struct task_struct *task)
63078+{
63079+ if (unlikely(!(gr_status & GR_READY) || !task))
63080+ return 0;
63081+
63082+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
63083+ task->acl != current->acl)
63084+ return 1;
63085+
63086+ return 0;
63087+}
63088+
63089+int
63090+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
63091+{
63092+ struct task_struct *p;
63093+ int ret = 0;
63094+
63095+ if (unlikely(!(gr_status & GR_READY) || !pid))
63096+ return ret;
63097+
63098+ read_lock(&tasklist_lock);
63099+ do_each_pid_task(pid, type, p) {
63100+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
63101+ p->acl != current->acl) {
63102+ ret = 1;
63103+ goto out;
63104+ }
63105+ } while_each_pid_task(pid, type, p);
63106+out:
63107+ read_unlock(&tasklist_lock);
63108+
63109+ return ret;
63110+}
63111+
63112+void
63113+gr_copy_label(struct task_struct *tsk)
63114+{
63115+ tsk->signal->used_accept = 0;
63116+ tsk->acl_sp_role = 0;
63117+ tsk->acl_role_id = current->acl_role_id;
63118+ tsk->acl = current->acl;
63119+ tsk->role = current->role;
63120+ tsk->signal->curr_ip = current->signal->curr_ip;
63121+ tsk->signal->saved_ip = current->signal->saved_ip;
63122+ if (current->exec_file)
63123+ get_file(current->exec_file);
63124+ tsk->exec_file = current->exec_file;
63125+ tsk->is_writable = current->is_writable;
63126+ if (unlikely(current->signal->used_accept)) {
63127+ current->signal->curr_ip = 0;
63128+ current->signal->saved_ip = 0;
63129+ }
63130+
63131+ return;
63132+}
63133+
63134+static void
63135+gr_set_proc_res(struct task_struct *task)
63136+{
63137+ struct acl_subject_label *proc;
63138+ unsigned short i;
63139+
63140+ proc = task->acl;
63141+
63142+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
63143+ return;
63144+
63145+ for (i = 0; i < RLIM_NLIMITS; i++) {
63146+ if (!(proc->resmask & (1U << i)))
63147+ continue;
63148+
63149+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
63150+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
63151+
63152+ if (i == RLIMIT_CPU)
63153+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
63154+ }
63155+
63156+ return;
63157+}
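+
+/* [editor's note: illustrative comment, not part of the original patch]
+ resmask is a bitmap over RLIM_NLIMITS: bit i set means the subject's
+ policy supplies both rlim_cur and rlim_max for resource i. As a
+ hypothetical example, a subject limiting RLIMIT_CPU to 25s soft / 30s
+ hard would have bit RLIMIT_CPU set with those two values, and the
+ RLIMIT_CPU case above additionally pokes the posix cpu timer machinery
+ through update_rlimit_cpu().
+*/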
63158+
63159+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
63160+
63161+int
63162+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
63163+{
63164+ unsigned int i;
63165+ __u16 num;
63166+ uid_t *uidlist;
63167+ uid_t curuid;
63168+ int realok = 0;
63169+ int effectiveok = 0;
63170+ int fsok = 0;
63171+ uid_t globalreal, globaleffective, globalfs;
63172+
63173+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
63174+ struct user_struct *user;
63175+
63176+ if (!uid_valid(real))
63177+ goto skipit;
63178+
63179+ /* find user based on global namespace */
63180+
63181+ globalreal = GR_GLOBAL_UID(real);
63182+
63183+ user = find_user(make_kuid(&init_user_ns, globalreal));
63184+ if (user == NULL)
63185+ goto skipit;
63186+
63187+ if (gr_process_kernel_setuid_ban(user)) {
63188+ /* for find_user */
63189+ free_uid(user);
63190+ return 1;
63191+ }
63192+
63193+ /* for find_user */
63194+ free_uid(user);
63195+
63196+skipit:
63197+#endif
63198+
63199+ if (unlikely(!(gr_status & GR_READY)))
63200+ return 0;
63201+
63202+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
63203+ gr_log_learn_uid_change(real, effective, fs);
63204+
63205+ num = current->acl->user_trans_num;
63206+ uidlist = current->acl->user_transitions;
63207+
63208+ if (uidlist == NULL)
63209+ return 0;
63210+
63211+ if (!uid_valid(real)) {
63212+ realok = 1;
63213+ globalreal = (uid_t)-1;
63214+ } else {
63215+ globalreal = GR_GLOBAL_UID(real);
63216+ }
63217+ if (!uid_valid(effective)) {
63218+ effectiveok = 1;
63219+ globaleffective = (uid_t)-1;
63220+ } else {
63221+ globaleffective = GR_GLOBAL_UID(effective);
63222+ }
63223+ if (!uid_valid(fs)) {
63224+ fsok = 1;
63225+ globalfs = (uid_t)-1;
63226+ } else {
63227+ globalfs = GR_GLOBAL_UID(fs);
63228+ }
63229+
63230+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
63231+ for (i = 0; i < num; i++) {
63232+ curuid = uidlist[i];
63233+ if (globalreal == curuid)
63234+ realok = 1;
63235+ if (globaleffective == curuid)
63236+ effectiveok = 1;
63237+ if (globalfs == curuid)
63238+ fsok = 1;
63239+ }
63240+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
63241+ for (i = 0; i < num; i++) {
63242+ curuid = uidlist[i];
63243+ if (globalreal == curuid)
63244+ break;
63245+ if (globaleffective == curuid)
63246+ break;
63247+ if (globalfs == curuid)
63248+ break;
63249+ }
63250+ /* not in deny list */
63251+ if (i == num) {
63252+ realok = 1;
63253+ effectiveok = 1;
63254+ fsok = 1;
63255+ }
63256+ }
63257+
63258+ if (realok && effectiveok && fsok)
63259+ return 0;
63260+ else {
63261+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
63262+ return 1;
63263+ }
63264+}
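+
+/* [editor's note: illustrative comment, not part of the original patch]
+ transition-list semantics, assuming a hypothetical subject whose policy
+ allows uid transitions only to root and nobody, i.e.
+ uidlist = { 0, 65534 } with GR_ID_ALLOW: setuid(65534) passes (every
+ valid id lands in the list, and invalid (uid_t)-1 ids pass
+ automatically), while setuid(1000) fails and logs
+ GR_USRCHANGE_ACL_MSG with uid 1000. With GR_ID_DENY the logic inverts:
+ the change passes only if none of the real/effective/fs ids appears in
+ the list.
+*/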
63265+
63266+int
63267+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
63268+{
63269+ unsigned int i;
63270+ __u16 num;
63271+ gid_t *gidlist;
63272+ gid_t curgid;
63273+ int realok = 0;
63274+ int effectiveok = 0;
63275+ int fsok = 0;
63276+ gid_t globalreal, globaleffective, globalfs;
63277+
63278+ if (unlikely(!(gr_status & GR_READY)))
63279+ return 0;
63280+
63281+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
63282+ gr_log_learn_gid_change(real, effective, fs);
63283+
63284+ num = current->acl->group_trans_num;
63285+ gidlist = current->acl->group_transitions;
63286+
63287+ if (gidlist == NULL)
63288+ return 0;
63289+
63290+ if (!gid_valid(real)) {
63291+ realok = 1;
63292+ globalreal = (gid_t)-1;
63293+ } else {
63294+ globalreal = GR_GLOBAL_GID(real);
63295+ }
63296+ if (!gid_valid(effective)) {
63297+ effectiveok = 1;
63298+ globaleffective = (gid_t)-1;
63299+ } else {
63300+ globaleffective = GR_GLOBAL_GID(effective);
63301+ }
63302+ if (!gid_valid(fs)) {
63303+ fsok = 1;
63304+ globalfs = (gid_t)-1;
63305+ } else {
63306+ globalfs = GR_GLOBAL_GID(fs);
63307+ }
63308+
63309+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
63310+ for (i = 0; i < num; i++) {
63311+ curgid = gidlist[i];
63312+ if (globalreal == curgid)
63313+ realok = 1;
63314+ if (globaleffective == curgid)
63315+ effectiveok = 1;
63316+ if (globalfs == curgid)
63317+ fsok = 1;
63318+ }
63319+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
63320+ for (i = 0; i < num; i++) {
63321+ curgid = gidlist[i];
63322+ if (globalreal == curgid)
63323+ break;
63324+ if (globaleffective == curgid)
63325+ break;
63326+ if (globalfs == curgid)
63327+ break;
63328+ }
63329+ /* not in deny list */
63330+ if (i == num) {
63331+ realok = 1;
63332+ effectiveok = 1;
63333+ fsok = 1;
63334+ }
63335+ }
63336+
63337+ if (realok && effectiveok && fsok)
63338+ return 0;
63339+ else {
63340+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
63341+ return 1;
63342+ }
63343+}
63344+
63345+extern int gr_acl_is_capable(const int cap);
63346+
63347+void
63348+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
63349+{
63350+ struct acl_role_label *role = task->role;
63351+ struct acl_subject_label *subj = NULL;
63352+ struct acl_object_label *obj;
63353+ struct file *filp;
63354+ uid_t uid;
63355+ gid_t gid;
63356+
63357+ if (unlikely(!(gr_status & GR_READY)))
63358+ return;
63359+
63360+ uid = GR_GLOBAL_UID(kuid);
63361+ gid = GR_GLOBAL_GID(kgid);
63362+
63363+ filp = task->exec_file;
63364+
63365+ /* kernel process, we'll give them the kernel role */
63366+ if (unlikely(!filp)) {
63367+ task->role = kernel_role;
63368+ task->acl = kernel_role->root_label;
63369+ return;
63370+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
63371+ role = lookup_acl_role_label(task, uid, gid);
63372+
63373+ /* don't change the role if we're not a privileged process */
63374+ if (role && task->role != role &&
63375+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
63376+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
63377+ return;
63378+
 63379+ /* perform the subject lookup in the possibly new role;
 63380+ we can reuse this result below in the case where role == task->role
 63381+ */
63382+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
63383+
 63384+ /* if we changed uid/gid but ended up with the same role
 63385+ and are using inheritance, don't lose the inherited subject:
 63386+ if the current subject differs from what a normal lookup
 63387+ would return, we arrived at it via inheritance, so keep
 63388+ that subject
 63389+ */
63390+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
63391+ (subj == task->acl)))
63392+ task->acl = subj;
63393+
63394+ task->role = role;
63395+
63396+ task->is_writable = 0;
63397+
63398+ /* ignore additional mmap checks for processes that are writable
63399+ by the default ACL */
63400+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
63401+ if (unlikely(obj->mode & GR_WRITE))
63402+ task->is_writable = 1;
63403+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
63404+ if (unlikely(obj->mode & GR_WRITE))
63405+ task->is_writable = 1;
63406+
63407+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
63408+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
63409+#endif
63410+
63411+ gr_set_proc_res(task);
63412+
63413+ return;
63414+}
63415+
63416+int
63417+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
63418+ const int unsafe_flags)
63419+{
63420+ struct task_struct *task = current;
63421+ struct acl_subject_label *newacl;
63422+ struct acl_object_label *obj;
63423+ __u32 retmode;
63424+
63425+ if (unlikely(!(gr_status & GR_READY)))
63426+ return 0;
63427+
63428+ newacl = chk_subj_label(dentry, mnt, task->role);
63429+
 63430+ /* special handling for the case where an admin role did an strace -f -p <pid> and that
 63431+ pid then did an exec
 63432+ */
63433+ rcu_read_lock();
63434+ read_lock(&tasklist_lock);
63435+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
63436+ (task->parent->acl->mode & GR_POVERRIDE))) {
63437+ read_unlock(&tasklist_lock);
63438+ rcu_read_unlock();
63439+ goto skip_check;
63440+ }
63441+ read_unlock(&tasklist_lock);
63442+ rcu_read_unlock();
63443+
63444+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
63445+ !(task->role->roletype & GR_ROLE_GOD) &&
63446+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
63447+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
63448+ if (unsafe_flags & LSM_UNSAFE_SHARE)
63449+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
63450+ else
63451+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
63452+ return -EACCES;
63453+ }
63454+
63455+skip_check:
63456+
63457+ obj = chk_obj_label(dentry, mnt, task->acl);
63458+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
63459+
63460+ if (!(task->acl->mode & GR_INHERITLEARN) &&
63461+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
63462+ if (obj->nested)
63463+ task->acl = obj->nested;
63464+ else
63465+ task->acl = newacl;
63466+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
63467+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
63468+
63469+ task->is_writable = 0;
63470+
63471+ /* ignore additional mmap checks for processes that are writable
63472+ by the default ACL */
63473+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
63474+ if (unlikely(obj->mode & GR_WRITE))
63475+ task->is_writable = 1;
63476+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
63477+ if (unlikely(obj->mode & GR_WRITE))
63478+ task->is_writable = 1;
63479+
63480+ gr_set_proc_res(task);
63481+
63482+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
63483+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
63484+#endif
63485+ return 0;
63486+}
63487+
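+/* deletion handling: when a file covered by the loaded policy is
+   unlinked, every object and subject label matching its inode/device
+   pair is tagged GR_DELETED rather than freed, and the inodev entry is
+   marked deleted.  if a file is later created at the same pathname,
+   __do_handle_create() below re-binds those labels to the new
+   inode/device.
+*/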
63488+/* always called with valid inodev ptr */
63489+static void
63490+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
63491+{
63492+ struct acl_object_label *matchpo;
63493+ struct acl_subject_label *matchps;
63494+ struct acl_subject_label *subj;
63495+ struct acl_role_label *role;
63496+ unsigned int x;
63497+
63498+ FOR_EACH_ROLE_START(role)
63499+ FOR_EACH_SUBJECT_START(role, subj, x)
63500+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
63501+ matchpo->mode |= GR_DELETED;
63502+ FOR_EACH_SUBJECT_END(subj,x)
63503+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
63504+ /* nested subjects aren't in the role's subj_hash table */
63505+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
63506+ matchpo->mode |= GR_DELETED;
63507+ FOR_EACH_NESTED_SUBJECT_END(subj)
63508+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
63509+ matchps->mode |= GR_DELETED;
63510+ FOR_EACH_ROLE_END(role)
63511+
63512+ inodev->nentry->deleted = 1;
63513+
63514+ return;
63515+}
63516+
63517+void
63518+gr_handle_delete(const ino_t ino, const dev_t dev)
63519+{
63520+ struct inodev_entry *inodev;
63521+
63522+ if (unlikely(!(gr_status & GR_READY)))
63523+ return;
63524+
63525+ write_lock(&gr_inode_lock);
63526+ inodev = lookup_inodev_entry(ino, dev);
63527+ if (inodev != NULL)
63528+ do_handle_delete(inodev, ino, dev);
63529+ write_unlock(&gr_inode_lock);
63530+
63531+ return;
63532+}
63533+
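+/* the three update_* helpers below share one pattern: locate the entry in
+   its hash chain by the *old* inode/device (plus a deleted flag), splice
+   it out of the doubly linked chain, rewrite its identity to the new
+   inode/device, clear the deleted flag, and re-insert it so it hashes to
+   the bucket for its new key.  a minimal standalone sketch of the
+   splice-out step, with hypothetical types (not part of this patch):
+*/
+#if 0
+struct node { unsigned long key; struct node *prev, *next; };
+
+static void splice_out(struct node **bucket, struct node *n)
+{
+	if (n->prev == NULL) {
+		*bucket = n->next;	/* n was the chain head */
+		if (n->next)
+			n->next->prev = NULL;
+	} else {
+		n->prev->next = n->next;
+		if (n->next)
+			n->next->prev = n->prev;
+	}
+	n->prev = n->next = NULL;	/* safe to re-key and re-insert */
+}
+#endif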
63534+static void
63535+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
63536+ const ino_t newinode, const dev_t newdevice,
63537+ struct acl_subject_label *subj)
63538+{
63539+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
63540+ struct acl_object_label *match;
63541+
63542+ match = subj->obj_hash[index];
63543+
63544+ while (match && (match->inode != oldinode ||
63545+ match->device != olddevice ||
63546+ !(match->mode & GR_DELETED)))
63547+ match = match->next;
63548+
63549+ if (match && (match->inode == oldinode)
63550+ && (match->device == olddevice)
63551+ && (match->mode & GR_DELETED)) {
63552+ if (match->prev == NULL) {
63553+ subj->obj_hash[index] = match->next;
63554+ if (match->next != NULL)
63555+ match->next->prev = NULL;
63556+ } else {
63557+ match->prev->next = match->next;
63558+ if (match->next != NULL)
63559+ match->next->prev = match->prev;
63560+ }
63561+ match->prev = NULL;
63562+ match->next = NULL;
63563+ match->inode = newinode;
63564+ match->device = newdevice;
63565+ match->mode &= ~GR_DELETED;
63566+
63567+ insert_acl_obj_label(match, subj);
63568+ }
63569+
63570+ return;
63571+}
63572+
63573+static void
63574+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
63575+ const ino_t newinode, const dev_t newdevice,
63576+ struct acl_role_label *role)
63577+{
63578+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
63579+ struct acl_subject_label *match;
63580+
63581+ match = role->subj_hash[index];
63582+
63583+ while (match && (match->inode != oldinode ||
63584+ match->device != olddevice ||
63585+ !(match->mode & GR_DELETED)))
63586+ match = match->next;
63587+
63588+ if (match && (match->inode == oldinode)
63589+ && (match->device == olddevice)
63590+ && (match->mode & GR_DELETED)) {
63591+ if (match->prev == NULL) {
63592+ role->subj_hash[index] = match->next;
63593+ if (match->next != NULL)
63594+ match->next->prev = NULL;
63595+ } else {
63596+ match->prev->next = match->next;
63597+ if (match->next != NULL)
63598+ match->next->prev = match->prev;
63599+ }
63600+ match->prev = NULL;
63601+ match->next = NULL;
63602+ match->inode = newinode;
63603+ match->device = newdevice;
63604+ match->mode &= ~GR_DELETED;
63605+
63606+ insert_acl_subj_label(match, role);
63607+ }
63608+
63609+ return;
63610+}
63611+
63612+static void
63613+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
63614+ const ino_t newinode, const dev_t newdevice)
63615+{
63616+ unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
63617+ struct inodev_entry *match;
63618+
63619+ match = inodev_set.i_hash[index];
63620+
63621+ while (match && (match->nentry->inode != oldinode ||
63622+ match->nentry->device != olddevice || !match->nentry->deleted))
63623+ match = match->next;
63624+
63625+ if (match && (match->nentry->inode == oldinode)
63626+ && (match->nentry->device == olddevice) &&
63627+ match->nentry->deleted) {
63628+ if (match->prev == NULL) {
63629+ inodev_set.i_hash[index] = match->next;
63630+ if (match->next != NULL)
63631+ match->next->prev = NULL;
63632+ } else {
63633+ match->prev->next = match->next;
63634+ if (match->next != NULL)
63635+ match->next->prev = match->prev;
63636+ }
63637+ match->prev = NULL;
63638+ match->next = NULL;
63639+ match->nentry->inode = newinode;
63640+ match->nentry->device = newdevice;
63641+ match->nentry->deleted = 0;
63642+
63643+ insert_inodev_entry(match);
63644+ }
63645+
63646+ return;
63647+}
63648+
63649+static void
63650+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
63651+{
63652+ struct acl_subject_label *subj;
63653+ struct acl_role_label *role;
63654+ unsigned int x;
63655+
63656+ FOR_EACH_ROLE_START(role)
63657+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
63658+
63659+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
+			/* match on the policy's stored (old) identity and re-bind
+			   to the new one, mirroring update_acl_subj_label() above;
+			   as shipped, this compared subj against the new ino/dev
+			   and then self-assigned them, making the block a no-op */
63660+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
63661+				subj->inode = ino;
63662+				subj->device = dev;
63663+			}
63664+ /* nested subjects aren't in the role's subj_hash table */
63665+ update_acl_obj_label(matchn->inode, matchn->device,
63666+ ino, dev, subj);
63667+ FOR_EACH_NESTED_SUBJECT_END(subj)
63668+ FOR_EACH_SUBJECT_START(role, subj, x)
63669+ update_acl_obj_label(matchn->inode, matchn->device,
63670+ ino, dev, subj);
63671+ FOR_EACH_SUBJECT_END(subj,x)
63672+ FOR_EACH_ROLE_END(role)
63673+
63674+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
63675+
63676+ return;
63677+}
63678+
63679+static void
63680+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
63681+ const struct vfsmount *mnt)
63682+{
63683+ ino_t ino = dentry->d_inode->i_ino;
63684+ dev_t dev = __get_dev(dentry);
63685+
63686+ __do_handle_create(matchn, ino, dev);
63687+
63688+ return;
63689+}
63690+
63691+void
63692+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
63693+{
63694+ struct name_entry *matchn;
63695+
63696+ if (unlikely(!(gr_status & GR_READY)))
63697+ return;
63698+
63699+ preempt_disable();
63700+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
63701+
63702+ if (unlikely((unsigned long)matchn)) {
63703+ write_lock(&gr_inode_lock);
63704+ do_handle_create(matchn, dentry, mnt);
63705+ write_unlock(&gr_inode_lock);
63706+ }
63707+ preempt_enable();
63708+
63709+ return;
63710+}
63711+
63712+void
63713+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
63714+{
63715+ struct name_entry *matchn;
63716+
63717+ if (unlikely(!(gr_status & GR_READY)))
63718+ return;
63719+
63720+ preempt_disable();
63721+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
63722+
63723+ if (unlikely((unsigned long)matchn)) {
63724+ write_lock(&gr_inode_lock);
63725+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
63726+ write_unlock(&gr_inode_lock);
63727+ }
63728+ preempt_enable();
63729+
63730+ return;
63731+}
63732+
63733+void
63734+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
63735+ struct dentry *old_dentry,
63736+ struct dentry *new_dentry,
63737+ struct vfsmount *mnt, const __u8 replace)
63738+{
63739+ struct name_entry *matchn;
63740+ struct inodev_entry *inodev;
63741+ struct inode *inode = new_dentry->d_inode;
63742+ ino_t old_ino = old_dentry->d_inode->i_ino;
63743+ dev_t old_dev = __get_dev(old_dentry);
63744+
63745+	/* vfs_rename swaps the name and parent link of old_dentry and
63746+	   new_dentry.  by the time we run, old_dentry carries the new name,
63747+	   parent link, and inode of the renamed file; if the rename replaced
63748+	   an existing file, new_dentry carries the inode and name of the
63749+	   file that was replaced.
63750+	*/
63752+
63753+ if (unlikely(!(gr_status & GR_READY)))
63754+ return;
63755+
63756+ preempt_disable();
63757+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
63758+
63759+ /* we wouldn't have to check d_inode if it weren't for
63760+ NFS silly-renaming
63761+ */
63762+
63763+ write_lock(&gr_inode_lock);
63764+ if (unlikely(replace && inode)) {
63765+ ino_t new_ino = inode->i_ino;
63766+ dev_t new_dev = __get_dev(new_dentry);
63767+
63768+ inodev = lookup_inodev_entry(new_ino, new_dev);
63769+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
63770+ do_handle_delete(inodev, new_ino, new_dev);
63771+ }
63772+
63773+ inodev = lookup_inodev_entry(old_ino, old_dev);
63774+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
63775+ do_handle_delete(inodev, old_ino, old_dev);
63776+
63777+ if (unlikely((unsigned long)matchn))
63778+ do_handle_create(matchn, old_dentry, mnt);
63779+
63780+ write_unlock(&gr_inode_lock);
63781+ preempt_enable();
63782+
63783+ return;
63784+}
63785+
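+/* special-role authentication: the requested role must appear in the
+   current role's transition table; it must then either be reachable from
+   the requester's IP (allowed_ips), require no password (GR_ROLE_NOPW or
+   PAM-backed roles hand back a NULL salt/sum), or have its stored salt
+   and checksum returned for the chkpw() verification performed in
+   write_grsec_handler().
+*/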
63786+static int
63787+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
63788+ unsigned char **sum)
63789+{
63790+ struct acl_role_label *r;
63791+ struct role_allowed_ip *ipp;
63792+ struct role_transition *trans;
63793+ unsigned int i;
63794+ int found = 0;
63795+ u32 curr_ip = current->signal->curr_ip;
63796+
63797+ current->signal->saved_ip = curr_ip;
63798+
63799+ /* check transition table */
63800+
63801+ for (trans = current->role->transitions; trans; trans = trans->next) {
63802+ if (!strcmp(rolename, trans->rolename)) {
63803+ found = 1;
63804+ break;
63805+ }
63806+ }
63807+
63808+ if (!found)
63809+ return 0;
63810+
63811+ /* handle special roles that do not require authentication
63812+ and check ip */
63813+
63814+ FOR_EACH_ROLE_START(r)
63815+ if (!strcmp(rolename, r->rolename) &&
63816+ (r->roletype & GR_ROLE_SPECIAL)) {
63817+ found = 0;
63818+ if (r->allowed_ips != NULL) {
63819+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
63820+ if ((ntohl(curr_ip) & ipp->netmask) ==
63821+ (ntohl(ipp->addr) & ipp->netmask))
63822+ found = 1;
63823+ }
63824+ } else
63825+ found = 2;
63826+ if (!found)
63827+ return 0;
63828+
63829+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
63830+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
63831+ *salt = NULL;
63832+ *sum = NULL;
63833+ return 1;
63834+ }
63835+ }
63836+ FOR_EACH_ROLE_END(r)
63837+
63838+ for (i = 0; i < num_sprole_pws; i++) {
63839+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
63840+ *salt = acl_special_roles[i]->salt;
63841+ *sum = acl_special_roles[i]->sum;
63842+ return 1;
63843+ }
63844+ }
63845+
63846+ return 0;
63847+}
63848+
63849+static void
63850+assign_special_role(char *rolename)
63851+{
63852+ struct acl_object_label *obj;
63853+ struct acl_role_label *r;
63854+ struct acl_role_label *assigned = NULL;
63855+ struct task_struct *tsk;
63856+ struct file *filp;
63857+
63858+ FOR_EACH_ROLE_START(r)
63859+ if (!strcmp(rolename, r->rolename) &&
63860+ (r->roletype & GR_ROLE_SPECIAL)) {
63861+ assigned = r;
63862+ break;
63863+ }
63864+ FOR_EACH_ROLE_END(r)
63865+
63866+ if (!assigned)
63867+ return;
63868+
63869+ read_lock(&tasklist_lock);
63870+ read_lock(&grsec_exec_file_lock);
63871+
63872+ tsk = current->real_parent;
63873+ if (tsk == NULL)
63874+ goto out_unlock;
63875+
63876+ filp = tsk->exec_file;
63877+ if (filp == NULL)
63878+ goto out_unlock;
63879+
63880+ tsk->is_writable = 0;
63881+
63882+ tsk->acl_sp_role = 1;
63883+ tsk->acl_role_id = ++acl_sp_role_value;
63884+ tsk->role = assigned;
63885+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
63886+
63887+ /* ignore additional mmap checks for processes that are writable
63888+ by the default ACL */
63889+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
63890+ if (unlikely(obj->mode & GR_WRITE))
63891+ tsk->is_writable = 1;
63892+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
63893+ if (unlikely(obj->mode & GR_WRITE))
63894+ tsk->is_writable = 1;
63895+
63896+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
63897+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
63898+#endif
63899+
63900+out_unlock:
63901+ read_unlock(&grsec_exec_file_lock);
63902+ read_unlock(&tasklist_lock);
63903+ return;
63904+}
63905+
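+/* guards admin authentication against tty sniffing: if any process that
+   is neither on our controlling tty nor one of our ancestors holds an fd
+   referring to the same character device, the attempt is logged, the
+   offender is subjected to gr_handle_alertkill(), and 0 is returned so
+   authentication is refused.
+*/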
63906+int gr_check_secure_terminal(struct task_struct *task)
63907+{
63908+ struct task_struct *p, *p2, *p3;
63909+ struct files_struct *files;
63910+ struct fdtable *fdt;
63911+ struct file *our_file = NULL, *file;
63912+ int i;
63913+
63914+ if (task->signal->tty == NULL)
63915+ return 1;
63916+
63917+ files = get_files_struct(task);
63918+ if (files != NULL) {
63919+ rcu_read_lock();
63920+ fdt = files_fdtable(files);
63921+ for (i=0; i < fdt->max_fds; i++) {
63922+ file = fcheck_files(files, i);
63923+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
63924+ get_file(file);
63925+ our_file = file;
63926+ }
63927+ }
63928+ rcu_read_unlock();
63929+ put_files_struct(files);
63930+ }
63931+
63932+ if (our_file == NULL)
63933+ return 1;
63934+
63935+ read_lock(&tasklist_lock);
63936+ do_each_thread(p2, p) {
63937+ files = get_files_struct(p);
63938+ if (files == NULL ||
63939+ (p->signal && p->signal->tty == task->signal->tty)) {
63940+ if (files != NULL)
63941+ put_files_struct(files);
63942+ continue;
63943+ }
63944+ rcu_read_lock();
63945+ fdt = files_fdtable(files);
63946+ for (i=0; i < fdt->max_fds; i++) {
63947+ file = fcheck_files(files, i);
63948+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
63949+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
63950+ p3 = task;
63951+ while (task_pid_nr(p3) > 0) {
63952+ if (p3 == p)
63953+ break;
63954+ p3 = p3->real_parent;
63955+ }
63956+ if (p3 == p)
63957+ break;
63958+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
63959+ gr_handle_alertkill(p);
63960+ rcu_read_unlock();
63961+ put_files_struct(files);
63962+ read_unlock(&tasklist_lock);
63963+ fput(our_file);
63964+ return 0;
63965+ }
63966+ }
63967+ rcu_read_unlock();
63968+ put_files_struct(files);
63969+ } while_each_thread(p2, p);
63970+ read_unlock(&tasklist_lock);
63971+
63972+ fput(our_file);
63973+ return 1;
63974+}
63975+
63976+static int gr_rbac_disable(void *unused)
63977+{
63978+ pax_open_kernel();
63979+ gr_status &= ~GR_READY;
63980+ pax_close_kernel();
63981+
63982+ return 0;
63983+}
63984+
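+/* write_grsec_handler() is the kernel side of the RBAC control channel:
+   userland writes a struct gr_arg_wrapper whose length must equal
+   get_gr_arg_wrapper_size(), and gr_usermode->mode selects the operation
+   (GR_STATUS, GR_ENABLE, GR_RELOAD, GR_SHUTDOWN, GR_SEGVMOD,
+   GR_SPROLE/GR_SPROLEPAM, GR_UNSPROLE).  a hypothetical userland sketch
+   of a status query follows; the device path and the header providing
+   the structure layouts are assumptions, not taken from this patch:
+*/
+#if 0
+#include <fcntl.h>
+#include <unistd.h>
+#include "gracl.h"		/* assumed copy of the kernel structures */
+
+int rbac_status(void)
+{
+	struct gr_arg arg = { .mode = GR_STATUS };
+	struct gr_arg_wrapper w = {
+		.arg = &arg,
+		.version = GRSECURITY_VERSION,
+		.size = sizeof(struct gr_arg),
+	};
+	int fd = open("/dev/grsec", O_WRONLY);	/* assumed device node */
+	int ret;
+
+	if (fd < 0)
+		return -1;
+	/* per the GR_STATUS case below: 1 = enabled, 2 = disabled,
+	   3 = enabled but the terminal is insecure */
+	ret = write(fd, &w, sizeof(w));
+	close(fd);
+	return ret;
+}
+#endif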
63985+ssize_t
63986+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
63987+{
63988+ struct gr_arg_wrapper uwrap;
63989+ unsigned char *sprole_salt = NULL;
63990+ unsigned char *sprole_sum = NULL;
63991+ int error = 0;
63992+ int error2 = 0;
63993+ size_t req_count = 0;
63994+
63995+ mutex_lock(&gr_dev_mutex);
63996+
63997+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
63998+ error = -EPERM;
63999+ goto out;
64000+ }
64001+
64002+#ifdef CONFIG_COMPAT
64003+ pax_open_kernel();
64004+ if (is_compat_task()) {
64005+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
64006+ copy_gr_arg = &copy_gr_arg_compat;
64007+ copy_acl_object_label = &copy_acl_object_label_compat;
64008+ copy_acl_subject_label = &copy_acl_subject_label_compat;
64009+ copy_acl_role_label = &copy_acl_role_label_compat;
64010+ copy_acl_ip_label = &copy_acl_ip_label_compat;
64011+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
64012+ copy_role_transition = &copy_role_transition_compat;
64013+ copy_sprole_pw = &copy_sprole_pw_compat;
64014+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
64015+ copy_pointer_from_array = &copy_pointer_from_array_compat;
64016+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
64017+ } else {
64018+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
64019+ copy_gr_arg = &copy_gr_arg_normal;
64020+ copy_acl_object_label = &copy_acl_object_label_normal;
64021+ copy_acl_subject_label = &copy_acl_subject_label_normal;
64022+ copy_acl_role_label = &copy_acl_role_label_normal;
64023+ copy_acl_ip_label = &copy_acl_ip_label_normal;
64024+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
64025+ copy_role_transition = &copy_role_transition_normal;
64026+ copy_sprole_pw = &copy_sprole_pw_normal;
64027+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
64028+ copy_pointer_from_array = &copy_pointer_from_array_normal;
64029+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
64030+ }
64031+ pax_close_kernel();
64032+#endif
64033+
64034+ req_count = get_gr_arg_wrapper_size();
64035+
64036+ if (count != req_count) {
64037+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
64038+ error = -EINVAL;
64039+ goto out;
64040+ }
64041+
64043+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
64044+ gr_auth_expires = 0;
64045+ gr_auth_attempts = 0;
64046+ }
64047+
64048+ error = copy_gr_arg_wrapper(buf, &uwrap);
64049+ if (error)
64050+ goto out;
64051+
64052+ error = copy_gr_arg(uwrap.arg, gr_usermode);
64053+ if (error)
64054+ goto out;
64055+
64056+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
64057+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
64058+ time_after(gr_auth_expires, get_seconds())) {
64059+ error = -EBUSY;
64060+ goto out;
64061+ }
64062+
64063+	/* if a non-root user is trying to do anything other than use a
64064+	   special role, do not attempt authentication and do not count
64065+	   the attempt towards authentication lockout
64066+	*/
64067+
64068+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
64069+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
64070+ gr_is_global_nonroot(current_uid())) {
64071+ error = -EPERM;
64072+ goto out;
64073+ }
64074+
64075+ /* ensure pw and special role name are null terminated */
64076+
64077+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
64078+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
64079+
64080+	/* Okay.
64081+	 * We have enough of the argument structure (the tables themselves
64082+	 * have yet to be copy_from_user'd).  Copy the tables only if we
64083+	 * need them, i.e. for loading operations. */
64084+
64085+ switch (gr_usermode->mode) {
64086+ case GR_STATUS:
64087+ if (gr_status & GR_READY) {
64088+ error = 1;
64089+ if (!gr_check_secure_terminal(current))
64090+ error = 3;
64091+ } else
64092+ error = 2;
64093+ goto out;
64094+ case GR_SHUTDOWN:
64095+ if ((gr_status & GR_READY)
64096+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
64097+ stop_machine(gr_rbac_disable, NULL, NULL);
64098+ free_variables();
64099+ memset(gr_usermode, 0, sizeof (struct gr_arg));
64100+ memset(gr_system_salt, 0, GR_SALT_LEN);
64101+ memset(gr_system_sum, 0, GR_SHA_LEN);
64102+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
64103+ } else if (gr_status & GR_READY) {
64104+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
64105+ error = -EPERM;
64106+ } else {
64107+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
64108+ error = -EAGAIN;
64109+ }
64110+ break;
64111+ case GR_ENABLE:
64112+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
64113+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
64114+ else {
64115+ if (gr_status & GR_READY)
64116+ error = -EAGAIN;
64117+ else
64118+ error = error2;
64119+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
64120+ }
64121+ break;
64122+ case GR_RELOAD:
64123+ if (!(gr_status & GR_READY)) {
64124+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
64125+ error = -EAGAIN;
64126+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
64127+ stop_machine(gr_rbac_disable, NULL, NULL);
64128+ free_variables();
64129+ error2 = gracl_init(gr_usermode);
64130+ if (!error2)
64131+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
64132+ else {
64133+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
64134+ error = error2;
64135+ }
64136+ } else {
64137+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
64138+ error = -EPERM;
64139+ }
64140+ break;
64141+ case GR_SEGVMOD:
64142+ if (unlikely(!(gr_status & GR_READY))) {
64143+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
64144+ error = -EAGAIN;
64145+ break;
64146+ }
64147+
64148+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
64149+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
64150+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
64151+ struct acl_subject_label *segvacl;
64152+ segvacl =
64153+ lookup_acl_subj_label(gr_usermode->segv_inode,
64154+ gr_usermode->segv_device,
64155+ current->role);
64156+ if (segvacl) {
64157+ segvacl->crashes = 0;
64158+ segvacl->expires = 0;
64159+ }
64160+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
64161+ gr_remove_uid(gr_usermode->segv_uid);
64162+ }
64163+ } else {
64164+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
64165+ error = -EPERM;
64166+ }
64167+ break;
64168+ case GR_SPROLE:
64169+ case GR_SPROLEPAM:
64170+ if (unlikely(!(gr_status & GR_READY))) {
64171+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
64172+ error = -EAGAIN;
64173+ break;
64174+ }
64175+
64176+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
64177+ current->role->expires = 0;
64178+ current->role->auth_attempts = 0;
64179+ }
64180+
64181+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
64182+ time_after(current->role->expires, get_seconds())) {
64183+ error = -EBUSY;
64184+ goto out;
64185+ }
64186+
64187+ if (lookup_special_role_auth
64188+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
64189+ && ((!sprole_salt && !sprole_sum)
64190+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
64191+ char *p = "";
64192+ assign_special_role(gr_usermode->sp_role);
64193+ read_lock(&tasklist_lock);
64194+ if (current->real_parent)
64195+ p = current->real_parent->role->rolename;
64196+ read_unlock(&tasklist_lock);
64197+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
64198+ p, acl_sp_role_value);
64199+ } else {
64200+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
64201+ error = -EPERM;
64202+ if(!(current->role->auth_attempts++))
64203+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
64204+
64205+ goto out;
64206+ }
64207+ break;
64208+ case GR_UNSPROLE:
64209+ if (unlikely(!(gr_status & GR_READY))) {
64210+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
64211+ error = -EAGAIN;
64212+ break;
64213+ }
64214+
64215+ if (current->role->roletype & GR_ROLE_SPECIAL) {
64216+ char *p = "";
64217+ int i = 0;
64218+
64219+ read_lock(&tasklist_lock);
64220+ if (current->real_parent) {
64221+ p = current->real_parent->role->rolename;
64222+ i = current->real_parent->acl_role_id;
64223+ }
64224+ read_unlock(&tasklist_lock);
64225+
64226+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
64227+ gr_set_acls(1);
64228+ } else {
64229+ error = -EPERM;
64230+ goto out;
64231+ }
64232+ break;
64233+ default:
64234+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
64235+ error = -EINVAL;
64236+ break;
64237+ }
64238+
64239+ if (error != -EPERM)
64240+ goto out;
64241+
64242+ if(!(gr_auth_attempts++))
64243+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
64244+
64245+ out:
64246+ mutex_unlock(&gr_dev_mutex);
64247+
64248+ if (!error)
64249+ error = req_count;
64250+
64251+ return error;
64252+}
64253+
64254+/* must be called with
64255+ rcu_read_lock();
64256+ read_lock(&tasklist_lock);
64257+ read_lock(&grsec_exec_file_lock);
64258+*/
64259+int gr_apply_subject_to_task(struct task_struct *task)
64260+{
64261+ struct acl_object_label *obj;
64262+ char *tmpname;
64263+ struct acl_subject_label *tmpsubj;
64264+ struct file *filp;
64265+ struct name_entry *nmatch;
64266+
64267+ filp = task->exec_file;
64268+ if (filp == NULL)
64269+ return 0;
64270+
64271+	/* apply the correct subject to binaries that were already running
64272+	   when the RBAC system was enabled, in case those binaries have
64273+	   been replaced or deleted since they were executed
64274+	   -----
64275+	   when the RBAC system starts, the inode/dev recorded in exec_file
64276+	   may be one the RBAC system is unaware of: it only knows the
64277+	   inode/dev of the file currently on disk, or its absence, hence
64278+	   the pathname-based lookup below
64279+	*/
64282+ preempt_disable();
64283+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
64284+
64285+ nmatch = lookup_name_entry(tmpname);
64286+ preempt_enable();
64287+ tmpsubj = NULL;
64288+ if (nmatch) {
64289+ if (nmatch->deleted)
64290+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
64291+ else
64292+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
64293+ if (tmpsubj != NULL)
64294+ task->acl = tmpsubj;
64295+ }
64296+ if (tmpsubj == NULL)
64297+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
64298+ task->role);
64299+ if (task->acl) {
64300+ task->is_writable = 0;
64301+ /* ignore additional mmap checks for processes that are writable
64302+ by the default ACL */
64303+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
64304+ if (unlikely(obj->mode & GR_WRITE))
64305+ task->is_writable = 1;
64306+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
64307+ if (unlikely(obj->mode & GR_WRITE))
64308+ task->is_writable = 1;
64309+
64310+ gr_set_proc_res(task);
64311+
64312+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
64313+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
64314+#endif
64315+ } else {
64316+ return 1;
64317+ }
64318+
64319+ return 0;
64320+}
64321+
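+/* gr_set_acls() re-labels every thread in the system.  with type == 0 a
+   full role/subject lookup is redone for each task; with type == 1 (the
+   special-role exit path, see GR_UNSPROLE above) only tasks still
+   carrying the caller's role and acl_role_id are touched, leaving
+   unrelated processes alone.  kernel threads, which have no exec_file,
+   fall back to the kernel role's root label.
+*/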
64322+int
64323+gr_set_acls(const int type)
64324+{
64325+ struct task_struct *task, *task2;
64326+ struct acl_role_label *role = current->role;
64327+ __u16 acl_role_id = current->acl_role_id;
64328+ const struct cred *cred;
64329+ int ret;
64330+
64331+ rcu_read_lock();
64332+ read_lock(&tasklist_lock);
64333+ read_lock(&grsec_exec_file_lock);
64334+ do_each_thread(task2, task) {
64335+ /* check to see if we're called from the exit handler,
64336+ if so, only replace ACLs that have inherited the admin
64337+ ACL */
64338+
64339+ if (type && (task->role != role ||
64340+ task->acl_role_id != acl_role_id))
64341+ continue;
64342+
64343+ task->acl_role_id = 0;
64344+ task->acl_sp_role = 0;
64345+
64346+ if (task->exec_file) {
64347+ cred = __task_cred(task);
64348+ task->role = lookup_acl_role_label(task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
64349+ ret = gr_apply_subject_to_task(task);
64350+ if (ret) {
64351+ read_unlock(&grsec_exec_file_lock);
64352+ read_unlock(&tasklist_lock);
64353+ rcu_read_unlock();
64354+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
64355+ return ret;
64356+ }
64357+ } else {
64358+ // it's a kernel process
64359+ task->role = kernel_role;
64360+ task->acl = kernel_role->root_label;
64361+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
64362+ task->acl->mode &= ~GR_PROCFIND;
64363+#endif
64364+ }
64365+ } while_each_thread(task2, task);
64366+ read_unlock(&grsec_exec_file_lock);
64367+ read_unlock(&tasklist_lock);
64368+ rcu_read_unlock();
64369+
64370+ return 0;
64371+}
64372+
64373+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
64374+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
64375+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
64376+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
64377+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
64378+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
64379+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
64380+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
64381+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
64382+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
64383+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
64384+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
64385+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
64386+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
64387+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
64388+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
64389+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
64390+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
64391+};
64392+
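+/* resource learning: while a subject is in learn mode, any request at or
+   above the current soft limit raises it to the requested value plus the
+   per-resource bump from the table above (and the hard limit too, when
+   exceeded), so the generated policy leaves headroom rather than pinning
+   limits to the exact peak observed during the learning run.
+*/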
64393+void
64394+gr_learn_resource(const struct task_struct *task,
64395+ const int res, const unsigned long wanted, const int gt)
64396+{
64397+ struct acl_subject_label *acl;
64398+ const struct cred *cred;
64399+
64400+ if (unlikely((gr_status & GR_READY) &&
64401+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
64402+ goto skip_reslog;
64403+
64404+ gr_log_resource(task, res, wanted, gt);
64405+skip_reslog:
64406+
64407+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
64408+ return;
64409+
64410+ acl = task->acl;
64411+
64412+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
64413+ !(acl->resmask & (1U << (unsigned short) res))))
64414+ return;
64415+
64416+ if (wanted >= acl->res[res].rlim_cur) {
64417+ unsigned long res_add;
64418+
64419+ res_add = wanted + res_learn_bumps[res];
64420+
64421+ acl->res[res].rlim_cur = res_add;
64422+
64423+ if (wanted > acl->res[res].rlim_max)
64424+ acl->res[res].rlim_max = res_add;
64425+
64426+ /* only log the subject filename, since resource logging is supported for
64427+ single-subject learning only */
64428+ rcu_read_lock();
64429+ cred = __task_cred(task);
64430+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
64431+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
64432+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
64433+ "", (unsigned long) res, &task->signal->saved_ip);
64434+ rcu_read_unlock();
64435+ }
64436+
64437+ return;
64438+}
64439+EXPORT_SYMBOL(gr_learn_resource);
64440+#endif
64441+
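+/* subject-driven PaX flag overrides, applied at exec time: each
+   GR_PAX_DISABLE_* bit in the subject clears, and each GR_PAX_ENABLE_*
+   bit sets, the corresponding MF_PAX_* task flag; flags the subject does
+   not mention keep whatever pax_get_flags() reported.
+*/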
64442+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
64443+void
64444+pax_set_initial_flags(struct linux_binprm *bprm)
64445+{
64446+ struct task_struct *task = current;
64447+ struct acl_subject_label *proc;
64448+ unsigned long flags;
64449+
64450+ if (unlikely(!(gr_status & GR_READY)))
64451+ return;
64452+
64453+ flags = pax_get_flags(task);
64454+
64455+ proc = task->acl;
64456+
64457+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
64458+ flags &= ~MF_PAX_PAGEEXEC;
64459+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
64460+ flags &= ~MF_PAX_SEGMEXEC;
64461+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
64462+ flags &= ~MF_PAX_RANDMMAP;
64463+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
64464+ flags &= ~MF_PAX_EMUTRAMP;
64465+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
64466+ flags &= ~MF_PAX_MPROTECT;
64467+
64468+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
64469+ flags |= MF_PAX_PAGEEXEC;
64470+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
64471+ flags |= MF_PAX_SEGMEXEC;
64472+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
64473+ flags |= MF_PAX_RANDMMAP;
64474+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
64475+ flags |= MF_PAX_EMUTRAMP;
64476+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
64477+ flags |= MF_PAX_MPROTECT;
64478+
64479+ pax_set_flags(task, flags);
64480+
64481+ return;
64482+}
64483+#endif
64484+
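+/* the two ptrace handlers below share an ancestor walk: follow
+   real_parent from the target until either the current task is found
+   (tracing one's own descendants relaxes the checks) or pid 0 ends the
+   walk.  GR_NOPTRACE on the target's binary forbids tracing outright,
+   while GR_PTRACERD permits read-only tracing by rejecting
+   state-modifying requests (PTRACE_POKETEXT, PTRACE_SETREGS, etc.).
+*/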
64485+int
64486+gr_handle_proc_ptrace(struct task_struct *task)
64487+{
64488+ struct file *filp;
64489+ struct task_struct *tmp = task;
64490+ struct task_struct *curtemp = current;
64491+ __u32 retmode;
64492+
64493+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
64494+ if (unlikely(!(gr_status & GR_READY)))
64495+ return 0;
64496+#endif
64497+
64498+ read_lock(&tasklist_lock);
64499+ read_lock(&grsec_exec_file_lock);
64500+ filp = task->exec_file;
64501+
64502+ while (task_pid_nr(tmp) > 0) {
64503+ if (tmp == curtemp)
64504+ break;
64505+ tmp = tmp->real_parent;
64506+ }
64507+
64508+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
64509+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
64510+ read_unlock(&grsec_exec_file_lock);
64511+ read_unlock(&tasklist_lock);
64512+ return 1;
64513+ }
64514+
64515+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
64516+ if (!(gr_status & GR_READY)) {
64517+ read_unlock(&grsec_exec_file_lock);
64518+ read_unlock(&tasklist_lock);
64519+ return 0;
64520+ }
64521+#endif
64522+
64523+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
64524+ read_unlock(&grsec_exec_file_lock);
64525+ read_unlock(&tasklist_lock);
64526+
64527+ if (retmode & GR_NOPTRACE)
64528+ return 1;
64529+
64530+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
64531+ && (current->acl != task->acl || (current->acl != current->role->root_label
64532+ && task_pid_nr(current) != task_pid_nr(task))))
64533+ return 1;
64534+
64535+ return 0;
64536+}
64537+
64538+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
64539+{
64540+ if (unlikely(!(gr_status & GR_READY)))
64541+ return;
64542+
64543+ if (!(current->role->roletype & GR_ROLE_GOD))
64544+ return;
64545+
64546+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
64547+ p->role->rolename, gr_task_roletype_to_char(p),
64548+ p->acl->filename);
64549+}
64550+
64551+int
64552+gr_handle_ptrace(struct task_struct *task, const long request)
64553+{
64554+ struct task_struct *tmp = task;
64555+ struct task_struct *curtemp = current;
64556+ __u32 retmode;
64557+
64558+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
64559+ if (unlikely(!(gr_status & GR_READY)))
64560+ return 0;
64561+#endif
64562+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
64563+ read_lock(&tasklist_lock);
64564+ while (task_pid_nr(tmp) > 0) {
64565+ if (tmp == curtemp)
64566+ break;
64567+ tmp = tmp->real_parent;
64568+ }
64569+
64570+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
64571+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
64572+ read_unlock(&tasklist_lock);
64573+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
64574+ return 1;
64575+ }
64576+ read_unlock(&tasklist_lock);
64577+ }
64578+
64579+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
64580+ if (!(gr_status & GR_READY))
64581+ return 0;
64582+#endif
64583+
64584+ read_lock(&grsec_exec_file_lock);
64585+ if (unlikely(!task->exec_file)) {
64586+ read_unlock(&grsec_exec_file_lock);
64587+ return 0;
64588+ }
64589+
64590+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
64591+ read_unlock(&grsec_exec_file_lock);
64592+
64593+ if (retmode & GR_NOPTRACE) {
64594+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
64595+ return 1;
64596+ }
64597+
64598+ if (retmode & GR_PTRACERD) {
64599+ switch (request) {
64600+ case PTRACE_SEIZE:
64601+ case PTRACE_POKETEXT:
64602+ case PTRACE_POKEDATA:
64603+ case PTRACE_POKEUSR:
64604+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
64605+ case PTRACE_SETREGS:
64606+ case PTRACE_SETFPREGS:
64607+#endif
64608+#ifdef CONFIG_X86
64609+ case PTRACE_SETFPXREGS:
64610+#endif
64611+#ifdef CONFIG_ALTIVEC
64612+ case PTRACE_SETVRREGS:
64613+#endif
64614+ return 1;
64615+ default:
64616+ return 0;
64617+ }
64618+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
64619+ !(current->role->roletype & GR_ROLE_GOD) &&
64620+ (current->acl != task->acl)) {
64621+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
64622+ return 1;
64623+ }
64624+
64625+ return 0;
64626+}
64627+
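+/* PROT_EXEC mapping policy: is_writable_mmap() reports as writable any
+   regular file that is writable under the default role's or the task
+   role's root label, unless the subject carries GR_OVERRIDE or the file
+   is an unlinked shmem file.  the mmap/mprotect handlers then consult
+   the object's GR_EXEC bit, with GR_SUPPRESS muting the denial log,
+   GR_AUDIT_EXEC forcing an audit entry on success, and gr_tpe_allow()
+   applied on top.
+*/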
64628+static int is_writable_mmap(const struct file *filp)
64629+{
64630+ struct task_struct *task = current;
64631+ struct acl_object_label *obj, *obj2;
64632+
64633+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
64634+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
64635+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
64636+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
64637+ task->role->root_label);
64638+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
64639+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
64640+ return 1;
64641+ }
64642+ }
64643+ return 0;
64644+}
64645+
64646+int
64647+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
64648+{
64649+ __u32 mode;
64650+
64651+ if (unlikely(!file || !(prot & PROT_EXEC)))
64652+ return 1;
64653+
64654+ if (is_writable_mmap(file))
64655+ return 0;
64656+
64657+ mode =
64658+ gr_search_file(file->f_path.dentry,
64659+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
64660+ file->f_path.mnt);
64661+
64662+ if (!gr_tpe_allow(file))
64663+ return 0;
64664+
64665+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
64666+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
64667+ return 0;
64668+ } else if (unlikely(!(mode & GR_EXEC))) {
64669+ return 0;
64670+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
64671+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
64672+ return 1;
64673+ }
64674+
64675+ return 1;
64676+}
64677+
64678+int
64679+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
64680+{
64681+ __u32 mode;
64682+
64683+ if (unlikely(!file || !(prot & PROT_EXEC)))
64684+ return 1;
64685+
64686+ if (is_writable_mmap(file))
64687+ return 0;
64688+
64689+ mode =
64690+ gr_search_file(file->f_path.dentry,
64691+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
64692+ file->f_path.mnt);
64693+
64694+ if (!gr_tpe_allow(file))
64695+ return 0;
64696+
64697+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
64698+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
64699+ return 0;
64700+ } else if (unlikely(!(mode & GR_EXEC))) {
64701+ return 0;
64702+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
64703+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
64704+ return 1;
64705+ }
64706+
64707+ return 1;
64708+}
64709+
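+/* process accounting on exit for GR_PROCACCT subjects: wall-clock runtime
+   (monotonic time minus start_time) and cpu time ((utime + stime) / HZ)
+   are decomposed into days/hours/minutes/seconds and logged together
+   with the exit code.
+*/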
64710+void
64711+gr_acl_handle_psacct(struct task_struct *task, const long code)
64712+{
64713+ unsigned long runtime;
64714+ unsigned long cputime;
64715+ unsigned int wday, cday;
64716+ __u8 whr, chr;
64717+ __u8 wmin, cmin;
64718+ __u8 wsec, csec;
64719+ struct timespec timeval;
64720+
64721+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
64722+ !(task->acl->mode & GR_PROCACCT)))
64723+ return;
64724+
64725+ do_posix_clock_monotonic_gettime(&timeval);
64726+ runtime = timeval.tv_sec - task->start_time.tv_sec;
64727+ wday = runtime / (3600 * 24);
64728+ runtime -= wday * (3600 * 24);
64729+ whr = runtime / 3600;
64730+ runtime -= whr * 3600;
64731+ wmin = runtime / 60;
64732+ runtime -= wmin * 60;
64733+ wsec = runtime;
64734+
64735+ cputime = (task->utime + task->stime) / HZ;
64736+ cday = cputime / (3600 * 24);
64737+ cputime -= cday * (3600 * 24);
64738+ chr = cputime / 3600;
64739+ cputime -= chr * 3600;
64740+ cmin = cputime / 60;
64741+ cputime -= cmin * 60;
64742+ csec = cputime;
64743+
64744+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
64745+
64746+ return;
64747+}
64748+
64749+void gr_set_kernel_label(struct task_struct *task)
64750+{
64751+ if (gr_status & GR_READY) {
64752+ task->role = kernel_role;
64753+ task->acl = kernel_role->root_label;
64754+ }
64755+ return;
64756+}
64757+
64758+#ifdef CONFIG_TASKSTATS
64759+int gr_is_taskstats_denied(int pid)
64760+{
64761+ struct task_struct *task;
64762+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64763+ const struct cred *cred;
64764+#endif
64765+ int ret = 0;
64766+
64767+ /* restrict taskstats viewing to un-chrooted root users
64768+ who have the 'view' subject flag if the RBAC system is enabled
64769+ */
64770+
64771+ rcu_read_lock();
64772+ read_lock(&tasklist_lock);
64773+ task = find_task_by_vpid(pid);
64774+ if (task) {
64775+#ifdef CONFIG_GRKERNSEC_CHROOT
64776+ if (proc_is_chrooted(task))
64777+ ret = -EACCES;
64778+#endif
64779+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64780+ cred = __task_cred(task);
64781+#ifdef CONFIG_GRKERNSEC_PROC_USER
64782+ if (gr_is_global_nonroot(cred->uid))
64783+ ret = -EACCES;
64784+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64785+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
64786+ ret = -EACCES;
64787+#endif
64788+#endif
64789+ if (gr_status & GR_READY) {
64790+ if (!(task->acl->mode & GR_VIEW))
64791+ ret = -EACCES;
64792+ }
64793+ } else
64794+ ret = -ENOENT;
64795+
64796+ read_unlock(&tasklist_lock);
64797+ rcu_read_unlock();
64798+
64799+ return ret;
64800+}
64801+#endif
64802+
64803+/* AUXV entries are filled via a descendant of search_binary_handler
64804+ after we've already applied the subject for the target
64805+*/
64806+int gr_acl_enable_at_secure(void)
64807+{
64808+ if (unlikely(!(gr_status & GR_READY)))
64809+ return 0;
64810+
64811+ if (current->acl->mode & GR_ATSECURE)
64812+ return 1;
64813+
64814+ return 0;
64815+}
64816+
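+/* readdir filtering: decide whether a directory entry may be shown to
+   the task.  the subject chain is walked for an exact object match on
+   the entry's inode/dev; only when the directory's object has globbed
+   children that might match by name is the full pathname built (in a
+   per-cpu scratch page) and glob_match()ed.  a zero return hides the
+   entry; GR_FIND grants visibility.
+*/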
64817+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
64818+{
64819+ struct task_struct *task = current;
64820+ struct dentry *dentry = file->f_path.dentry;
64821+ struct vfsmount *mnt = file->f_path.mnt;
64822+ struct acl_object_label *obj, *tmp;
64823+ struct acl_subject_label *subj;
64824+ unsigned int bufsize;
64825+ int is_not_root;
64826+ char *path;
64827+ dev_t dev = __get_dev(dentry);
64828+
64829+ if (unlikely(!(gr_status & GR_READY)))
64830+ return 1;
64831+
64832+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
64833+ return 1;
64834+
64835+	/* private (IS_PRIVATE) inodes are always visible; skip the checks */
64836+ if (IS_PRIVATE(dentry->d_inode))
64837+ return 1;
64838+
64839+ subj = task->acl;
64840+ read_lock(&gr_inode_lock);
64841+ do {
64842+ obj = lookup_acl_obj_label(ino, dev, subj);
64843+ if (obj != NULL) {
64844+ read_unlock(&gr_inode_lock);
64845+ return (obj->mode & GR_FIND) ? 1 : 0;
64846+ }
64847+ } while ((subj = subj->parent_subject));
64848+ read_unlock(&gr_inode_lock);
64849+
64850+	/* purely an optimization: we look up the object for the directory
64851+	   being read.  if any globbed object could match the entry we're
64852+	   filling in, the object found here will be an anchor point with
64853+	   the globbed objects attached.
64854+	*/
64856+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
64857+ if (obj->globbed == NULL)
64858+ return (obj->mode & GR_FIND) ? 1 : 0;
64859+
64860+ is_not_root = ((obj->filename[0] == '/') &&
64861+ (obj->filename[1] == '\0')) ? 0 : 1;
64862+ bufsize = PAGE_SIZE - namelen - is_not_root;
64863+
64864+	/* unsigned-wrap check: bufsize == 0 wraps to UINT_MAX here, so one
+	   comparison catches both bufsize == 0 and bufsize > PAGE_SIZE */
64865+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
64866+ return 1;
64867+
64868+ preempt_disable();
64869+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
64870+ bufsize);
64871+
64872+ bufsize = strlen(path);
64873+
64874+ /* if base is "/", don't append an additional slash */
64875+ if (is_not_root)
64876+ *(path + bufsize) = '/';
64877+ memcpy(path + bufsize + is_not_root, name, namelen);
64878+ *(path + bufsize + namelen + is_not_root) = '\0';
64879+
64880+ tmp = obj->globbed;
64881+ while (tmp) {
64882+ if (!glob_match(tmp->filename, path)) {
64883+ preempt_enable();
64884+ return (tmp->mode & GR_FIND) ? 1 : 0;
64885+ }
64886+ tmp = tmp->next;
64887+ }
64888+ preempt_enable();
64889+ return (obj->mode & GR_FIND) ? 1 : 0;
64890+}
64891+
64892+void gr_put_exec_file(struct task_struct *task)
64893+{
64894+ struct file *filp;
64895+
64896+ write_lock(&grsec_exec_file_lock);
64897+ filp = task->exec_file;
64898+ task->exec_file = NULL;
64899+ write_unlock(&grsec_exec_file_lock);
64900+
64901+ if (filp)
64902+ fput(filp);
64903+
64904+ return;
64905+}
64906+
64907+
64908+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
64909+EXPORT_SYMBOL(gr_acl_is_enabled);
64910+#endif
64911+EXPORT_SYMBOL(gr_set_kernel_label);
64912+#ifdef CONFIG_SECURITY
64913+EXPORT_SYMBOL(gr_check_user_change);
64914+EXPORT_SYMBOL(gr_check_group_change);
64915+#endif
64916+
64917diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
64918new file mode 100644
64919index 0000000..34fefda
64920--- /dev/null
64921+++ b/grsecurity/gracl_alloc.c
64922@@ -0,0 +1,105 @@
64923+#include <linux/kernel.h>
64924+#include <linux/mm.h>
64925+#include <linux/slab.h>
64926+#include <linux/vmalloc.h>
64927+#include <linux/gracl.h>
64928+#include <linux/grsecurity.h>
64929+
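+/* allocator backing policy loading: every acl_alloc() result is pushed
+   onto a pointer stack sized up front by acl_alloc_stack_init() (one
+   slot per object the policy will allocate), so acl_free_all() can
+   unwind everything in one pass if loading fails or the policy is torn
+   down.  allocations are capped at PAGE_SIZE and acl_alloc_num() guards
+   the num * len multiplication against overflow.  note the differing
+   conventions: alloc_push() returns 1 on failure, while
+   acl_alloc_stack_init() returns 1 on success.
+*/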
64930+static unsigned long alloc_stack_next = 1;
64931+static unsigned long alloc_stack_size = 1;
64932+static void **alloc_stack;
64933+
64934+static __inline__ int
64935+alloc_pop(void)
64936+{
64937+ if (alloc_stack_next == 1)
64938+ return 0;
64939+
64940+ kfree(alloc_stack[alloc_stack_next - 2]);
64941+
64942+ alloc_stack_next--;
64943+
64944+ return 1;
64945+}
64946+
64947+static __inline__ int
64948+alloc_push(void *buf)
64949+{
64950+ if (alloc_stack_next >= alloc_stack_size)
64951+ return 1;
64952+
64953+ alloc_stack[alloc_stack_next - 1] = buf;
64954+
64955+ alloc_stack_next++;
64956+
64957+ return 0;
64958+}
64959+
64960+void *
64961+acl_alloc(unsigned long len)
64962+{
64963+ void *ret = NULL;
64964+
64965+ if (!len || len > PAGE_SIZE)
64966+ goto out;
64967+
64968+ ret = kmalloc(len, GFP_KERNEL);
64969+
64970+ if (ret) {
64971+ if (alloc_push(ret)) {
64972+ kfree(ret);
64973+ ret = NULL;
64974+ }
64975+ }
64976+
64977+out:
64978+ return ret;
64979+}
64980+
64981+void *
64982+acl_alloc_num(unsigned long num, unsigned long len)
64983+{
64984+ if (!len || (num > (PAGE_SIZE / len)))
64985+ return NULL;
64986+
64987+ return acl_alloc(num * len);
64988+}
64989+
64990+void
64991+acl_free_all(void)
64992+{
64993+ if (gr_acl_is_enabled() || !alloc_stack)
64994+ return;
64995+
64996+ while (alloc_pop()) ;
64997+
64998+ if (alloc_stack) {
64999+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
65000+ kfree(alloc_stack);
65001+ else
65002+ vfree(alloc_stack);
65003+ }
65004+
65005+ alloc_stack = NULL;
65006+ alloc_stack_size = 1;
65007+ alloc_stack_next = 1;
65008+
65009+ return;
65010+}
65011+
65012+int
65013+acl_alloc_stack_init(unsigned long size)
65014+{
65015+ if ((size * sizeof (void *)) <= PAGE_SIZE)
65016+ alloc_stack =
65017+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
65018+ else
65019+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
65020+
65021+ alloc_stack_size = size;
65022+
65023+ if (!alloc_stack)
65024+ return 0;
65025+ else
65026+ return 1;
65027+}
65028diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
65029new file mode 100644
65030index 0000000..bdd51ea
65031--- /dev/null
65032+++ b/grsecurity/gracl_cap.c
65033@@ -0,0 +1,110 @@
65034+#include <linux/kernel.h>
65035+#include <linux/module.h>
65036+#include <linux/sched.h>
65037+#include <linux/gracl.h>
65038+#include <linux/grsecurity.h>
65039+#include <linux/grinternal.h>
65040+
65041+extern const char *captab_log[];
65042+extern int captab_log_entries;
65043+
65044+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
65045+{
65046+ struct acl_subject_label *curracl;
65047+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
65048+ kernel_cap_t cap_audit = __cap_empty_set;
65049+
65050+ if (!gr_acl_is_enabled())
65051+ return 1;
65052+
65053+ curracl = task->acl;
65054+
65055+ cap_drop = curracl->cap_lower;
65056+ cap_mask = curracl->cap_mask;
65057+ cap_audit = curracl->cap_invert_audit;
65058+
65059+ while ((curracl = curracl->parent_subject)) {
65060+		/* walk up the parent subjects: the level nearest the task
65061+		   whose cap_mask covers this capability decides it.  if that
65062+		   level lowers the cap, add it to the dropped set and inherit
65063+		   its audit-inversion bit; cap_mask accumulates the caps that
65064+		   have already been decided */
65065+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
65066+ cap_raise(cap_mask, cap);
65067+ if (cap_raised(curracl->cap_lower, cap))
65068+ cap_raise(cap_drop, cap);
65069+ if (cap_raised(curracl->cap_invert_audit, cap))
65070+ cap_raise(cap_audit, cap);
65071+ }
65072+ }
65073+
65074+ if (!cap_raised(cap_drop, cap)) {
65075+ if (cap_raised(cap_audit, cap))
65076+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
65077+ return 1;
65078+ }
65079+
65080+ curracl = task->acl;
65081+
65082+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
65083+ && cap_raised(cred->cap_effective, cap)) {
65084+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
65085+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
65086+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
65087+ gr_to_filename(task->exec_file->f_path.dentry,
65088+ task->exec_file->f_path.mnt) : curracl->filename,
65089+ curracl->filename, 0UL,
65090+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
65091+ return 1;
65092+ }
65093+
65094+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
65095+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
65096+
65097+ return 0;
65098+}
65099+
65100+int
65101+gr_acl_is_capable(const int cap)
65102+{
65103+ return gr_task_acl_is_capable(current, current_cred(), cap);
65104+}
65105+
65106+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
65107+{
65108+ struct acl_subject_label *curracl;
65109+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
65110+
65111+ if (!gr_acl_is_enabled())
65112+ return 1;
65113+
65114+ curracl = task->acl;
65115+
65116+ cap_drop = curracl->cap_lower;
65117+ cap_mask = curracl->cap_mask;
65118+
65119+ while ((curracl = curracl->parent_subject)) {
65120+		/* same parent-subject mask walk as in gr_task_acl_is_capable()
65121+		   above, minus the audit handling */
65125+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
65126+ cap_raise(cap_mask, cap);
65127+ if (cap_raised(curracl->cap_lower, cap))
65128+ cap_raise(cap_drop, cap);
65129+ }
65130+ }
65131+
65132+ if (!cap_raised(cap_drop, cap))
65133+ return 1;
65134+
65135+ return 0;
65136+}
65137+
65138+int
65139+gr_acl_is_capable_nolog(const int cap)
65140+{
65141+ return gr_task_acl_is_capable_nolog(current, cap);
65142+}
65143+
65144diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
65145new file mode 100644
65146index 0000000..a43dd06
65147--- /dev/null
65148+++ b/grsecurity/gracl_compat.c
65149@@ -0,0 +1,269 @@
65150+#include <linux/kernel.h>
65151+#include <linux/gracl.h>
65152+#include <linux/compat.h>
65153+#include <linux/gracl_compat.h>
65154+
65155+#include <asm/uaccess.h>
65156+
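+/* compat layer for 32-bit userland on a 64-bit kernel: each helper below
+   mirrors its _normal counterpart, copying the 32-bit layout from
+   userspace field by field, widening pointers with compat_ptr() and
+   translating COMPAT_RLIM_INFINITY to RLIM_INFINITY, so the RBAC loader
+   only ever sees native-layout structures.
+*/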
65157+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
65158+{
65159+ struct gr_arg_wrapper_compat uwrapcompat;
65160+
65161+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
65162+ return -EFAULT;
65163+
65164+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
65165+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
65166+ return -EINVAL;
65167+
65168+ uwrap->arg = compat_ptr(uwrapcompat.arg);
65169+ uwrap->version = uwrapcompat.version;
65170+ uwrap->size = sizeof(struct gr_arg);
65171+
65172+ return 0;
65173+}
65174+
65175+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
65176+{
65177+ struct gr_arg_compat argcompat;
65178+
65179+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
65180+ return -EFAULT;
65181+
65182+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
65183+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
65184+ arg->role_db.num_roles = argcompat.role_db.num_roles;
65185+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
65186+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
65187+ arg->role_db.num_objects = argcompat.role_db.num_objects;
65188+
65189+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
65190+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
65191+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
65192+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
65193+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
65194+ arg->segv_device = argcompat.segv_device;
65195+ arg->segv_inode = argcompat.segv_inode;
65196+ arg->segv_uid = argcompat.segv_uid;
65197+ arg->num_sprole_pws = argcompat.num_sprole_pws;
65198+ arg->mode = argcompat.mode;
65199+
65200+ return 0;
65201+}
65202+
65203+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
65204+{
65205+ struct acl_object_label_compat objcompat;
65206+
65207+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
65208+ return -EFAULT;
65209+
65210+ obj->filename = compat_ptr(objcompat.filename);
65211+ obj->inode = objcompat.inode;
65212+ obj->device = objcompat.device;
65213+ obj->mode = objcompat.mode;
65214+
65215+ obj->nested = compat_ptr(objcompat.nested);
65216+ obj->globbed = compat_ptr(objcompat.globbed);
65217+
65218+ obj->prev = compat_ptr(objcompat.prev);
65219+ obj->next = compat_ptr(objcompat.next);
65220+
65221+ return 0;
65222+}
65223+
65224+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
65225+{
65226+ unsigned int i;
65227+ struct acl_subject_label_compat subjcompat;
65228+
65229+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
65230+ return -EFAULT;
65231+
65232+ subj->filename = compat_ptr(subjcompat.filename);
65233+ subj->inode = subjcompat.inode;
65234+ subj->device = subjcompat.device;
65235+ subj->mode = subjcompat.mode;
65236+ subj->cap_mask = subjcompat.cap_mask;
65237+ subj->cap_lower = subjcompat.cap_lower;
65238+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
65239+
65240+ for (i = 0; i < GR_NLIMITS; i++) {
65241+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
65242+ subj->res[i].rlim_cur = RLIM_INFINITY;
65243+ else
65244+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
65245+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
65246+ subj->res[i].rlim_max = RLIM_INFINITY;
65247+ else
65248+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
65249+ }
65250+ subj->resmask = subjcompat.resmask;
65251+
65252+ subj->user_trans_type = subjcompat.user_trans_type;
65253+ subj->group_trans_type = subjcompat.group_trans_type;
65254+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
65255+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
65256+ subj->user_trans_num = subjcompat.user_trans_num;
65257+ subj->group_trans_num = subjcompat.group_trans_num;
65258+
65259+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
65260+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
65261+ subj->ip_type = subjcompat.ip_type;
65262+ subj->ips = compat_ptr(subjcompat.ips);
65263+ subj->ip_num = subjcompat.ip_num;
65264+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
65265+
65266+ subj->crashes = subjcompat.crashes;
65267+ subj->expires = subjcompat.expires;
65268+
65269+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
65270+ subj->hash = compat_ptr(subjcompat.hash);
65271+ subj->prev = compat_ptr(subjcompat.prev);
65272+ subj->next = compat_ptr(subjcompat.next);
65273+
65274+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
65275+ subj->obj_hash_size = subjcompat.obj_hash_size;
65276+ subj->pax_flags = subjcompat.pax_flags;
65277+
65278+ return 0;
65279+}
65280+
65281+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
65282+{
65283+ struct acl_role_label_compat rolecompat;
65284+
65285+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
65286+ return -EFAULT;
65287+
65288+ role->rolename = compat_ptr(rolecompat.rolename);
65289+ role->uidgid = rolecompat.uidgid;
65290+ role->roletype = rolecompat.roletype;
65291+
65292+ role->auth_attempts = rolecompat.auth_attempts;
65293+ role->expires = rolecompat.expires;
65294+
65295+ role->root_label = compat_ptr(rolecompat.root_label);
65296+ role->hash = compat_ptr(rolecompat.hash);
65297+
65298+ role->prev = compat_ptr(rolecompat.prev);
65299+ role->next = compat_ptr(rolecompat.next);
65300+
65301+ role->transitions = compat_ptr(rolecompat.transitions);
65302+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
65303+ role->domain_children = compat_ptr(rolecompat.domain_children);
65304+ role->domain_child_num = rolecompat.domain_child_num;
65305+
65306+ role->umask = rolecompat.umask;
65307+
65308+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
65309+ role->subj_hash_size = rolecompat.subj_hash_size;
65310+
65311+ return 0;
65312+}
65313+
65314+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
65315+{
65316+ struct role_allowed_ip_compat roleip_compat;
65317+
65318+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
65319+ return -EFAULT;
65320+
65321+ roleip->addr = roleip_compat.addr;
65322+ roleip->netmask = roleip_compat.netmask;
65323+
65324+ roleip->prev = compat_ptr(roleip_compat.prev);
65325+ roleip->next = compat_ptr(roleip_compat.next);
65326+
65327+ return 0;
65328+}
65329+
65330+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
65331+{
65332+ struct role_transition_compat trans_compat;
65333+
65334+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
65335+ return -EFAULT;
65336+
65337+ trans->rolename = compat_ptr(trans_compat.rolename);
65338+
65339+ trans->prev = compat_ptr(trans_compat.prev);
65340+ trans->next = compat_ptr(trans_compat.next);
65341+
65342+ return 0;
65344+}
65345+
65346+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
65347+{
65348+ struct gr_hash_struct_compat hash_compat;
65349+
65350+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
65351+ return -EFAULT;
65352+
65353+ hash->table = compat_ptr(hash_compat.table);
65354+ hash->nametable = compat_ptr(hash_compat.nametable);
65355+ hash->first = compat_ptr(hash_compat.first);
65356+
65357+ hash->table_size = hash_compat.table_size;
65358+ hash->used_size = hash_compat.used_size;
65359+
65360+ hash->type = hash_compat.type;
65361+
65362+ return 0;
65363+}
65364+
65365+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
65366+{
65367+ compat_uptr_t ptrcompat;
65368+
65369+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
65370+ return -EFAULT;
65371+
65372+ *(void **)ptr = compat_ptr(ptrcompat);
65373+
65374+ return 0;
65375+}
65376+
65377+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
65378+{
65379+ struct acl_ip_label_compat ip_compat;
65380+
65381+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
65382+ return -EFAULT;
65383+
65384+ ip->iface = compat_ptr(ip_compat.iface);
65385+ ip->addr = ip_compat.addr;
65386+ ip->netmask = ip_compat.netmask;
65387+ ip->low = ip_compat.low;
65388+ ip->high = ip_compat.high;
65389+ ip->mode = ip_compat.mode;
65390+ ip->type = ip_compat.type;
65391+
65392+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
65393+
65394+ ip->prev = compat_ptr(ip_compat.prev);
65395+ ip->next = compat_ptr(ip_compat.next);
65396+
65397+ return 0;
65398+}
65399+
65400+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
65401+{
65402+ struct sprole_pw_compat pw_compat;
65403+
65404+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
65405+ return -EFAULT;
65406+
65407+ pw->rolename = compat_ptr(pw_compat.rolename);
65408+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
65409+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
65410+
65411+ return 0;
65412+}
65413+
65414+size_t get_gr_arg_wrapper_size_compat(void)
65415+{
65416+ return sizeof(struct gr_arg_wrapper_compat);
65417+}
65418+
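All of the copy_*_compat() helpers above share one shape: copy a fixed-size 32-bit struct image in from userland, then widen it member by member, converting each compat_uptr_t into a native kernel pointer with compat_ptr(). Below is a minimal userspace sketch of just that widening step; compat_uptr_t, compat_ptr(), and the rec/rec_compat structs are simplified stand-ins for the kernel's compat machinery and the gracl structures, not the real definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's compat types: a 32-bit task stores pointers
   as 32-bit integers; the 64-bit kernel widens them back to pointers. */
typedef uint32_t compat_uptr_t;
static void *compat_ptr(compat_uptr_t p) { return (void *)(uintptr_t)p; }

/* Hypothetical 32-bit image and native layout of one record. */
struct rec_compat { compat_uptr_t name; uint32_t mode; };
struct rec        { char *name;         unsigned int mode; };

/* Mirrors the copy_*_compat() pattern: fetch the 32-bit image whole,
   then widen field by field (the kernel would use copy_from_user()). */
static int copy_rec_compat(struct rec *r, const struct rec_compat *image)
{
    struct rec_compat tmp;

    memcpy(&tmp, image, sizeof(tmp));
    r->name = compat_ptr(tmp.name);
    r->mode = tmp.mode;
    return 0;
}

int main(void)
{
    struct rec_compat image = { 0x1000u, 0644 }; /* synthetic 32-bit handle */
    struct rec r;

    copy_rec_compat(&r, &image);
    printf("name=%p mode=%o\n", (void *)r.name, r.mode);
    return 0;
}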
65419diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
65420new file mode 100644
65421index 0000000..a340c17
65422--- /dev/null
65423+++ b/grsecurity/gracl_fs.c
65424@@ -0,0 +1,431 @@
65425+#include <linux/kernel.h>
65426+#include <linux/sched.h>
65427+#include <linux/types.h>
65428+#include <linux/fs.h>
65429+#include <linux/file.h>
65430+#include <linux/stat.h>
65431+#include <linux/grsecurity.h>
65432+#include <linux/grinternal.h>
65433+#include <linux/gracl.h>
65434+
65435+umode_t
65436+gr_acl_umask(void)
65437+{
65438+ if (unlikely(!gr_acl_is_enabled()))
65439+ return 0;
65440+
65441+ return current->role->umask;
65442+}
65443+
65444+__u32
65445+gr_acl_handle_hidden_file(const struct dentry * dentry,
65446+ const struct vfsmount * mnt)
65447+{
65448+ __u32 mode;
65449+
65450+ if (unlikely(!dentry->d_inode))
65451+ return GR_FIND;
65452+
65453+ mode =
65454+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
65455+
65456+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
65457+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
65458+ return mode;
65459+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
65460+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
65461+ return 0;
65462+ } else if (unlikely(!(mode & GR_FIND)))
65463+ return 0;
65464+
65465+ return GR_FIND;
65466+}
65467+
65468+__u32
65469+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
65470+ int acc_mode)
65471+{
65472+ __u32 reqmode = GR_FIND;
65473+ __u32 mode;
65474+
65475+ if (unlikely(!dentry->d_inode))
65476+ return reqmode;
65477+
65478+ if (acc_mode & MAY_APPEND)
65479+ reqmode |= GR_APPEND;
65480+ else if (acc_mode & MAY_WRITE)
65481+ reqmode |= GR_WRITE;
65482+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
65483+ reqmode |= GR_READ;
65484+
65485+ mode =
65486+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
65487+ mnt);
65488+
65489+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
65490+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
65491+ reqmode & GR_READ ? " reading" : "",
65492+ reqmode & GR_WRITE ? " writing" : reqmode &
65493+ GR_APPEND ? " appending" : "");
65494+ return reqmode;
65495+ } else
65496+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
65497+ {
65498+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
65499+ reqmode & GR_READ ? " reading" : "",
65500+ reqmode & GR_WRITE ? " writing" : reqmode &
65501+ GR_APPEND ? " appending" : "");
65502+ return 0;
65503+ } else if (unlikely((mode & reqmode) != reqmode))
65504+ return 0;
65505+
65506+ return reqmode;
65507+}
65508+
65509+__u32
65510+gr_acl_handle_creat(const struct dentry * dentry,
65511+ const struct dentry * p_dentry,
65512+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
65513+ const int imode)
65514+{
65515+ __u32 reqmode = GR_WRITE | GR_CREATE;
65516+ __u32 mode;
65517+
65518+ if (acc_mode & MAY_APPEND)
65519+ reqmode |= GR_APPEND;
65520+ // if a directory was required or the directory already exists, then
65521+ // don't count this open as a read
65522+ if ((acc_mode & MAY_READ) &&
65523+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
65524+ reqmode |= GR_READ;
65525+ if ((open_flags & O_CREAT) &&
65526+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
65527+ reqmode |= GR_SETID;
65528+
65529+ mode =
65530+ gr_check_create(dentry, p_dentry, p_mnt,
65531+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
65532+
65533+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
65534+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
65535+ reqmode & GR_READ ? " reading" : "",
65536+ reqmode & GR_WRITE ? " writing" : reqmode &
65537+ GR_APPEND ? " appending" : "");
65538+ return reqmode;
65539+ } else
65540+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
65541+ {
65542+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
65543+ reqmode & GR_READ ? " reading" : "",
65544+ reqmode & GR_WRITE ? " writing" : reqmode &
65545+ GR_APPEND ? " appending" : "");
65546+ return 0;
65547+ } else if (unlikely((mode & reqmode) != reqmode))
65548+ return 0;
65549+
65550+ return reqmode;
65551+}
65552+
65553+__u32
65554+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
65555+ const int fmode)
65556+{
65557+ __u32 mode, reqmode = GR_FIND;
65558+
65559+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
65560+ reqmode |= GR_EXEC;
65561+ if (fmode & S_IWOTH)
65562+ reqmode |= GR_WRITE;
65563+ if (fmode & S_IROTH)
65564+ reqmode |= GR_READ;
65565+
65566+ mode =
65567+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
65568+ mnt);
65569+
65570+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
65571+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
65572+ reqmode & GR_READ ? " reading" : "",
65573+ reqmode & GR_WRITE ? " writing" : "",
65574+ reqmode & GR_EXEC ? " executing" : "");
65575+ return reqmode;
65576+ } else
65577+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
65578+ {
65579+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
65580+ reqmode & GR_READ ? " reading" : "",
65581+ reqmode & GR_WRITE ? " writing" : "",
65582+ reqmode & GR_EXEC ? " executing" : "");
65583+ return 0;
65584+ } else if (unlikely((mode & reqmode) != reqmode))
65585+ return 0;
65586+
65587+ return reqmode;
65588+}
65589+
65590+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
65591+{
65592+ __u32 mode;
65593+
65594+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
65595+
65596+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
65597+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
65598+ return mode;
65599+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
65600+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
65601+ return 0;
65602+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
65603+ return 0;
65604+
65605+ return (reqmode);
65606+}
65607+
65608+__u32
65609+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
65610+{
65611+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
65612+}
65613+
65614+__u32
65615+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
65616+{
65617+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
65618+}
65619+
65620+__u32
65621+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
65622+{
65623+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
65624+}
65625+
65626+__u32
65627+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
65628+{
65629+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
65630+}
65631+
65632+__u32
65633+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
65634+ umode_t *modeptr)
65635+{
65636+ umode_t mode;
65637+
65638+ *modeptr &= ~gr_acl_umask();
65639+ mode = *modeptr;
65640+
65641+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
65642+ return 1;
65643+
65644+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
65645+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
65646+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
65647+ GR_CHMOD_ACL_MSG);
65648+ } else {
65649+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
65650+ }
65651+}
65652+
65653+__u32
65654+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
65655+{
65656+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
65657+}
65658+
65659+__u32
65660+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
65661+{
65662+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
65663+}
65664+
65665+__u32
65666+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
65667+{
65668+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
65669+}
65670+
65671+__u32
65672+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
65673+{
65674+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
65675+ GR_UNIXCONNECT_ACL_MSG);
65676+}
65677+
65678+/* hardlinks require at minimum create and link permission;
65679+ any additional privilege required is based on the
65680+ privilege of the file being linked to
65681+*/
65682+__u32
65683+gr_acl_handle_link(const struct dentry * new_dentry,
65684+ const struct dentry * parent_dentry,
65685+ const struct vfsmount * parent_mnt,
65686+ const struct dentry * old_dentry,
65687+ const struct vfsmount * old_mnt, const struct filename *to)
65688+{
65689+ __u32 mode;
65690+ __u32 needmode = GR_CREATE | GR_LINK;
65691+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
65692+
65693+ mode =
65694+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
65695+ old_mnt);
65696+
65697+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
65698+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
65699+ return mode;
65700+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
65701+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
65702+ return 0;
65703+ } else if (unlikely((mode & needmode) != needmode))
65704+ return 0;
65705+
65706+ return 1;
65707+}
65708+
65709+__u32
65710+gr_acl_handle_symlink(const struct dentry * new_dentry,
65711+ const struct dentry * parent_dentry,
65712+ const struct vfsmount * parent_mnt, const struct filename *from)
65713+{
65714+ __u32 needmode = GR_WRITE | GR_CREATE;
65715+ __u32 mode;
65716+
65717+ mode =
65718+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
65719+ GR_CREATE | GR_AUDIT_CREATE |
65720+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
65721+
65722+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
65723+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
65724+ return mode;
65725+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
65726+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
65727+ return 0;
65728+ } else if (unlikely((mode & needmode) != needmode))
65729+ return 0;
65730+
65731+ return (GR_WRITE | GR_CREATE);
65732+}
65733+
65734+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
65735+{
65736+ __u32 mode;
65737+
65738+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
65739+
65740+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
65741+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
65742+ return mode;
65743+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
65744+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
65745+ return 0;
65746+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
65747+ return 0;
65748+
65749+ return (reqmode);
65750+}
65751+
65752+__u32
65753+gr_acl_handle_mknod(const struct dentry * new_dentry,
65754+ const struct dentry * parent_dentry,
65755+ const struct vfsmount * parent_mnt,
65756+ const int mode)
65757+{
65758+ __u32 reqmode = GR_WRITE | GR_CREATE;
65759+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
65760+ reqmode |= GR_SETID;
65761+
65762+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
65763+ reqmode, GR_MKNOD_ACL_MSG);
65764+}
65765+
65766+__u32
65767+gr_acl_handle_mkdir(const struct dentry *new_dentry,
65768+ const struct dentry *parent_dentry,
65769+ const struct vfsmount *parent_mnt)
65770+{
65771+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
65772+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
65773+}
65774+
65775+#define RENAME_CHECK_SUCCESS(old, new) \
65776+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
65777+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
65778+
65779+int
65780+gr_acl_handle_rename(struct dentry *new_dentry,
65781+ struct dentry *parent_dentry,
65782+ const struct vfsmount *parent_mnt,
65783+ struct dentry *old_dentry,
65784+ struct inode *old_parent_inode,
65785+ struct vfsmount *old_mnt, const struct filename *newname)
65786+{
65787+ __u32 comp1, comp2;
65788+ int error = 0;
65789+
65790+ if (unlikely(!gr_acl_is_enabled()))
65791+ return 0;
65792+
65793+ if (!new_dentry->d_inode) {
65794+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
65795+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
65796+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
65797+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
65798+ GR_DELETE | GR_AUDIT_DELETE |
65799+ GR_AUDIT_READ | GR_AUDIT_WRITE |
65800+ GR_SUPPRESS, old_mnt);
65801+ } else {
65802+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
65803+ GR_CREATE | GR_DELETE |
65804+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
65805+ GR_AUDIT_READ | GR_AUDIT_WRITE |
65806+ GR_SUPPRESS, parent_mnt);
65807+ comp2 =
65808+ gr_search_file(old_dentry,
65809+ GR_READ | GR_WRITE | GR_AUDIT_READ |
65810+ GR_DELETE | GR_AUDIT_DELETE |
65811+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
65812+ }
65813+
65814+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
65815+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
65816+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
65817+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
65818+ && !(comp2 & GR_SUPPRESS)) {
65819+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
65820+ error = -EACCES;
65821+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
65822+ error = -EACCES;
65823+
65824+ return error;
65825+}
65826+
65827+void
65828+gr_acl_handle_exit(void)
65829+{
65830+ u16 id;
65831+ char *rolename;
65832+
65833+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
65834+ !(current->role->roletype & GR_ROLE_PERSIST))) {
65835+ id = current->acl_role_id;
65836+ rolename = current->role->rolename;
65837+ gr_set_acls(1);
65838+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
65839+ }
65840+
65841+ gr_put_exec_file(current);
65842+ return;
65843+}
65844+
65845+int
65846+gr_acl_handle_procpidmem(const struct task_struct *task)
65847+{
65848+ if (unlikely(!gr_acl_is_enabled()))
65849+ return 0;
65850+
65851+ if (task != current && task->acl->mode & GR_PROTPROCFD)
65852+ return -EACCES;
65853+
65854+ return 0;
65855+}
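Every handler in gracl_fs.c above reduces to the three-way decision that generic_fs_handler() spells out: grant and audit when all requested bits came back together with an audit bit, log a denial when a bit is missing and suppression is off, and otherwise stay silent. A compact standalone sketch of that decision table follows; the Q_* bit values are illustrative placeholders, not the kernel's GR_* constants.

#include <stdio.h>

#define Q_READ     0x01u
#define Q_WRITE    0x02u
#define Q_AUDITS   0x10u   /* "audit this grant" */
#define Q_SUPPRESS 0x20u   /* "suppress denial logging" */

/* Mirrors generic_fs_handler(): returns the granted request bits or 0. */
static unsigned int decide(unsigned int granted, unsigned int reqmode)
{
    if ((granted & reqmode) == reqmode && (granted & Q_AUDITS)) {
        puts("audit: allowed");
        return reqmode;
    }
    if ((granted & reqmode) != reqmode && !(granted & Q_SUPPRESS)) {
        puts("log: denied");
        return 0;
    }
    if ((granted & reqmode) != reqmode)
        return 0;           /* denied, but silently */
    return reqmode;         /* allowed, no audit requested */
}

int main(void)
{
    decide(Q_READ | Q_WRITE | Q_AUDITS, Q_READ | Q_WRITE); /* audit: allowed */
    decide(Q_READ,                      Q_READ | Q_WRITE); /* log: denied */
    decide(Q_READ | Q_SUPPRESS,         Q_READ | Q_WRITE); /* silent denial */
    return 0;
}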
65856diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
65857new file mode 100644
65858index 0000000..8132048
65859--- /dev/null
65860+++ b/grsecurity/gracl_ip.c
65861@@ -0,0 +1,387 @@
65862+#include <linux/kernel.h>
65863+#include <asm/uaccess.h>
65864+#include <asm/errno.h>
65865+#include <net/sock.h>
65866+#include <linux/file.h>
65867+#include <linux/fs.h>
65868+#include <linux/net.h>
65869+#include <linux/in.h>
65870+#include <linux/skbuff.h>
65871+#include <linux/ip.h>
65872+#include <linux/udp.h>
65873+#include <linux/types.h>
65874+#include <linux/sched.h>
65875+#include <linux/netdevice.h>
65876+#include <linux/inetdevice.h>
65877+#include <linux/gracl.h>
65878+#include <linux/grsecurity.h>
65879+#include <linux/grinternal.h>
65880+
65881+#define GR_BIND 0x01
65882+#define GR_CONNECT 0x02
65883+#define GR_INVERT 0x04
65884+#define GR_BINDOVERRIDE 0x08
65885+#define GR_CONNECTOVERRIDE 0x10
65886+#define GR_SOCK_FAMILY 0x20
65887+
65888+static const char * gr_protocols[IPPROTO_MAX] = {
65889+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
65890+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
65891+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
65892+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
65893+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
65894+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
65895+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
65896+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
65897+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
65898+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
65899+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nsfnet-igp", "dgp", "tcf",
65900+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
65901+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
65902+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
65903+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
65904+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
65905+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
65906+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
65907+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
65908+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
65909+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
65910+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
65911+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
65912+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
65913+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
65914+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
65915+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
65916+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
65917+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
65918+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
65919+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
65920+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
65921+ };
65922+
65923+static const char * gr_socktypes[SOCK_MAX] = {
65924+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
65925+ "unknown:7", "unknown:8", "unknown:9", "packet"
65926+ };
65927+
65928+static const char * gr_sockfamilies[AF_MAX+1] = {
65929+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
65930+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
65931+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
65932+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "caif"
65933+ };
65934+
65935+const char *
65936+gr_proto_to_name(unsigned char proto)
65937+{
65938+ return gr_protocols[proto];
65939+}
65940+
65941+const char *
65942+gr_socktype_to_name(unsigned char type)
65943+{
65944+ return gr_socktypes[type];
65945+}
65946+
65947+const char *
65948+gr_sockfamily_to_name(unsigned char family)
65949+{
65950+ return gr_sockfamilies[family];
65951+}
65952+
65953+int
65954+gr_search_socket(const int domain, const int type, const int protocol)
65955+{
65956+ struct acl_subject_label *curr;
65957+ const struct cred *cred = current_cred();
65958+
65959+ if (unlikely(!gr_acl_is_enabled()))
65960+ goto exit;
65961+
65962+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
65963+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
65964+ goto exit; // let the kernel handle it
65965+
65966+ curr = current->acl;
65967+
65968+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
65969+ /* the family is allowed; if this is PF_INET, allow it only if
65970+ the extra sock type/protocol checks pass */
65971+ if (domain == PF_INET)
65972+ goto inet_check;
65973+ goto exit;
65974+ } else {
65975+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
65976+ __u32 fakeip = 0;
65977+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
65978+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
65979+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
65980+ gr_to_filename(current->exec_file->f_path.dentry,
65981+ current->exec_file->f_path.mnt) :
65982+ curr->filename, curr->filename,
65983+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
65984+ &current->signal->saved_ip);
65985+ goto exit;
65986+ }
65987+ goto exit_fail;
65988+ }
65989+
65990+inet_check:
65991+ /* the rest of this checking is for IPv4 only */
65992+ if (!curr->ips)
65993+ goto exit;
65994+
65995+ if ((curr->ip_type & (1U << type)) &&
65996+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
65997+ goto exit;
65998+
65999+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
66000+ /* we don't place acls on raw sockets, and sometimes
66001+ dgram/ip sockets are opened for ioctl and not
66002+ bind/connect, so we'll fake a bind learn log */
66003+ if (type == SOCK_RAW || type == SOCK_PACKET) {
66004+ __u32 fakeip = 0;
66005+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
66006+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
66007+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
66008+ gr_to_filename(current->exec_file->f_path.dentry,
66009+ current->exec_file->f_path.mnt) :
66010+ curr->filename, curr->filename,
66011+ &fakeip, 0, type,
66012+ protocol, GR_CONNECT, &current->signal->saved_ip);
66013+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
66014+ __u32 fakeip = 0;
66015+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
66016+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
66017+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
66018+ gr_to_filename(current->exec_file->f_path.dentry,
66019+ current->exec_file->f_path.mnt) :
66020+ curr->filename, curr->filename,
66021+ &fakeip, 0, type,
66022+ protocol, GR_BIND, &current->signal->saved_ip);
66023+ }
66024+ /* we'll log when they use connect or bind */
66025+ goto exit;
66026+ }
66027+
66028+exit_fail:
66029+ if (domain == PF_INET)
66030+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
66031+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
66032+ else
66033+#ifndef CONFIG_IPV6
66034+ if (domain != PF_INET6)
66035+#endif
66036+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
66037+ gr_socktype_to_name(type), protocol);
66038+
66039+ return 0;
66040+exit:
66041+ return 1;
66042+}
66043+
66044+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
66045+{
66046+ if ((ip->mode & mode) &&
66047+ (ip_port >= ip->low) &&
66048+ (ip_port <= ip->high) &&
66049+ ((ntohl(ip_addr) & our_netmask) ==
66050+ (ntohl(our_addr) & our_netmask))
66051+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
66052+ && (ip->type & (1U << type))) {
66053+ if (ip->mode & GR_INVERT)
66054+ return 2; // specifically denied
66055+ else
66056+ return 1; // allowed
66057+ }
66058+
66059+ return 0; // not specifically allowed, may continue parsing
66060+}
66061+
66062+static int
66063+gr_search_connectbind(const int full_mode, struct sock *sk,
66064+ struct sockaddr_in *addr, const int type)
66065+{
66066+ char iface[IFNAMSIZ] = {0};
66067+ struct acl_subject_label *curr;
66068+ struct acl_ip_label *ip;
66069+ struct inet_sock *isk;
66070+ struct net_device *dev;
66071+ struct in_device *idev;
66072+ unsigned long i;
66073+ int ret;
66074+ int mode = full_mode & (GR_BIND | GR_CONNECT);
66075+ __u32 ip_addr = 0;
66076+ __u32 our_addr;
66077+ __u32 our_netmask;
66078+ char *p;
66079+ __u16 ip_port = 0;
66080+ const struct cred *cred = current_cred();
66081+
66082+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
66083+ return 0;
66084+
66085+ curr = current->acl;
66086+ isk = inet_sk(sk);
66087+
66088+ /* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
66089+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
66090+ addr->sin_addr.s_addr = curr->inaddr_any_override;
66091+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
66092+ struct sockaddr_in saddr;
66093+ int err;
66094+
66095+ saddr.sin_family = AF_INET;
66096+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
66097+ saddr.sin_port = isk->inet_sport;
66098+
66099+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
66100+ if (err)
66101+ return err;
66102+
66103+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
66104+ if (err)
66105+ return err;
66106+ }
66107+
66108+ if (!curr->ips)
66109+ return 0;
66110+
66111+ ip_addr = addr->sin_addr.s_addr;
66112+ ip_port = ntohs(addr->sin_port);
66113+
66114+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
66115+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
66116+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
66117+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
66118+ gr_to_filename(current->exec_file->f_path.dentry,
66119+ current->exec_file->f_path.mnt) :
66120+ curr->filename, curr->filename,
66121+ &ip_addr, ip_port, type,
66122+ sk->sk_protocol, mode, &current->signal->saved_ip);
66123+ return 0;
66124+ }
66125+
66126+ for (i = 0; i < curr->ip_num; i++) {
66127+ ip = *(curr->ips + i);
66128+ if (ip->iface != NULL) {
66129+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
66130+ p = strchr(iface, ':');
66131+ if (p != NULL)
66132+ *p = '\0';
66133+ dev = dev_get_by_name(sock_net(sk), iface);
66134+ if (dev == NULL)
66135+ continue;
66136+ idev = in_dev_get(dev);
66137+ if (idev == NULL) {
66138+ dev_put(dev);
66139+ continue;
66140+ }
66141+ rcu_read_lock();
66142+ for_ifa(idev) {
66143+ if (!strcmp(ip->iface, ifa->ifa_label)) {
66144+ our_addr = ifa->ifa_address;
66145+ our_netmask = 0xffffffff;
66146+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
66147+ if (ret == 1) {
66148+ rcu_read_unlock();
66149+ in_dev_put(idev);
66150+ dev_put(dev);
66151+ return 0;
66152+ } else if (ret == 2) {
66153+ rcu_read_unlock();
66154+ in_dev_put(idev);
66155+ dev_put(dev);
66156+ goto denied;
66157+ }
66158+ }
66159+ } endfor_ifa(idev);
66160+ rcu_read_unlock();
66161+ in_dev_put(idev);
66162+ dev_put(dev);
66163+ } else {
66164+ our_addr = ip->addr;
66165+ our_netmask = ip->netmask;
66166+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
66167+ if (ret == 1)
66168+ return 0;
66169+ else if (ret == 2)
66170+ goto denied;
66171+ }
66172+ }
66173+
66174+denied:
66175+ if (mode == GR_BIND)
66176+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
66177+ else if (mode == GR_CONNECT)
66178+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
66179+
66180+ return -EACCES;
66181+}
66182+
66183+int
66184+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
66185+{
66186+ /* always allow disconnection of dgram sockets with connect */
66187+ if (addr->sin_family == AF_UNSPEC)
66188+ return 0;
66189+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
66190+}
66191+
66192+int
66193+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
66194+{
66195+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
66196+}
66197+
66198+int gr_search_listen(struct socket *sock)
66199+{
66200+ struct sock *sk = sock->sk;
66201+ struct sockaddr_in addr;
66202+
66203+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
66204+ addr.sin_port = inet_sk(sk)->inet_sport;
66205+
66206+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
66207+}
66208+
66209+int gr_search_accept(struct socket *sock)
66210+{
66211+ struct sock *sk = sock->sk;
66212+ struct sockaddr_in addr;
66213+
66214+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
66215+ addr.sin_port = inet_sk(sk)->inet_sport;
66216+
66217+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
66218+}
66219+
66220+int
66221+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
66222+{
66223+ if (addr)
66224+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
66225+ else {
66226+ struct sockaddr_in sin;
66227+ const struct inet_sock *inet = inet_sk(sk);
66228+
66229+ sin.sin_addr.s_addr = inet->inet_daddr;
66230+ sin.sin_port = inet->inet_dport;
66231+
66232+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
66233+ }
66234+}
66235+
66236+int
66237+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
66238+{
66239+ struct sockaddr_in sin;
66240+
66241+ if (unlikely(skb->len < sizeof (struct udphdr)))
66242+ return 0; // skip this packet
66243+
66244+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
66245+ sin.sin_port = udp_hdr(skb)->source;
66246+
66247+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
66248+}
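check_ip_policy() above is the heart of the per-rule match: mode bit, port range, masked address comparison, then the 256-bit protocol bitmap and the socket-type bitmap, with GR_INVERT turning a positive match into an explicit deny. A self-contained sketch of the same matching logic follows; struct ip_rule is a simplified stand-in for struct acl_ip_label, and the constants in main() are made up for the demo.

#include <stdint.h>
#include <stdio.h>

struct ip_rule {
    uint32_t addr, netmask;   /* rule network (host byte order here) */
    uint16_t low, high;       /* allowed port range */
    uint32_t proto_bm[8];     /* 256-bit protocol bitmap */
    uint32_t type_bm;         /* socket-type bitmap */
    int      invert;          /* a match means "deny" */
};

/* 0 = no opinion, 1 = allowed, 2 = explicitly denied (the GR_INVERT case). */
static int match_rule(const struct ip_rule *r, uint32_t addr,
                      uint16_t port, uint8_t proto, int type)
{
    if (port < r->low || port > r->high)
        return 0;
    if ((addr & r->netmask) != (r->addr & r->netmask))
        return 0;
    if (!(r->proto_bm[proto / 32] & (1u << (proto % 32))))
        return 0;
    if (!(r->type_bm & (1u << type)))
        return 0;
    return r->invert ? 2 : 1;
}

int main(void)
{
    /* 10.0.0.0/8, ports >= 1024, stream sockets only, no inversion */
    struct ip_rule r = { 0x0a000000u, 0xff000000u, 1024, 65535,
                         {0}, 1u << 1, 0 };
    r.proto_bm[6 / 32] |= 1u << (6 % 32);  /* allow tcp (protocol 6) */

    printf("%d\n", match_rule(&r, 0x0a010203u, 8080, 6, 1)); /* 1: allowed */
    printf("%d\n", match_rule(&r, 0x0b010203u, 8080, 6, 1)); /* 0: wrong net */
    return 0;
}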
66249diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
66250new file mode 100644
66251index 0000000..25f54ef
66252--- /dev/null
66253+++ b/grsecurity/gracl_learn.c
66254@@ -0,0 +1,207 @@
66255+#include <linux/kernel.h>
66256+#include <linux/mm.h>
66257+#include <linux/sched.h>
66258+#include <linux/poll.h>
66259+#include <linux/string.h>
66260+#include <linux/file.h>
66261+#include <linux/types.h>
66262+#include <linux/vmalloc.h>
66263+#include <linux/grinternal.h>
66264+
66265+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
66266+ size_t count, loff_t *ppos);
66267+extern int gr_acl_is_enabled(void);
66268+
66269+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
66270+static int gr_learn_attached;
66271+
66272+/* use a 512k buffer */
66273+#define LEARN_BUFFER_SIZE (512 * 1024)
66274+
66275+static DEFINE_SPINLOCK(gr_learn_lock);
66276+static DEFINE_MUTEX(gr_learn_user_mutex);
66277+
66278+/* we need to maintain two buffers, so that the kernel context of grlearn
66279+ uses a mutex around the userspace copying, and the other kernel contexts
66280+ use a spinlock when copying into the buffer, since they cannot sleep
66281+*/
66282+static char *learn_buffer;
66283+static char *learn_buffer_user;
66284+static int learn_buffer_len;
66285+static int learn_buffer_user_len;
66286+
66287+static ssize_t
66288+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
66289+{
66290+ DECLARE_WAITQUEUE(wait, current);
66291+ ssize_t retval = 0;
66292+
66293+ add_wait_queue(&learn_wait, &wait);
66294+ set_current_state(TASK_INTERRUPTIBLE);
66295+ do {
66296+ mutex_lock(&gr_learn_user_mutex);
66297+ spin_lock(&gr_learn_lock);
66298+ if (learn_buffer_len)
66299+ break;
66300+ spin_unlock(&gr_learn_lock);
66301+ mutex_unlock(&gr_learn_user_mutex);
66302+ if (file->f_flags & O_NONBLOCK) {
66303+ retval = -EAGAIN;
66304+ goto out;
66305+ }
66306+ if (signal_pending(current)) {
66307+ retval = -ERESTARTSYS;
66308+ goto out;
66309+ }
66310+
66311+ schedule();
66312+ } while (1);
66313+
66314+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
66315+ learn_buffer_user_len = learn_buffer_len;
66316+ retval = learn_buffer_len;
66317+ learn_buffer_len = 0;
66318+
66319+ spin_unlock(&gr_learn_lock);
66320+
66321+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
66322+ retval = -EFAULT;
66323+
66324+ mutex_unlock(&gr_learn_user_mutex);
66325+out:
66326+ set_current_state(TASK_RUNNING);
66327+ remove_wait_queue(&learn_wait, &wait);
66328+ return retval;
66329+}
66330+
66331+static unsigned int
66332+poll_learn(struct file * file, poll_table * wait)
66333+{
66334+ poll_wait(file, &learn_wait, wait);
66335+
66336+ if (learn_buffer_len)
66337+ return (POLLIN | POLLRDNORM);
66338+
66339+ return 0;
66340+}
66341+
66342+void
66343+gr_clear_learn_entries(void)
66344+{
66345+ char *tmp;
66346+
66347+ mutex_lock(&gr_learn_user_mutex);
66348+ spin_lock(&gr_learn_lock);
66349+ tmp = learn_buffer;
66350+ learn_buffer = NULL;
66351+ spin_unlock(&gr_learn_lock);
66352+ if (tmp)
66353+ vfree(tmp);
66354+ if (learn_buffer_user != NULL) {
66355+ vfree(learn_buffer_user);
66356+ learn_buffer_user = NULL;
66357+ }
66358+ learn_buffer_len = 0;
66359+ mutex_unlock(&gr_learn_user_mutex);
66360+
66361+ return;
66362+}
66363+
66364+void
66365+gr_add_learn_entry(const char *fmt, ...)
66366+{
66367+ va_list args;
66368+ unsigned int len;
66369+
66370+ if (!gr_learn_attached)
66371+ return;
66372+
66373+ spin_lock(&gr_learn_lock);
66374+
66375+ /* leave a gap at the end so we know when it's "full" but don't have to
66376+ compute the exact length of the string we're trying to append
66377+ */
66378+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
66379+ spin_unlock(&gr_learn_lock);
66380+ wake_up_interruptible(&learn_wait);
66381+ return;
66382+ }
66383+ if (learn_buffer == NULL) {
66384+ spin_unlock(&gr_learn_lock);
66385+ return;
66386+ }
66387+
66388+ va_start(args, fmt);
66389+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
66390+ va_end(args);
66391+
66392+ learn_buffer_len += len + 1;
66393+
66394+ spin_unlock(&gr_learn_lock);
66395+ wake_up_interruptible(&learn_wait);
66396+
66397+ return;
66398+}
66399+
66400+static int
66401+open_learn(struct inode *inode, struct file *file)
66402+{
66403+ if (file->f_mode & FMODE_READ && gr_learn_attached)
66404+ return -EBUSY;
66405+ if (file->f_mode & FMODE_READ) {
66406+ int retval = 0;
66407+ mutex_lock(&gr_learn_user_mutex);
66408+ if (learn_buffer == NULL)
66409+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
66410+ if (learn_buffer_user == NULL)
66411+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
66412+ if (learn_buffer == NULL) {
66413+ retval = -ENOMEM;
66414+ goto out_error;
66415+ }
66416+ if (learn_buffer_user == NULL) {
66417+ retval = -ENOMEM;
66418+ goto out_error;
66419+ }
66420+ learn_buffer_len = 0;
66421+ learn_buffer_user_len = 0;
66422+ gr_learn_attached = 1;
66423+out_error:
66424+ mutex_unlock(&gr_learn_user_mutex);
66425+ return retval;
66426+ }
66427+ return 0;
66428+}
66429+
66430+static int
66431+close_learn(struct inode *inode, struct file *file)
66432+{
66433+ if (file->f_mode & FMODE_READ) {
66434+ char *tmp = NULL;
66435+ mutex_lock(&gr_learn_user_mutex);
66436+ spin_lock(&gr_learn_lock);
66437+ tmp = learn_buffer;
66438+ learn_buffer = NULL;
66439+ spin_unlock(&gr_learn_lock);
66440+ if (tmp)
66441+ vfree(tmp);
66442+ if (learn_buffer_user != NULL) {
66443+ vfree(learn_buffer_user);
66444+ learn_buffer_user = NULL;
66445+ }
66446+ learn_buffer_len = 0;
66447+ learn_buffer_user_len = 0;
66448+ gr_learn_attached = 0;
66449+ mutex_unlock(&gr_learn_user_mutex);
66450+ }
66451+
66452+ return 0;
66453+}
66454+
66455+const struct file_operations grsec_fops = {
66456+ .read = read_learn,
66457+ .write = write_grsec_handler,
66458+ .open = open_learn,
66459+ .release = close_learn,
66460+ .poll = poll_learn,
66461+};
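The double-buffer comment at the top of gracl_learn.c is the key design point: producers append to learn_buffer under a spinlock because they may run in atomic context and cannot sleep, while the single reader swaps the accumulated bytes into learn_buffer_user under a mutex and only then performs the sleepable copy_to_user(). A userspace sketch of that split follows; the two pthread mutexes stand in for the kernel's spinlock/mutex pair, the buffer size is arbitrary, and the demo is single-threaded since the locking structure is the point.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define BUF_SZ 4096
static char buf[BUF_SZ], snap[BUF_SZ]; /* learn_buffer / learn_buffer_user */
static int  buf_len;
static pthread_mutex_t produce_lock = PTHREAD_MUTEX_INITIALIZER; /* "spinlock" */
static pthread_mutex_t consume_lock = PTHREAD_MUTEX_INITIALIZER; /* "mutex" */

/* Writer side: short critical section, never blocked by the consumer's copy. */
static void add_entry(const char *s)
{
    pthread_mutex_lock(&produce_lock);
    size_t n = strlen(s);
    if (buf_len + n < BUF_SZ) {
        memcpy(buf + buf_len, s, n);
        buf_len += (int)n;
    }
    pthread_mutex_unlock(&produce_lock);
}

/* Reader side: snapshot under both locks, then do the slow copy outside
   the producer lock (the kernel's copy_to_user() may sleep). */
static int drain(void)
{
    pthread_mutex_lock(&consume_lock);
    pthread_mutex_lock(&produce_lock);
    int n = buf_len;
    memcpy(snap, buf, (size_t)n);
    buf_len = 0;
    pthread_mutex_unlock(&produce_lock);
    fwrite(snap, 1, (size_t)n, stdout); /* the sleepable part */
    pthread_mutex_unlock(&consume_lock);
    return n;
}

int main(void)
{
    add_entry("entry one\n");
    add_entry("entry two\n");
    drain();
    return 0;
}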
66462diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
66463new file mode 100644
66464index 0000000..39645c9
66465--- /dev/null
66466+++ b/grsecurity/gracl_res.c
66467@@ -0,0 +1,68 @@
66468+#include <linux/kernel.h>
66469+#include <linux/sched.h>
66470+#include <linux/gracl.h>
66471+#include <linux/grinternal.h>
66472+
66473+static const char *restab_log[] = {
66474+ [RLIMIT_CPU] = "RLIMIT_CPU",
66475+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
66476+ [RLIMIT_DATA] = "RLIMIT_DATA",
66477+ [RLIMIT_STACK] = "RLIMIT_STACK",
66478+ [RLIMIT_CORE] = "RLIMIT_CORE",
66479+ [RLIMIT_RSS] = "RLIMIT_RSS",
66480+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
66481+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
66482+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
66483+ [RLIMIT_AS] = "RLIMIT_AS",
66484+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
66485+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
66486+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
66487+ [RLIMIT_NICE] = "RLIMIT_NICE",
66488+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
66489+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
66490+ [GR_CRASH_RES] = "RLIMIT_CRASH"
66491+};
66492+
66493+void
66494+gr_log_resource(const struct task_struct *task,
66495+ const int res, const unsigned long wanted, const int gt)
66496+{
66497+ const struct cred *cred;
66498+ unsigned long rlim;
66499+
66500+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
66501+ return;
66502+
66503+ // not yet supported resource
66504+ if (unlikely(!restab_log[res]))
66505+ return;
66506+
66507+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
66508+ rlim = task_rlimit_max(task, res);
66509+ else
66510+ rlim = task_rlimit(task, res);
66511+
66512+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
66513+ return;
66514+
66515+ rcu_read_lock();
66516+ cred = __task_cred(task);
66517+
66518+ if (res == RLIMIT_NPROC &&
66519+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
66520+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
66521+ goto out_rcu_unlock;
66522+ else if (res == RLIMIT_MEMLOCK &&
66523+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
66524+ goto out_rcu_unlock;
66525+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
66526+ goto out_rcu_unlock;
66527+ rcu_read_unlock();
66528+
66529+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
66530+
66531+ return;
66532+out_rcu_unlock:
66533+ rcu_read_unlock();
66534+ return;
66535+}
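The gt flag in gr_log_resource() encodes which comparison the call site already made: a gt caller tests "wanted > limit", so logging is skipped while wanted <= rlim; a !gt caller tests "wanted >= limit", so logging is skipped only while wanted < rlim. A small sketch of that silence condition, with a local macro standing in for the kernel's RLIM_INFINITY:

#include <stdio.h>

#define RLIM_INFINITY_DEMO (~0UL) /* stand-in for the kernel constant */

/* Mirrors the early-return condition in gr_log_resource(): returns 1 when
   the request is within the limit and no log line should be emitted. */
static int within_limit(unsigned long rlim, unsigned long wanted, int gt)
{
    return rlim == RLIM_INFINITY_DEMO ||
           (gt && wanted <= rlim) ||  /* call site tested "wanted > limit"  */
           (!gt && wanted < rlim);    /* call site tested "wanted >= limit" */
}

int main(void)
{
    printf("%d\n", within_limit(100, 100, 1)); /* 1: at the limit, gt site  */
    printf("%d\n", within_limit(100, 100, 0)); /* 0: at the limit, !gt site */
    return 0;
}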
66536diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
66537new file mode 100644
66538index 0000000..3c38bfe
66539--- /dev/null
66540+++ b/grsecurity/gracl_segv.c
66541@@ -0,0 +1,305 @@
66542+#include <linux/kernel.h>
66543+#include <linux/mm.h>
66544+#include <asm/uaccess.h>
66545+#include <asm/errno.h>
66546+#include <asm/mman.h>
66547+#include <net/sock.h>
66548+#include <linux/file.h>
66549+#include <linux/fs.h>
66550+#include <linux/net.h>
66551+#include <linux/in.h>
66552+#include <linux/slab.h>
66553+#include <linux/types.h>
66554+#include <linux/sched.h>
66555+#include <linux/timer.h>
66556+#include <linux/gracl.h>
66557+#include <linux/grsecurity.h>
66558+#include <linux/grinternal.h>
66559+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
66560+#include <linux/magic.h>
66561+#include <linux/pagemap.h>
66562+#include "../fs/btrfs/async-thread.h"
66563+#include "../fs/btrfs/ctree.h"
66564+#include "../fs/btrfs/btrfs_inode.h"
66565+#endif
66566+
66567+static struct crash_uid *uid_set;
66568+static unsigned short uid_used;
66569+static DEFINE_SPINLOCK(gr_uid_lock);
66570+extern rwlock_t gr_inode_lock;
66571+extern struct acl_subject_label *
66572+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
66573+ struct acl_role_label *role);
66574+
66575+static inline dev_t __get_dev(const struct dentry *dentry)
66576+{
66577+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
66578+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
66579+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
66580+ else
66581+#endif
66582+ return dentry->d_sb->s_dev;
66583+}
66584+
66585+int
66586+gr_init_uidset(void)
66587+{
66588+ uid_set =
66589+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
66590+ uid_used = 0;
66591+
66592+ return uid_set ? 1 : 0;
66593+}
66594+
66595+void
66596+gr_free_uidset(void)
66597+{
66598+ if (uid_set)
66599+ kfree(uid_set);
66600+
66601+ return;
66602+}
66603+
66604+int
66605+gr_find_uid(const uid_t uid)
66606+{
66607+ struct crash_uid *tmp = uid_set;
66608+ uid_t buid;
66609+ int low = 0, high = uid_used - 1, mid;
66610+
66611+ while (high >= low) {
66612+ mid = (low + high) >> 1;
66613+ buid = tmp[mid].uid;
66614+ if (buid == uid)
66615+ return mid;
66616+ if (buid > uid)
66617+ high = mid - 1;
66618+ if (buid < uid)
66619+ low = mid + 1;
66620+ }
66621+
66622+ return -1;
66623+}
66624+
66625+static __inline__ void
66626+gr_insertsort(void)
66627+{
66628+ unsigned short i, j;
66629+ struct crash_uid index;
66630+
66631+ for (i = 1; i < uid_used; i++) {
66632+ index = uid_set[i];
66633+ j = i;
66634+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
66635+ uid_set[j] = uid_set[j - 1];
66636+ j--;
66637+ }
66638+ uid_set[j] = index;
66639+ }
66640+
66641+ return;
66642+}
66643+
66644+static __inline__ void
66645+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
66646+{
66647+ int loc;
66648+ uid_t uid = GR_GLOBAL_UID(kuid);
66649+
66650+ if (uid_used == GR_UIDTABLE_MAX)
66651+ return;
66652+
66653+ loc = gr_find_uid(uid);
66654+
66655+ if (loc >= 0) {
66656+ uid_set[loc].expires = expires;
66657+ return;
66658+ }
66659+
66660+ uid_set[uid_used].uid = uid;
66661+ uid_set[uid_used].expires = expires;
66662+ uid_used++;
66663+
66664+ gr_insertsort();
66665+
66666+ return;
66667+}
66668+
66669+void
66670+gr_remove_uid(const unsigned short loc)
66671+{
66672+ unsigned short i;
66673+
66674+ for (i = loc + 1; i < uid_used; i++)
66675+ uid_set[i - 1] = uid_set[i];
66676+
66677+ uid_used--;
66678+
66679+ return;
66680+}
66681+
66682+int
66683+gr_check_crash_uid(const kuid_t kuid)
66684+{
66685+ int loc;
66686+ int ret = 0;
66687+ uid_t uid;
66688+
66689+ if (unlikely(!gr_acl_is_enabled()))
66690+ return 0;
66691+
66692+ uid = GR_GLOBAL_UID(kuid);
66693+
66694+ spin_lock(&gr_uid_lock);
66695+ loc = gr_find_uid(uid);
66696+
66697+ if (loc < 0)
66698+ goto out_unlock;
66699+
66700+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
66701+ gr_remove_uid(loc);
66702+ else
66703+ ret = 1;
66704+
66705+out_unlock:
66706+ spin_unlock(&gr_uid_lock);
66707+ return ret;
66708+}
66709+
66710+static __inline__ int
66711+proc_is_setxid(const struct cred *cred)
66712+{
66713+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
66714+ !uid_eq(cred->uid, cred->fsuid))
66715+ return 1;
66716+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
66717+ !gid_eq(cred->gid, cred->fsgid))
66718+ return 1;
66719+
66720+ return 0;
66721+}
66722+
66723+extern int gr_fake_force_sig(int sig, struct task_struct *t);
66724+
66725+void
66726+gr_handle_crash(struct task_struct *task, const int sig)
66727+{
66728+ struct acl_subject_label *curr;
66729+ struct task_struct *tsk, *tsk2;
66730+ const struct cred *cred;
66731+ const struct cred *cred2;
66732+
66733+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
66734+ return;
66735+
66736+ if (unlikely(!gr_acl_is_enabled()))
66737+ return;
66738+
66739+ curr = task->acl;
66740+
66741+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
66742+ return;
66743+
66744+ if (time_before_eq(curr->expires, get_seconds())) {
66745+ curr->expires = 0;
66746+ curr->crashes = 0;
66747+ }
66748+
66749+ curr->crashes++;
66750+
66751+ if (!curr->expires)
66752+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
66753+
66754+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
66755+ time_after(curr->expires, get_seconds())) {
66756+ rcu_read_lock();
66757+ cred = __task_cred(task);
66758+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
66759+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
66760+ spin_lock(&gr_uid_lock);
66761+ gr_insert_uid(cred->uid, curr->expires);
66762+ spin_unlock(&gr_uid_lock);
66763+ curr->expires = 0;
66764+ curr->crashes = 0;
66765+ read_lock(&tasklist_lock);
66766+ do_each_thread(tsk2, tsk) {
66767+ cred2 = __task_cred(tsk);
66768+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
66769+ gr_fake_force_sig(SIGKILL, tsk);
66770+ } while_each_thread(tsk2, tsk);
66771+ read_unlock(&tasklist_lock);
66772+ } else {
66773+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
66774+ read_lock(&tasklist_lock);
66775+ read_lock(&grsec_exec_file_lock);
66776+ do_each_thread(tsk2, tsk) {
66777+ if (likely(tsk != task)) {
66778+ // if this thread has the same subject as the one that triggered
66779+ // RES_CRASH and it's the same binary, kill it
66780+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
66781+ gr_fake_force_sig(SIGKILL, tsk);
66782+ }
66783+ } while_each_thread(tsk2, tsk);
66784+ read_unlock(&grsec_exec_file_lock);
66785+ read_unlock(&tasklist_lock);
66786+ }
66787+ rcu_read_unlock();
66788+ }
66789+
66790+ return;
66791+}
66792+
66793+int
66794+gr_check_crash_exec(const struct file *filp)
66795+{
66796+ struct acl_subject_label *curr;
66797+
66798+ if (unlikely(!gr_acl_is_enabled()))
66799+ return 0;
66800+
66801+ read_lock(&gr_inode_lock);
66802+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
66803+ __get_dev(filp->f_path.dentry),
66804+ current->role);
66805+ read_unlock(&gr_inode_lock);
66806+
66807+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
66808+ (!curr->crashes && !curr->expires))
66809+ return 0;
66810+
66811+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
66812+ time_after(curr->expires, get_seconds()))
66813+ return 1;
66814+ else if (time_before_eq(curr->expires, get_seconds())) {
66815+ curr->crashes = 0;
66816+ curr->expires = 0;
66817+ }
66818+
66819+ return 0;
66820+}
66821+
66822+void
66823+gr_handle_alertkill(struct task_struct *task)
66824+{
66825+ struct acl_subject_label *curracl;
66826+ __u32 curr_ip;
66827+ struct task_struct *p, *p2;
66828+
66829+ if (unlikely(!gr_acl_is_enabled()))
66830+ return;
66831+
66832+ curracl = task->acl;
66833+ curr_ip = task->signal->curr_ip;
66834+
66835+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
66836+ read_lock(&tasklist_lock);
66837+ do_each_thread(p2, p) {
66838+ if (p->signal->curr_ip == curr_ip)
66839+ gr_fake_force_sig(SIGKILL, p);
66840+ } while_each_thread(p2, p);
66841+ read_unlock(&tasklist_lock);
66842+ } else if (curracl->mode & GR_KILLPROC)
66843+ gr_fake_force_sig(SIGKILL, task);
66844+
66845+ return;
66846+}
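The crash-uid table above is kept sorted precisely so gr_check_crash_uid() can binary-search it on every check, while the rare insert pays for an insertion sort. A standalone sketch of the same search/insert pair over a plain array (the fixed capacity and uid values are chosen only for the demo):

#include <stdio.h>

#define TABLE_MAX 16
static unsigned int uids[TABLE_MAX];
static int used;

/* Binary search, as in gr_find_uid(): index of uid, or -1. */
static int find_uid(unsigned int uid)
{
    int low = 0, high = used - 1;

    while (high >= low) {
        int mid = low + (high - low) / 2;
        if (uids[mid] == uid)
            return mid;
        if (uids[mid] > uid)
            high = mid - 1;
        else
            low = mid + 1;
    }
    return -1;
}

/* Append then bubble into place, as gr_insert_uid()/gr_insertsort() do. */
static void insert_uid(unsigned int uid)
{
    if (used == TABLE_MAX)
        return;
    int j = used++;
    uids[j] = uid;
    while (j > 0 && uids[j - 1] > uids[j]) {
        unsigned int t = uids[j];
        uids[j] = uids[j - 1];
        uids[j - 1] = t;
        j--;
    }
}

int main(void)
{
    insert_uid(1000);
    insert_uid(33);
    insert_uid(500);
    printf("found 500 at %d, 99 at %d\n", find_uid(500), find_uid(99));
    return 0;
}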
66847diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
66848new file mode 100644
66849index 0000000..98011b0
66850--- /dev/null
66851+++ b/grsecurity/gracl_shm.c
66852@@ -0,0 +1,40 @@
66853+#include <linux/kernel.h>
66854+#include <linux/mm.h>
66855+#include <linux/sched.h>
66856+#include <linux/file.h>
66857+#include <linux/ipc.h>
66858+#include <linux/gracl.h>
66859+#include <linux/grsecurity.h>
66860+#include <linux/grinternal.h>
66861+
66862+int
66863+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
66864+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
66865+{
66866+ struct task_struct *task;
66867+
66868+ if (!gr_acl_is_enabled())
66869+ return 1;
66870+
66871+ rcu_read_lock();
66872+ read_lock(&tasklist_lock);
66873+
66874+ task = find_task_by_vpid(shm_cprid);
66875+
66876+ if (unlikely(!task))
66877+ task = find_task_by_vpid(shm_lapid);
66878+
66879+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
66880+ (task_pid_nr(task) == shm_lapid)) &&
66881+ (task->acl->mode & GR_PROTSHM) &&
66882+ (task->acl != current->acl))) {
66883+ read_unlock(&tasklist_lock);
66884+ rcu_read_unlock();
66885+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
66886+ return 0;
66887+ }
66888+ read_unlock(&tasklist_lock);
66889+ rcu_read_unlock();
66890+
66891+ return 1;
66892+}
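The start-time comparison in gr_handle_shmat() is a pid-reuse guard: a task found under the creator's pid is only trusted as the creator if it started no later than the segment was created. Isolated as a sketch (plain time_t values stand in for the kernel's task start_time):

#include <stdio.h>
#include <time.h>

/* A recycled pid names a task younger than the object it supposedly
   created; this mirrors the time_before_eq() test in gr_handle_shmat(). */
static int pid_still_creator(time_t task_start, time_t shm_createtime)
{
    return task_start <= shm_createtime;
}

int main(void)
{
    time_t created = 1000;

    printf("%d\n", pid_still_creator(900, created));  /* 1: plausible creator */
    printf("%d\n", pid_still_creator(1500, created)); /* 0: pid was recycled  */
    return 0;
}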
66893diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
66894new file mode 100644
66895index 0000000..bc0be01
66896--- /dev/null
66897+++ b/grsecurity/grsec_chdir.c
66898@@ -0,0 +1,19 @@
66899+#include <linux/kernel.h>
66900+#include <linux/sched.h>
66901+#include <linux/fs.h>
66902+#include <linux/file.h>
66903+#include <linux/grsecurity.h>
66904+#include <linux/grinternal.h>
66905+
66906+void
66907+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
66908+{
66909+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
66910+ if ((grsec_enable_chdir && grsec_enable_group &&
66911+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
66912+ !grsec_enable_group)) {
66913+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
66914+ }
66915+#endif
66916+ return;
66917+}
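The audit gate in gr_log_chdir() distributes to a simpler predicate: log iff chdir auditing is enabled and either group filtering is off or the task is in the audited group. A sketch that exhaustively checks the equivalence (the flag parameters are local stand-ins for the grsec_enable_* variables and the in_group_p() result):

#include <stdio.h>

/* The original gate versus its distributed form; returns the decision,
   or -1 if the two forms ever disagreed (they never do). */
static int should_log(int enable_chdir, int enable_group, int in_group)
{
    int original   = (enable_chdir && enable_group && in_group) ||
                     (enable_chdir && !enable_group);
    int simplified = enable_chdir && (!enable_group || in_group);

    return original == simplified ? simplified : -1;
}

int main(void)
{
    for (int a = 0; a <= 1; a++)
        for (int b = 0; b <= 1; b++)
            for (int c = 0; c <= 1; c++)
                printf("chdir=%d group=%d in_group=%d -> log=%d\n",
                       a, b, c, should_log(a, b, c));
    return 0;
}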
66918diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
66919new file mode 100644
66920index 0000000..bd6e105
66921--- /dev/null
66922+++ b/grsecurity/grsec_chroot.c
66923@@ -0,0 +1,370 @@
66924+#include <linux/kernel.h>
66925+#include <linux/module.h>
66926+#include <linux/sched.h>
66927+#include <linux/file.h>
66928+#include <linux/fs.h>
66929+#include <linux/mount.h>
66930+#include <linux/types.h>
66931+#include "../fs/mount.h"
66932+#include <linux/grsecurity.h>
66933+#include <linux/grinternal.h>
66934+
66935+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
66936+static int gr_init_ran;
66937+#endif
66938+
66939+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
66940+{
66941+#ifdef CONFIG_GRKERNSEC
66942+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
66943+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
66944+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
66945+ && gr_init_ran
66946+#endif
66947+ )
66948+ task->gr_is_chrooted = 1;
66949+ else {
66950+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
66951+ if (task_pid_nr(task) == 1 && !gr_init_ran)
66952+ gr_init_ran = 1;
66953+#endif
66954+ task->gr_is_chrooted = 0;
66955+ }
66956+
66957+ task->gr_chroot_dentry = path->dentry;
66958+#endif
66959+ return;
66960+}
66961+
66962+void gr_clear_chroot_entries(struct task_struct *task)
66963+{
66964+#ifdef CONFIG_GRKERNSEC
66965+ task->gr_is_chrooted = 0;
66966+ task->gr_chroot_dentry = NULL;
66967+#endif
66968+ return;
66969+}
66970+
66971+int
66972+gr_handle_chroot_unix(const pid_t pid)
66973+{
66974+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
66975+ struct task_struct *p;
66976+
66977+ if (unlikely(!grsec_enable_chroot_unix))
66978+ return 1;
66979+
66980+ if (likely(!proc_is_chrooted(current)))
66981+ return 1;
66982+
66983+ rcu_read_lock();
66984+ read_lock(&tasklist_lock);
66985+ p = find_task_by_vpid_unrestricted(pid);
66986+ if (unlikely(p && !have_same_root(current, p))) {
66987+ read_unlock(&tasklist_lock);
66988+ rcu_read_unlock();
66989+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
66990+ return 0;
66991+ }
66992+ read_unlock(&tasklist_lock);
66993+ rcu_read_unlock();
66994+#endif
66995+ return 1;
66996+}
66997+
66998+int
66999+gr_handle_chroot_nice(void)
67000+{
67001+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
67002+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
67003+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
67004+ return -EPERM;
67005+ }
67006+#endif
67007+ return 0;
67008+}
67009+
67010+int
67011+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
67012+{
67013+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
67014+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
67015+ && proc_is_chrooted(current)) {
67016+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
67017+ return -EACCES;
67018+ }
67019+#endif
67020+ return 0;
67021+}
67022+
67023+int
67024+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
67025+{
67026+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
67027+ struct task_struct *p;
67028+ int ret = 0;
67029+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
67030+ return ret;
67031+
67032+ read_lock(&tasklist_lock);
67033+ do_each_pid_task(pid, type, p) {
67034+ if (!have_same_root(current, p)) {
67035+ ret = 1;
67036+ goto out;
67037+ }
67038+ } while_each_pid_task(pid, type, p);
67039+out:
67040+ read_unlock(&tasklist_lock);
67041+ return ret;
67042+#endif
67043+ return 0;
67044+}
67045+
67046+int
67047+gr_pid_is_chrooted(struct task_struct *p)
67048+{
67049+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
67050+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
67051+ return 0;
67052+
67053+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
67054+ !have_same_root(current, p)) {
67055+ return 1;
67056+ }
67057+#endif
67058+ return 0;
67059+}
67060+
67061+EXPORT_SYMBOL(gr_pid_is_chrooted);
67062+
67063+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
67064+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
67065+{
67066+ struct path path, currentroot;
67067+ int ret = 0;
67068+
67069+ path.dentry = (struct dentry *)u_dentry;
67070+ path.mnt = (struct vfsmount *)u_mnt;
67071+ get_fs_root(current->fs, &currentroot);
67072+ if (path_is_under(&path, &currentroot))
67073+ ret = 1;
67074+ path_put(&currentroot);
67075+
67076+ return ret;
67077+}
67078+#endif
67079+
67080+int
67081+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
67082+{
67083+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
67084+ if (!grsec_enable_chroot_fchdir)
67085+ return 1;
67086+
67087+ if (!proc_is_chrooted(current))
67088+ return 1;
67089+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
67090+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
67091+ return 0;
67092+ }
67093+#endif
67094+ return 1;
67095+}
67096+
67097+int
67098+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
67099+ const time_t shm_createtime)
67100+{
67101+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
67102+ struct task_struct *p;
67103+ time_t starttime;
67104+
67105+ if (unlikely(!grsec_enable_chroot_shmat))
67106+ return 1;
67107+
67108+ if (likely(!proc_is_chrooted(current)))
67109+ return 1;
67110+
67111+ rcu_read_lock();
67112+ read_lock(&tasklist_lock);
67113+
67114+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
67115+ starttime = p->start_time.tv_sec;
67116+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
67117+ if (have_same_root(current, p)) {
67118+ goto allow;
67119+ } else {
67120+ read_unlock(&tasklist_lock);
67121+ rcu_read_unlock();
67122+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
67123+ return 0;
67124+ }
67125+ }
67126+ /* creator exited, pid reuse, fall through to next check */
67127+ }
67128+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
67129+ if (unlikely(!have_same_root(current, p))) {
67130+ read_unlock(&tasklist_lock);
67131+ rcu_read_unlock();
67132+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
67133+ return 0;
67134+ }
67135+ }
67136+
67137+allow:
67138+ read_unlock(&tasklist_lock);
67139+ rcu_read_unlock();
67140+#endif
67141+ return 1;
67142+}
67143+
67144+void
67145+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
67146+{
67147+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
67148+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
67149+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
67150+#endif
67151+ return;
67152+}
67153+
67154+int
67155+gr_handle_chroot_mknod(const struct dentry *dentry,
67156+ const struct vfsmount *mnt, const int mode)
67157+{
67158+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
67159+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
67160+ proc_is_chrooted(current)) {
67161+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
67162+ return -EPERM;
67163+ }
67164+#endif
67165+ return 0;
67166+}
67167+
67168+int
67169+gr_handle_chroot_mount(const struct dentry *dentry,
67170+ const struct vfsmount *mnt, const char *dev_name)
67171+{
67172+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
67173+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
67174+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
67175+ return -EPERM;
67176+ }
67177+#endif
67178+ return 0;
67179+}
67180+
67181+int
67182+gr_handle_chroot_pivot(void)
67183+{
67184+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
67185+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
67186+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
67187+ return -EPERM;
67188+ }
67189+#endif
67190+ return 0;
67191+}
67192+
67193+int
67194+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
67195+{
67196+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
67197+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
67198+ !gr_is_outside_chroot(dentry, mnt)) {
67199+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
67200+ return -EPERM;
67201+ }
67202+#endif
67203+ return 0;
67204+}
67205+
67206+extern const char *captab_log[];
67207+extern int captab_log_entries;
67208+
67209+int
67210+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
67211+{
67212+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
67213+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
67214+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
67215+ if (cap_raised(chroot_caps, cap)) {
67216+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
67217+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
67218+ }
67219+ return 0;
67220+ }
67221+ }
67222+#endif
67223+ return 1;
67224+}
67225+
67226+int
67227+gr_chroot_is_capable(const int cap)
67228+{
67229+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
67230+ return gr_task_chroot_is_capable(current, current_cred(), cap);
67231+#endif
67232+ return 1;
67233+}
67234+
67235+int
67236+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
67237+{
67238+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
67239+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
67240+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
67241+ if (cap_raised(chroot_caps, cap)) {
67242+ return 0;
67243+ }
67244+ }
67245+#endif
67246+ return 1;
67247+}
67248+
67249+int
67250+gr_chroot_is_capable_nolog(const int cap)
67251+{
67252+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
67253+ return gr_task_chroot_is_capable_nolog(current, cap);
67254+#endif
67255+ return 1;
67256+}
67257+
67258+int
67259+gr_handle_chroot_sysctl(const int op)
67260+{
67261+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
67262+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
67263+ proc_is_chrooted(current))
67264+ return -EACCES;
67265+#endif
67266+ return 0;
67267+}
67268+
67269+void
67270+gr_handle_chroot_chdir(const struct path *path)
67271+{
67272+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
67273+ if (grsec_enable_chroot_chdir)
67274+ set_fs_pwd(current->fs, path);
67275+#endif
67276+ return;
67277+}
67278+
67279+int
67280+gr_handle_chroot_chmod(const struct dentry *dentry,
67281+ const struct vfsmount *mnt, const int mode)
67282+{
67283+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
67284+ /* allow chmod +s on directories, but not files */
67285+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
67286+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
67287+ proc_is_chrooted(current)) {
67288+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
67289+ return -EPERM;
67290+ }
67291+#endif
67292+ return 0;
67293+}
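
Taken together, these hooks reduce to two questions: is the calling task chrooted (tracked per-task by gr_set_chroot_entries), and does the operation's target cross the task's root? gr_is_outside_chroot() answers the second with path_is_under() against the current fs root, and gr_handle_chroot_chroot() uses it to veto the classic double-chroot escape. A simplified userspace model of the containment test, treating paths as canonical strings; the kernel compares dentry/vfsmount pairs instead, so this is a sketch, not the real algorithm:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* does `path` lie under `root`? (string model of path_is_under()) */
static bool is_under(const char *path, const char *root)
{
    size_t n = strlen(root);

    if (strcmp(root, "/") == 0)
        return true;
    return strncmp(path, root, n) == 0 &&
           (path[n] == '/' || path[n] == '\0');
}

/* model of gr_handle_chroot_chroot(): a chrooted task may only
 * chroot() again to a directory inside its current root */
static int handle_chroot(const char *target, const char *cur_root,
                         bool is_chrooted)
{
    if (is_chrooted && !is_under(target, cur_root))
        return -1; /* -EPERM in the kernel */
    return 0;
}

int main(void)
{
    printf("%d\n", handle_chroot("/", "/jail", true));         /* denied */
    printf("%d\n", handle_chroot("/jail/sub", "/jail", true)); /* allowed */
    return 0;
}
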
67294diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
67295new file mode 100644
67296index 0000000..ce65ceb
67297--- /dev/null
67298+++ b/grsecurity/grsec_disabled.c
67299@@ -0,0 +1,434 @@
67300+#include <linux/kernel.h>
67301+#include <linux/module.h>
67302+#include <linux/sched.h>
67303+#include <linux/file.h>
67304+#include <linux/fs.h>
67305+#include <linux/kdev_t.h>
67306+#include <linux/net.h>
67307+#include <linux/in.h>
67308+#include <linux/ip.h>
67309+#include <linux/skbuff.h>
67310+#include <linux/sysctl.h>
67311+
67312+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
67313+void
67314+pax_set_initial_flags(struct linux_binprm *bprm)
67315+{
67316+ return;
67317+}
67318+#endif
67319+
67320+#ifdef CONFIG_SYSCTL
67321+__u32
67322+gr_handle_sysctl(const struct ctl_table * table, const int op)
67323+{
67324+ return 0;
67325+}
67326+#endif
67327+
67328+#ifdef CONFIG_TASKSTATS
67329+int gr_is_taskstats_denied(int pid)
67330+{
67331+ return 0;
67332+}
67333+#endif
67334+
67335+int
67336+gr_acl_is_enabled(void)
67337+{
67338+ return 0;
67339+}
67340+
67341+void
67342+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
67343+{
67344+ return;
67345+}
67346+
67347+int
67348+gr_handle_rawio(const struct inode *inode)
67349+{
67350+ return 0;
67351+}
67352+
67353+void
67354+gr_acl_handle_psacct(struct task_struct *task, const long code)
67355+{
67356+ return;
67357+}
67358+
67359+int
67360+gr_handle_ptrace(struct task_struct *task, const long request)
67361+{
67362+ return 0;
67363+}
67364+
67365+int
67366+gr_handle_proc_ptrace(struct task_struct *task)
67367+{
67368+ return 0;
67369+}
67370+
67371+int
67372+gr_set_acls(const int type)
67373+{
67374+ return 0;
67375+}
67376+
67377+int
67378+gr_check_hidden_task(const struct task_struct *tsk)
67379+{
67380+ return 0;
67381+}
67382+
67383+int
67384+gr_check_protected_task(const struct task_struct *task)
67385+{
67386+ return 0;
67387+}
67388+
67389+int
67390+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
67391+{
67392+ return 0;
67393+}
67394+
67395+void
67396+gr_copy_label(struct task_struct *tsk)
67397+{
67398+ return;
67399+}
67400+
67401+void
67402+gr_set_pax_flags(struct task_struct *task)
67403+{
67404+ return;
67405+}
67406+
67407+int
67408+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
67409+ const int unsafe_share)
67410+{
67411+ return 0;
67412+}
67413+
67414+void
67415+gr_handle_delete(const ino_t ino, const dev_t dev)
67416+{
67417+ return;
67418+}
67419+
67420+void
67421+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
67422+{
67423+ return;
67424+}
67425+
67426+void
67427+gr_handle_crash(struct task_struct *task, const int sig)
67428+{
67429+ return;
67430+}
67431+
67432+int
67433+gr_check_crash_exec(const struct file *filp)
67434+{
67435+ return 0;
67436+}
67437+
67438+int
67439+gr_check_crash_uid(const kuid_t uid)
67440+{
67441+ return 0;
67442+}
67443+
67444+void
67445+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
67446+ struct dentry *old_dentry,
67447+ struct dentry *new_dentry,
67448+ struct vfsmount *mnt, const __u8 replace)
67449+{
67450+ return;
67451+}
67452+
67453+int
67454+gr_search_socket(const int family, const int type, const int protocol)
67455+{
67456+ return 1;
67457+}
67458+
67459+int
67460+gr_search_connectbind(const int mode, const struct socket *sock,
67461+ const struct sockaddr_in *addr)
67462+{
67463+ return 0;
67464+}
67465+
67466+void
67467+gr_handle_alertkill(struct task_struct *task)
67468+{
67469+ return;
67470+}
67471+
67472+__u32
67473+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
67474+{
67475+ return 1;
67476+}
67477+
67478+__u32
67479+gr_acl_handle_hidden_file(const struct dentry * dentry,
67480+ const struct vfsmount * mnt)
67481+{
67482+ return 1;
67483+}
67484+
67485+__u32
67486+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
67487+ int acc_mode)
67488+{
67489+ return 1;
67490+}
67491+
67492+__u32
67493+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
67494+{
67495+ return 1;
67496+}
67497+
67498+__u32
67499+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
67500+{
67501+ return 1;
67502+}
67503+
67504+int
67505+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
67506+ unsigned int *vm_flags)
67507+{
67508+ return 1;
67509+}
67510+
67511+__u32
67512+gr_acl_handle_truncate(const struct dentry * dentry,
67513+ const struct vfsmount * mnt)
67514+{
67515+ return 1;
67516+}
67517+
67518+__u32
67519+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
67520+{
67521+ return 1;
67522+}
67523+
67524+__u32
67525+gr_acl_handle_access(const struct dentry * dentry,
67526+ const struct vfsmount * mnt, const int fmode)
67527+{
67528+ return 1;
67529+}
67530+
67531+__u32
67532+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
67533+ umode_t *mode)
67534+{
67535+ return 1;
67536+}
67537+
67538+__u32
67539+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
67540+{
67541+ return 1;
67542+}
67543+
67544+__u32
67545+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
67546+{
67547+ return 1;
67548+}
67549+
67550+void
67551+grsecurity_init(void)
67552+{
67553+ return;
67554+}
67555+
67556+umode_t gr_acl_umask(void)
67557+{
67558+ return 0;
67559+}
67560+
67561+__u32
67562+gr_acl_handle_mknod(const struct dentry * new_dentry,
67563+ const struct dentry * parent_dentry,
67564+ const struct vfsmount * parent_mnt,
67565+ const int mode)
67566+{
67567+ return 1;
67568+}
67569+
67570+__u32
67571+gr_acl_handle_mkdir(const struct dentry * new_dentry,
67572+ const struct dentry * parent_dentry,
67573+ const struct vfsmount * parent_mnt)
67574+{
67575+ return 1;
67576+}
67577+
67578+__u32
67579+gr_acl_handle_symlink(const struct dentry * new_dentry,
67580+ const struct dentry * parent_dentry,
67581+ const struct vfsmount * parent_mnt, const struct filename *from)
67582+{
67583+ return 1;
67584+}
67585+
67586+__u32
67587+gr_acl_handle_link(const struct dentry * new_dentry,
67588+ const struct dentry * parent_dentry,
67589+ const struct vfsmount * parent_mnt,
67590+ const struct dentry * old_dentry,
67591+ const struct vfsmount * old_mnt, const struct filename *to)
67592+{
67593+ return 1;
67594+}
67595+
67596+int
67597+gr_acl_handle_rename(const struct dentry *new_dentry,
67598+ const struct dentry *parent_dentry,
67599+ const struct vfsmount *parent_mnt,
67600+ const struct dentry *old_dentry,
67601+ const struct inode *old_parent_inode,
67602+ const struct vfsmount *old_mnt, const struct filename *newname)
67603+{
67604+ return 0;
67605+}
67606+
67607+int
67608+gr_acl_handle_filldir(const struct file *file, const char *name,
67609+ const int namelen, const ino_t ino)
67610+{
67611+ return 1;
67612+}
67613+
67614+int
67615+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
67616+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
67617+{
67618+ return 1;
67619+}
67620+
67621+int
67622+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
67623+{
67624+ return 0;
67625+}
67626+
67627+int
67628+gr_search_accept(const struct socket *sock)
67629+{
67630+ return 0;
67631+}
67632+
67633+int
67634+gr_search_listen(const struct socket *sock)
67635+{
67636+ return 0;
67637+}
67638+
67639+int
67640+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
67641+{
67642+ return 0;
67643+}
67644+
67645+__u32
67646+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
67647+{
67648+ return 1;
67649+}
67650+
67651+__u32
67652+gr_acl_handle_creat(const struct dentry * dentry,
67653+ const struct dentry * p_dentry,
67654+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
67655+ const int imode)
67656+{
67657+ return 1;
67658+}
67659+
67660+void
67661+gr_acl_handle_exit(void)
67662+{
67663+ return;
67664+}
67665+
67666+int
67667+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
67668+{
67669+ return 1;
67670+}
67671+
67672+void
67673+gr_set_role_label(const kuid_t uid, const kgid_t gid)
67674+{
67675+ return;
67676+}
67677+
67678+int
67679+gr_acl_handle_procpidmem(const struct task_struct *task)
67680+{
67681+ return 0;
67682+}
67683+
67684+int
67685+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
67686+{
67687+ return 0;
67688+}
67689+
67690+int
67691+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
67692+{
67693+ return 0;
67694+}
67695+
67696+void
67697+gr_set_kernel_label(struct task_struct *task)
67698+{
67699+ return;
67700+}
67701+
67702+int
67703+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
67704+{
67705+ return 0;
67706+}
67707+
67708+int
67709+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
67710+{
67711+ return 0;
67712+}
67713+
67714+int gr_acl_enable_at_secure(void)
67715+{
67716+ return 0;
67717+}
67718+
67719+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
67720+{
67721+ return dentry->d_sb->s_dev;
67722+}
67723+
67724+void gr_put_exec_file(struct task_struct *task)
67725+{
67726+ return;
67727+}
67728+
67729+EXPORT_SYMBOL(gr_set_kernel_label);
67730+#ifdef CONFIG_SECURITY
67731+EXPORT_SYMBOL(gr_check_user_change);
67732+EXPORT_SYMBOL(gr_check_group_change);
67733+#endif
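
grsec_disabled.c exists so the rest of the kernel can call the gr_* hooks unconditionally: when grsecurity is compiled out, these permissive stubs are linked in place of the real implementations (presumably chosen in the grsecurity Makefile; that build detail is an assumption here, not shown in this hunk). The idiom in miniature, with a hypothetical CONFIG_FEATURE:

#include <stdio.h>

/* hypothetical stand-in for CONFIG_GRKERNSEC; leave it undefined to
 * get the permissive stub, as grsec_disabled.c provides */
#ifdef CONFIG_FEATURE
int gr_acl_is_enabled(void) { return 1; /* placeholder for the real check */ }
#else
int gr_acl_is_enabled(void) { return 0; /* no-op stub */ }
#endif

int main(void)
{
    /* call sites never need their own #ifdef CONFIG_FEATURE */
    printf("acl enabled: %d\n", gr_acl_is_enabled());
    return 0;
}
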
67734diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
67735new file mode 100644
67736index 0000000..387032b
67737--- /dev/null
67738+++ b/grsecurity/grsec_exec.c
67739@@ -0,0 +1,187 @@
67740+#include <linux/kernel.h>
67741+#include <linux/sched.h>
67742+#include <linux/file.h>
67743+#include <linux/binfmts.h>
67744+#include <linux/fs.h>
67745+#include <linux/types.h>
67746+#include <linux/grdefs.h>
67747+#include <linux/grsecurity.h>
67748+#include <linux/grinternal.h>
67749+#include <linux/capability.h>
67750+#include <linux/module.h>
67751+#include <linux/compat.h>
67752+
67753+#include <asm/uaccess.h>
67754+
67755+#ifdef CONFIG_GRKERNSEC_EXECLOG
67756+static char gr_exec_arg_buf[132];
67757+static DEFINE_MUTEX(gr_exec_arg_mutex);
67758+#endif
67759+
67760+struct user_arg_ptr {
67761+#ifdef CONFIG_COMPAT
67762+ bool is_compat;
67763+#endif
67764+ union {
67765+ const char __user *const __user *native;
67766+#ifdef CONFIG_COMPAT
67767+ const compat_uptr_t __user *compat;
67768+#endif
67769+ } ptr;
67770+};
67771+
67772+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
67773+
67774+void
67775+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
67776+{
67777+#ifdef CONFIG_GRKERNSEC_EXECLOG
67778+ char *grarg = gr_exec_arg_buf;
67779+ unsigned int i, x, execlen = 0;
67780+ char c;
67781+
67782+ if (!((grsec_enable_execlog && grsec_enable_group &&
67783+ in_group_p(grsec_audit_gid))
67784+ || (grsec_enable_execlog && !grsec_enable_group)))
67785+ return;
67786+
67787+ mutex_lock(&gr_exec_arg_mutex);
67788+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
67789+
67790+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
67791+ const char __user *p;
67792+ unsigned int len;
67793+
67794+ p = get_user_arg_ptr(argv, i);
67795+ if (IS_ERR(p))
67796+ goto log;
67797+
67798+ len = strnlen_user(p, 128 - execlen);
67799+ if (len > 128 - execlen)
67800+ len = 128 - execlen;
67801+ else if (len > 0)
67802+ len--;
67803+ if (copy_from_user(grarg + execlen, p, len))
67804+ goto log;
67805+
67806+ /* rewrite unprintable characters */
67807+ for (x = 0; x < len; x++) {
67808+ c = *(grarg + execlen + x);
67809+ if (c < 32 || c > 126)
67810+ *(grarg + execlen + x) = ' ';
67811+ }
67812+
67813+ execlen += len;
67814+ *(grarg + execlen) = ' ';
67815+ *(grarg + execlen + 1) = '\0';
67816+ execlen++;
67817+ }
67818+
67819+ log:
67820+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
67821+ bprm->file->f_path.mnt, grarg);
67822+ mutex_unlock(&gr_exec_arg_mutex);
67823+#endif
67824+ return;
67825+}
67826+
67827+#ifdef CONFIG_GRKERNSEC
67828+extern int gr_acl_is_capable(const int cap);
67829+extern int gr_acl_is_capable_nolog(const int cap);
67830+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
67831+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
67832+extern int gr_chroot_is_capable(const int cap);
67833+extern int gr_chroot_is_capable_nolog(const int cap);
67834+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
67835+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
67836+#endif
67837+
67838+const char *captab_log[] = {
67839+ "CAP_CHOWN",
67840+ "CAP_DAC_OVERRIDE",
67841+ "CAP_DAC_READ_SEARCH",
67842+ "CAP_FOWNER",
67843+ "CAP_FSETID",
67844+ "CAP_KILL",
67845+ "CAP_SETGID",
67846+ "CAP_SETUID",
67847+ "CAP_SETPCAP",
67848+ "CAP_LINUX_IMMUTABLE",
67849+ "CAP_NET_BIND_SERVICE",
67850+ "CAP_NET_BROADCAST",
67851+ "CAP_NET_ADMIN",
67852+ "CAP_NET_RAW",
67853+ "CAP_IPC_LOCK",
67854+ "CAP_IPC_OWNER",
67855+ "CAP_SYS_MODULE",
67856+ "CAP_SYS_RAWIO",
67857+ "CAP_SYS_CHROOT",
67858+ "CAP_SYS_PTRACE",
67859+ "CAP_SYS_PACCT",
67860+ "CAP_SYS_ADMIN",
67861+ "CAP_SYS_BOOT",
67862+ "CAP_SYS_NICE",
67863+ "CAP_SYS_RESOURCE",
67864+ "CAP_SYS_TIME",
67865+ "CAP_SYS_TTY_CONFIG",
67866+ "CAP_MKNOD",
67867+ "CAP_LEASE",
67868+ "CAP_AUDIT_WRITE",
67869+ "CAP_AUDIT_CONTROL",
67870+ "CAP_SETFCAP",
67871+ "CAP_MAC_OVERRIDE",
67872+ "CAP_MAC_ADMIN",
67873+ "CAP_SYSLOG",
67874+ "CAP_WAKE_ALARM"
67875+};
67876+
67877+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
67878+
67879+int gr_is_capable(const int cap)
67880+{
67881+#ifdef CONFIG_GRKERNSEC
67882+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
67883+ return 1;
67884+ return 0;
67885+#else
67886+ return 1;
67887+#endif
67888+}
67889+
67890+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
67891+{
67892+#ifdef CONFIG_GRKERNSEC
67893+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
67894+ return 1;
67895+ return 0;
67896+#else
67897+ return 1;
67898+#endif
67899+}
67900+
67901+int gr_is_capable_nolog(const int cap)
67902+{
67903+#ifdef CONFIG_GRKERNSEC
67904+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
67905+ return 1;
67906+ return 0;
67907+#else
67908+ return 1;
67909+#endif
67910+}
67911+
67912+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
67913+{
67914+#ifdef CONFIG_GRKERNSEC
67915+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
67916+ return 1;
67917+ return 0;
67918+#else
67919+ return 1;
67920+#endif
67921+}
67922+
67923+EXPORT_SYMBOL(gr_is_capable);
67924+EXPORT_SYMBOL(gr_is_capable_nolog);
67925+EXPORT_SYMBOL(gr_task_is_capable);
67926+EXPORT_SYMBOL(gr_task_is_capable_nolog);
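
gr_handle_exec_args() serializes up to 128 bytes of argv into a shared buffer under a mutex and rewrites unprintable bytes to spaces, so a crafted argv cannot inject control characters into the kernel log. The same capture loop as a self-contained userspace model (buffer sizing mirrors gr_exec_arg_buf[132]):

#include <stdio.h>
#include <string.h>

#define LOGLEN 128

/* concatenate argv into out[], truncating at LOGLEN bytes and
 * replacing unprintable characters, as gr_handle_exec_args() does */
static void capture_args(char *out, int argc, char **argv)
{
    unsigned int execlen = 0;
    int i;

    memset(out, 0, LOGLEN + 4);
    for (i = 0; i < argc && execlen < LOGLEN; i++) {
        unsigned int x, len = strlen(argv[i]);

        if (len > LOGLEN - execlen)
            len = LOGLEN - execlen;
        memcpy(out + execlen, argv[i], len);

        /* rewrite unprintable characters */
        for (x = 0; x < len; x++)
            if (out[execlen + x] < 32 || out[execlen + x] > 126)
                out[execlen + x] = ' ';

        execlen += len;
        out[execlen++] = ' ';
        out[execlen] = '\0';
    }
}

int main(int argc, char **argv)
{
    char buf[LOGLEN + 4];

    capture_args(buf, argc, argv);
    printf("would log: %s\n", buf);
    return 0;
}
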
67927diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
67928new file mode 100644
67929index 0000000..06cc6ea
67930--- /dev/null
67931+++ b/grsecurity/grsec_fifo.c
67932@@ -0,0 +1,24 @@
67933+#include <linux/kernel.h>
67934+#include <linux/sched.h>
67935+#include <linux/fs.h>
67936+#include <linux/file.h>
67937+#include <linux/grinternal.h>
67938+
67939+int
67940+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
67941+ const struct dentry *dir, const int flag, const int acc_mode)
67942+{
67943+#ifdef CONFIG_GRKERNSEC_FIFO
67944+ const struct cred *cred = current_cred();
67945+
67946+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
67947+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
67948+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
67949+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
67950+ if (!inode_permission(dentry->d_inode, acc_mode))
67951+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
67952+ return -EACCES;
67953+ }
67954+#endif
67955+ return 0;
67956+}
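
This denies opening another user's FIFO in a sticky directory unless the FIFO belongs to the directory's owner, with O_EXCL creation exempt; comparable hardening later reached mainline as the fs.protected_fifos sysctl. The decision in isolation (argument names are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* model of gr_handle_fifo(): in a sticky directory, refuse to open a
 * FIFO owned by neither the directory owner nor the opener */
static bool deny_fifo_open(unsigned fifo_uid, unsigned dir_uid,
                           bool dir_sticky, unsigned opener_uid,
                           bool o_excl)
{
    return !o_excl && dir_sticky &&
           fifo_uid != dir_uid &&   /* not the directory owner's */
           fifo_uid != opener_uid;  /* nor the opener's own */
}

int main(void)
{
    /* uid 1000 opening uid 1001's FIFO in root-owned sticky /tmp */
    printf("deny: %d\n", deny_fifo_open(1001, 0, true, 1000, false));
    return 0;
}
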
67957diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
67958new file mode 100644
67959index 0000000..8ca18bf
67960--- /dev/null
67961+++ b/grsecurity/grsec_fork.c
67962@@ -0,0 +1,23 @@
67963+#include <linux/kernel.h>
67964+#include <linux/sched.h>
67965+#include <linux/grsecurity.h>
67966+#include <linux/grinternal.h>
67967+#include <linux/errno.h>
67968+
67969+void
67970+gr_log_forkfail(const int retval)
67971+{
67972+#ifdef CONFIG_GRKERNSEC_FORKFAIL
67973+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
67974+ switch (retval) {
67975+ case -EAGAIN:
67976+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
67977+ break;
67978+ case -ENOMEM:
67979+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
67980+ break;
67981+ }
67982+ }
67983+#endif
67984+ return;
67985+}
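
Only -EAGAIN and -ENOMEM are reported because they are the signature of resource exhaustion, for example a fork bomb hitting RLIMIT_NPROC, which is what CONFIG_GRKERNSEC_FORKFAIL is meant to surface; the hook is presumably invoked from the kernel's fork error path elsewhere in this patch. A userspace mirror of the same filter:

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* mirror of gr_log_forkfail(): report only the two failure modes the
 * hook cares about */
static void log_forkfail(int err)
{
    if (err == EAGAIN || err == ENOMEM)
        fprintf(stderr, "forkfail: %s\n",
                err == EAGAIN ? "EAGAIN" : "ENOMEM");
}

int main(void)
{
    pid_t pid = fork();

    if (pid < 0)
        log_forkfail(errno);
    else if (pid == 0)
        _exit(0);
    return 0;
}
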
67986diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
67987new file mode 100644
67988index 0000000..ab2d875
67989--- /dev/null
67990+++ b/grsecurity/grsec_init.c
67991@@ -0,0 +1,279 @@
67992+#include <linux/kernel.h>
67993+#include <linux/sched.h>
67994+#include <linux/mm.h>
67995+#include <linux/gracl.h>
67996+#include <linux/slab.h>
67997+#include <linux/vmalloc.h>
67998+#include <linux/percpu.h>
67999+#include <linux/module.h>
68000+
68001+int grsec_enable_ptrace_readexec;
68002+int grsec_enable_setxid;
68003+int grsec_enable_symlinkown;
68004+kgid_t grsec_symlinkown_gid;
68005+int grsec_enable_brute;
68006+int grsec_enable_link;
68007+int grsec_enable_dmesg;
68008+int grsec_enable_harden_ptrace;
68009+int grsec_enable_fifo;
68010+int grsec_enable_execlog;
68011+int grsec_enable_signal;
68012+int grsec_enable_forkfail;
68013+int grsec_enable_audit_ptrace;
68014+int grsec_enable_time;
68015+int grsec_enable_group;
68016+kgid_t grsec_audit_gid;
68017+int grsec_enable_chdir;
68018+int grsec_enable_mount;
68019+int grsec_enable_rofs;
68020+int grsec_enable_chroot_findtask;
68021+int grsec_enable_chroot_mount;
68022+int grsec_enable_chroot_shmat;
68023+int grsec_enable_chroot_fchdir;
68024+int grsec_enable_chroot_double;
68025+int grsec_enable_chroot_pivot;
68026+int grsec_enable_chroot_chdir;
68027+int grsec_enable_chroot_chmod;
68028+int grsec_enable_chroot_mknod;
68029+int grsec_enable_chroot_nice;
68030+int grsec_enable_chroot_execlog;
68031+int grsec_enable_chroot_caps;
68032+int grsec_enable_chroot_sysctl;
68033+int grsec_enable_chroot_unix;
68034+int grsec_enable_tpe;
68035+kgid_t grsec_tpe_gid;
68036+int grsec_enable_blackhole;
68037+#ifdef CONFIG_IPV6_MODULE
68038+EXPORT_SYMBOL(grsec_enable_blackhole);
68039+#endif
68040+int grsec_lastack_retries;
68041+int grsec_enable_tpe_all;
68042+int grsec_enable_tpe_invert;
68043+int grsec_enable_socket_all;
68044+kgid_t grsec_socket_all_gid;
68045+int grsec_enable_socket_client;
68046+kgid_t grsec_socket_client_gid;
68047+int grsec_enable_socket_server;
68048+kgid_t grsec_socket_server_gid;
68049+int grsec_resource_logging;
68050+int grsec_disable_privio;
68051+int grsec_enable_log_rwxmaps;
68052+int grsec_lock;
68053+
68054+DEFINE_SPINLOCK(grsec_alert_lock);
68055+unsigned long grsec_alert_wtime = 0;
68056+unsigned long grsec_alert_fyet = 0;
68057+
68058+DEFINE_SPINLOCK(grsec_audit_lock);
68059+
68060+DEFINE_RWLOCK(grsec_exec_file_lock);
68061+
68062+char *gr_shared_page[4];
68063+
68064+char *gr_alert_log_fmt;
68065+char *gr_audit_log_fmt;
68066+char *gr_alert_log_buf;
68067+char *gr_audit_log_buf;
68068+
68069+extern struct gr_arg *gr_usermode;
68070+extern unsigned char *gr_system_salt;
68071+extern unsigned char *gr_system_sum;
68072+
68073+void __init
68074+grsecurity_init(void)
68075+{
68076+ int j;
68077+ /* create the per-cpu shared pages */
68078+
68079+#ifdef CONFIG_X86
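+ /* assumed intent, not stated in the patch: scrub the BIOS keyboard buffer in the BIOS Data Area (head/tail pointers at 0x41a, then the 32-byte buffer) so a passphrase typed at boot cannot linger in low memory */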
68080+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
68081+#endif
68082+
68083+ for (j = 0; j < 4; j++) {
68084+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
68085+ if (gr_shared_page[j] == NULL) {
68086+ panic("Unable to allocate grsecurity shared page");
68087+ return;
68088+ }
68089+ }
68090+
68091+ /* allocate log buffers */
68092+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
68093+ if (!gr_alert_log_fmt) {
68094+ panic("Unable to allocate grsecurity alert log format buffer");
68095+ return;
68096+ }
68097+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
68098+ if (!gr_audit_log_fmt) {
68099+ panic("Unable to allocate grsecurity audit log format buffer");
68100+ return;
68101+ }
68102+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
68103+ if (!gr_alert_log_buf) {
68104+ panic("Unable to allocate grsecurity alert log buffer");
68105+ return;
68106+ }
68107+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
68108+ if (!gr_audit_log_buf) {
68109+ panic("Unable to allocate grsecurity audit log buffer");
68110+ return;
68111+ }
68112+
68113+ /* allocate memory for authentication structure */
68114+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
68115+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
68116+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
68117+
68118+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
68119+ panic("Unable to allocate grsecurity authentication structure");
68120+ return;
68121+ }
68122+
68123+
68124+#ifdef CONFIG_GRKERNSEC_IO
68125+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
68126+ grsec_disable_privio = 1;
68127+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
68128+ grsec_disable_privio = 1;
68129+#else
68130+ grsec_disable_privio = 0;
68131+#endif
68132+#endif
68133+
68134+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
68135+ /* for backward compatibility, tpe_invert always defaults to on if
68136+ enabled in the kernel
68137+ */
68138+ grsec_enable_tpe_invert = 1;
68139+#endif
68140+
68141+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
68142+#ifndef CONFIG_GRKERNSEC_SYSCTL
68143+ grsec_lock = 1;
68144+#endif
68145+
68146+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
68147+ grsec_enable_log_rwxmaps = 1;
68148+#endif
68149+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
68150+ grsec_enable_group = 1;
68151+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
68152+#endif
68153+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
68154+ grsec_enable_ptrace_readexec = 1;
68155+#endif
68156+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
68157+ grsec_enable_chdir = 1;
68158+#endif
68159+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
68160+ grsec_enable_harden_ptrace = 1;
68161+#endif
68162+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
68163+ grsec_enable_mount = 1;
68164+#endif
68165+#ifdef CONFIG_GRKERNSEC_LINK
68166+ grsec_enable_link = 1;
68167+#endif
68168+#ifdef CONFIG_GRKERNSEC_BRUTE
68169+ grsec_enable_brute = 1;
68170+#endif
68171+#ifdef CONFIG_GRKERNSEC_DMESG
68172+ grsec_enable_dmesg = 1;
68173+#endif
68174+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68175+ grsec_enable_blackhole = 1;
68176+ grsec_lastack_retries = 4;
68177+#endif
68178+#ifdef CONFIG_GRKERNSEC_FIFO
68179+ grsec_enable_fifo = 1;
68180+#endif
68181+#ifdef CONFIG_GRKERNSEC_EXECLOG
68182+ grsec_enable_execlog = 1;
68183+#endif
68184+#ifdef CONFIG_GRKERNSEC_SETXID
68185+ grsec_enable_setxid = 1;
68186+#endif
68187+#ifdef CONFIG_GRKERNSEC_SIGNAL
68188+ grsec_enable_signal = 1;
68189+#endif
68190+#ifdef CONFIG_GRKERNSEC_FORKFAIL
68191+ grsec_enable_forkfail = 1;
68192+#endif
68193+#ifdef CONFIG_GRKERNSEC_TIME
68194+ grsec_enable_time = 1;
68195+#endif
68196+#ifdef CONFIG_GRKERNSEC_RESLOG
68197+ grsec_resource_logging = 1;
68198+#endif
68199+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68200+ grsec_enable_chroot_findtask = 1;
68201+#endif
68202+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
68203+ grsec_enable_chroot_unix = 1;
68204+#endif
68205+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
68206+ grsec_enable_chroot_mount = 1;
68207+#endif
68208+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
68209+ grsec_enable_chroot_fchdir = 1;
68210+#endif
68211+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
68212+ grsec_enable_chroot_shmat = 1;
68213+#endif
68214+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
68215+ grsec_enable_audit_ptrace = 1;
68216+#endif
68217+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
68218+ grsec_enable_chroot_double = 1;
68219+#endif
68220+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
68221+ grsec_enable_chroot_pivot = 1;
68222+#endif
68223+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
68224+ grsec_enable_chroot_chdir = 1;
68225+#endif
68226+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
68227+ grsec_enable_chroot_chmod = 1;
68228+#endif
68229+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
68230+ grsec_enable_chroot_mknod = 1;
68231+#endif
68232+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
68233+ grsec_enable_chroot_nice = 1;
68234+#endif
68235+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
68236+ grsec_enable_chroot_execlog = 1;
68237+#endif
68238+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
68239+ grsec_enable_chroot_caps = 1;
68240+#endif
68241+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
68242+ grsec_enable_chroot_sysctl = 1;
68243+#endif
68244+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
68245+ grsec_enable_symlinkown = 1;
68246+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
68247+#endif
68248+#ifdef CONFIG_GRKERNSEC_TPE
68249+ grsec_enable_tpe = 1;
68250+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
68251+#ifdef CONFIG_GRKERNSEC_TPE_ALL
68252+ grsec_enable_tpe_all = 1;
68253+#endif
68254+#endif
68255+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
68256+ grsec_enable_socket_all = 1;
68257+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
68258+#endif
68259+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
68260+ grsec_enable_socket_client = 1;
68261+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
68262+#endif
68263+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
68264+ grsec_enable_socket_server = 1;
68265+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
68266+#endif
68267+#endif
68268+
68269+ return;
68270+}
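
The #if block above encodes a deliberate policy: features are force-enabled at boot only when there is no sysctl interface to change them later, or when CONFIG_GRKERNSEC_SYSCTL_ON requests sysctl support with everything defaulted on; grsec_lock is preset only when sysctl support is absent, so sysctl builds stay adjustable until the admin sets the lock. The same gate in a compilable model with stand-in CONFIG names:

#include <stdio.h>

/* stand-ins for CONFIG_GRKERNSEC_SYSCTL and CONFIG_GRKERNSEC_SYSCTL_ON;
 * toggle them to see the three build flavors */
#define CONFIG_SYSCTL_SUPPORT 1
#define CONFIG_SYSCTL_DEFAULT_ON 1

static int feature_enabled;
static int settings_locked;

static void feature_init(void)
{
#if !defined(CONFIG_SYSCTL_SUPPORT) || defined(CONFIG_SYSCTL_DEFAULT_ON)
#ifndef CONFIG_SYSCTL_SUPPORT
    settings_locked = 1; /* no runtime interface, so lock the defaults */
#endif
    feature_enabled = 1;
#endif
}

int main(void)
{
    feature_init();
    printf("enabled=%d locked=%d\n", feature_enabled, settings_locked);
    return 0;
}
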
68271diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
68272new file mode 100644
68273index 0000000..5e05e20
68274--- /dev/null
68275+++ b/grsecurity/grsec_link.c
68276@@ -0,0 +1,58 @@
68277+#include <linux/kernel.h>
68278+#include <linux/sched.h>
68279+#include <linux/fs.h>
68280+#include <linux/file.h>
68281+#include <linux/grinternal.h>
68282+
68283+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
68284+{
68285+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
68286+ const struct inode *link_inode = link->dentry->d_inode;
68287+
68288+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
68289+ /* ignore root-owned links, e.g. /proc/self */
68290+ gr_is_global_nonroot(link_inode->i_uid) && target &&
68291+ !uid_eq(link_inode->i_uid, target->i_uid)) {
68292+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
68293+ return 1;
68294+ }
68295+#endif
68296+ return 0;
68297+}
68298+
68299+int
68300+gr_handle_follow_link(const struct inode *parent,
68301+ const struct inode *inode,
68302+ const struct dentry *dentry, const struct vfsmount *mnt)
68303+{
68304+#ifdef CONFIG_GRKERNSEC_LINK
68305+ const struct cred *cred = current_cred();
68306+
68307+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
68308+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
68309+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
68310+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
68311+ return -EACCES;
68312+ }
68313+#endif
68314+ return 0;
68315+}
68316+
68317+int
68318+gr_handle_hardlink(const struct dentry *dentry,
68319+ const struct vfsmount *mnt,
68320+ struct inode *inode, const int mode, const struct filename *to)
68321+{
68322+#ifdef CONFIG_GRKERNSEC_LINK
68323+ const struct cred *cred = current_cred();
68324+
68325+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
68326+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
68327+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
68328+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
68329+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
68330+ return -EPERM;
68331+ }
68332+#endif
68333+ return 0;
68334+}
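
gr_handle_follow_link() and gr_handle_hardlink() implement the sticky-directory symlink rule and the hardlink-ownership rule; essentially this policy was later merged into mainline Linux as the fs.protected_symlinks and fs.protected_hardlinks sysctls. The symlink predicate in isolation (argument names are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* model of gr_handle_follow_link(): refuse to follow a symlink that
 * sits in a sticky, world-writable directory unless the link is owned
 * by the directory owner or by the process following it */
static bool deny_follow(unsigned dir_uid, bool dir_sticky,
                        bool dir_world_writable, unsigned link_uid,
                        unsigned fsuid)
{
    return dir_sticky && dir_world_writable &&
           link_uid != dir_uid && /* not planted by the dir owner */
           link_uid != fsuid;     /* nor followed by its own owner */
}

int main(void)
{
    /* root following a symlink that uid 1001 planted in /tmp */
    printf("deny: %d\n", deny_follow(0, true, true, 1001, 0));
    return 0;
}
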
68335diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
68336new file mode 100644
68337index 0000000..dbe0a6b
68338--- /dev/null
68339+++ b/grsecurity/grsec_log.c
68340@@ -0,0 +1,341 @@
68341+#include <linux/kernel.h>
68342+#include <linux/sched.h>
68343+#include <linux/file.h>
68344+#include <linux/tty.h>
68345+#include <linux/fs.h>
68346+#include <linux/mm.h>
68347+#include <linux/grinternal.h>
68348+
68349+#ifdef CONFIG_TREE_PREEMPT_RCU
68350+#define DISABLE_PREEMPT() preempt_disable()
68351+#define ENABLE_PREEMPT() preempt_enable()
68352+#else
68353+#define DISABLE_PREEMPT()
68354+#define ENABLE_PREEMPT()
68355+#endif
68356+
68357+#define BEGIN_LOCKS(x) \
68358+ DISABLE_PREEMPT(); \
68359+ rcu_read_lock(); \
68360+ read_lock(&tasklist_lock); \
68361+ read_lock(&grsec_exec_file_lock); \
68362+ if (x != GR_DO_AUDIT) \
68363+ spin_lock(&grsec_alert_lock); \
68364+ else \
68365+ spin_lock(&grsec_audit_lock)
68366+
68367+#define END_LOCKS(x) \
68368+ if (x != GR_DO_AUDIT) \
68369+ spin_unlock(&grsec_alert_lock); \
68370+ else \
68371+ spin_unlock(&grsec_audit_lock); \
68372+ read_unlock(&grsec_exec_file_lock); \
68373+ read_unlock(&tasklist_lock); \
68374+ rcu_read_unlock(); \
68375+ ENABLE_PREEMPT(); \
68376+ if (x == GR_DONT_AUDIT) \
68377+ gr_handle_alertkill(current)
68378+
68379+enum {
68380+ FLOODING,
68381+ NO_FLOODING
68382+};
68383+
68384+extern char *gr_alert_log_fmt;
68385+extern char *gr_audit_log_fmt;
68386+extern char *gr_alert_log_buf;
68387+extern char *gr_audit_log_buf;
68388+
68389+static int gr_log_start(int audit)
68390+{
68391+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
68392+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
68393+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
68394+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
68395+ unsigned long curr_secs = get_seconds();
68396+
68397+ if (audit == GR_DO_AUDIT)
68398+ goto set_fmt;
68399+
68400+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
68401+ grsec_alert_wtime = curr_secs;
68402+ grsec_alert_fyet = 0;
68403+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
68404+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
68405+ grsec_alert_fyet++;
68406+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
68407+ grsec_alert_wtime = curr_secs;
68408+ grsec_alert_fyet++;
68409+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
68410+ return FLOODING;
68411+ } else
68412+ return FLOODING;
68413+
68414+set_fmt:
68415+#endif
68416+ memset(buf, 0, PAGE_SIZE);
68417+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
68418+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
68419+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
68420+ } else if (current->signal->curr_ip) {
68421+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
68422+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
68423+ } else if (gr_acl_is_enabled()) {
68424+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
68425+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
68426+ } else {
68427+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
68428+ strcpy(buf, fmt);
68429+ }
68430+
68431+ return NO_FLOODING;
68432+}
68433+
68434+static void gr_log_middle(int audit, const char *msg, va_list ap)
68435+ __attribute__ ((format (printf, 2, 0)));
68436+
68437+static void gr_log_middle(int audit, const char *msg, va_list ap)
68438+{
68439+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
68440+ unsigned int len = strlen(buf);
68441+
68442+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
68443+
68444+ return;
68445+}
68446+
68447+static void gr_log_middle_varargs(int audit, const char *msg, ...)
68448+ __attribute__ ((format (printf, 2, 3)));
68449+
68450+static void gr_log_middle_varargs(int audit, const char *msg, ...)
68451+{
68452+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
68453+ unsigned int len = strlen(buf);
68454+ va_list ap;
68455+
68456+ va_start(ap, msg);
68457+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
68458+ va_end(ap);
68459+
68460+ return;
68461+}
68462+
68463+static void gr_log_end(int audit, int append_default)
68464+{
68465+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
68466+ if (append_default) {
68467+ struct task_struct *task = current;
68468+ struct task_struct *parent = task->real_parent;
68469+ const struct cred *cred = __task_cred(task);
68470+ const struct cred *pcred = __task_cred(parent);
68471+ unsigned int len = strlen(buf);
68472+
68473+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
68474+ }
68475+
68476+ printk("%s\n", buf);
68477+
68478+ return;
68479+}
68480+
68481+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
68482+{
68483+ int logtype;
68484+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
68485+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
68486+ void *voidptr = NULL;
68487+ int num1 = 0, num2 = 0;
68488+ unsigned long ulong1 = 0, ulong2 = 0;
68489+ struct dentry *dentry = NULL;
68490+ struct vfsmount *mnt = NULL;
68491+ struct file *file = NULL;
68492+ struct task_struct *task = NULL;
68493+ struct vm_area_struct *vma = NULL;
68494+ const struct cred *cred, *pcred;
68495+ va_list ap;
68496+
68497+ BEGIN_LOCKS(audit);
68498+ logtype = gr_log_start(audit);
68499+ if (logtype == FLOODING) {
68500+ END_LOCKS(audit);
68501+ return;
68502+ }
68503+ va_start(ap, argtypes);
68504+ switch (argtypes) {
68505+ case GR_TTYSNIFF:
68506+ task = va_arg(ap, struct task_struct *);
68507+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
68508+ break;
68509+ case GR_SYSCTL_HIDDEN:
68510+ str1 = va_arg(ap, char *);
68511+ gr_log_middle_varargs(audit, msg, result, str1);
68512+ break;
68513+ case GR_RBAC:
68514+ dentry = va_arg(ap, struct dentry *);
68515+ mnt = va_arg(ap, struct vfsmount *);
68516+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
68517+ break;
68518+ case GR_RBAC_STR:
68519+ dentry = va_arg(ap, struct dentry *);
68520+ mnt = va_arg(ap, struct vfsmount *);
68521+ str1 = va_arg(ap, char *);
68522+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
68523+ break;
68524+ case GR_STR_RBAC:
68525+ str1 = va_arg(ap, char *);
68526+ dentry = va_arg(ap, struct dentry *);
68527+ mnt = va_arg(ap, struct vfsmount *);
68528+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
68529+ break;
68530+ case GR_RBAC_MODE2:
68531+ dentry = va_arg(ap, struct dentry *);
68532+ mnt = va_arg(ap, struct vfsmount *);
68533+ str1 = va_arg(ap, char *);
68534+ str2 = va_arg(ap, char *);
68535+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
68536+ break;
68537+ case GR_RBAC_MODE3:
68538+ dentry = va_arg(ap, struct dentry *);
68539+ mnt = va_arg(ap, struct vfsmount *);
68540+ str1 = va_arg(ap, char *);
68541+ str2 = va_arg(ap, char *);
68542+ str3 = va_arg(ap, char *);
68543+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
68544+ break;
68545+ case GR_FILENAME:
68546+ dentry = va_arg(ap, struct dentry *);
68547+ mnt = va_arg(ap, struct vfsmount *);
68548+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
68549+ break;
68550+ case GR_STR_FILENAME:
68551+ str1 = va_arg(ap, char *);
68552+ dentry = va_arg(ap, struct dentry *);
68553+ mnt = va_arg(ap, struct vfsmount *);
68554+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
68555+ break;
68556+ case GR_FILENAME_STR:
68557+ dentry = va_arg(ap, struct dentry *);
68558+ mnt = va_arg(ap, struct vfsmount *);
68559+ str1 = va_arg(ap, char *);
68560+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
68561+ break;
68562+ case GR_FILENAME_TWO_INT:
68563+ dentry = va_arg(ap, struct dentry *);
68564+ mnt = va_arg(ap, struct vfsmount *);
68565+ num1 = va_arg(ap, int);
68566+ num2 = va_arg(ap, int);
68567+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
68568+ break;
68569+ case GR_FILENAME_TWO_INT_STR:
68570+ dentry = va_arg(ap, struct dentry *);
68571+ mnt = va_arg(ap, struct vfsmount *);
68572+ num1 = va_arg(ap, int);
68573+ num2 = va_arg(ap, int);
68574+ str1 = va_arg(ap, char *);
68575+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
68576+ break;
68577+ case GR_TEXTREL:
68578+ file = va_arg(ap, struct file *);
68579+ ulong1 = va_arg(ap, unsigned long);
68580+ ulong2 = va_arg(ap, unsigned long);
68581+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
68582+ break;
68583+ case GR_PTRACE:
68584+ task = va_arg(ap, struct task_struct *);
68585+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
68586+ break;
68587+ case GR_RESOURCE:
68588+ task = va_arg(ap, struct task_struct *);
68589+ cred = __task_cred(task);
68590+ pcred = __task_cred(task->real_parent);
68591+ ulong1 = va_arg(ap, unsigned long);
68592+ str1 = va_arg(ap, char *);
68593+ ulong2 = va_arg(ap, unsigned long);
68594+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
68595+ break;
68596+ case GR_CAP:
68597+ task = va_arg(ap, struct task_struct *);
68598+ cred = __task_cred(task);
68599+ pcred = __task_cred(task->real_parent);
68600+ str1 = va_arg(ap, char *);
68601+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
68602+ break;
68603+ case GR_SIG:
68604+ str1 = va_arg(ap, char *);
68605+ voidptr = va_arg(ap, void *);
68606+ gr_log_middle_varargs(audit, msg, str1, voidptr);
68607+ break;
68608+ case GR_SIG2:
68609+ task = va_arg(ap, struct task_struct *);
68610+ cred = __task_cred(task);
68611+ pcred = __task_cred(task->real_parent);
68612+ num1 = va_arg(ap, int);
68613+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
68614+ break;
68615+ case GR_CRASH1:
68616+ task = va_arg(ap, struct task_struct *);
68617+ cred = __task_cred(task);
68618+ pcred = __task_cred(task->real_parent);
68619+ ulong1 = va_arg(ap, unsigned long);
68620+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
68621+ break;
68622+ case GR_CRASH2:
68623+ task = va_arg(ap, struct task_struct *);
68624+ cred = __task_cred(task);
68625+ pcred = __task_cred(task->real_parent);
68626+ ulong1 = va_arg(ap, unsigned long);
68627+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
68628+ break;
68629+ case GR_RWXMAP:
68630+ file = va_arg(ap, struct file *);
68631+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
68632+ break;
68633+ case GR_RWXMAPVMA:
68634+ vma = va_arg(ap, struct vm_area_struct *);
68635+ if (vma->vm_file)
68636+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
68637+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
68638+ str1 = "<stack>";
68639+ else if (vma->vm_start <= current->mm->brk &&
68640+ vma->vm_end >= current->mm->start_brk)
68641+ str1 = "<heap>";
68642+ else
68643+ str1 = "<anonymous mapping>";
68644+ gr_log_middle_varargs(audit, msg, str1);
68645+ break;
68646+ case GR_PSACCT:
68647+ {
68648+ unsigned int wday, cday;
68649+ __u8 whr, chr;
68650+ __u8 wmin, cmin;
68651+ __u8 wsec, csec;
68652+ char cur_tty[64] = { 0 };
68653+ char parent_tty[64] = { 0 };
68654+
68655+ task = va_arg(ap, struct task_struct *);
68656+ wday = va_arg(ap, unsigned int);
68657+ cday = va_arg(ap, unsigned int);
68658+ whr = va_arg(ap, int);
68659+ chr = va_arg(ap, int);
68660+ wmin = va_arg(ap, int);
68661+ cmin = va_arg(ap, int);
68662+ wsec = va_arg(ap, int);
68663+ csec = va_arg(ap, int);
68664+ ulong1 = va_arg(ap, unsigned long);
68665+ cred = __task_cred(task);
68666+ pcred = __task_cred(task->real_parent);
68667+
68668+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
68669+ }
68670+ break;
68671+ default:
68672+ gr_log_middle(audit, msg, ap);
68673+ }
68674+ va_end(ap);
68675+ /* these don't need DEFAULTSECARGS printed on the end */
68676+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
68677+ gr_log_end(audit, 0);
68678+ else
68679+ gr_log_end(audit, 1);
68680+ END_LOCKS(audit);
68681+}
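
The flood control in gr_log_start() admits a burst of CONFIG_GRKERNSEC_FLOODBURST alerts per CONFIG_GRKERNSEC_FLOODTIME-second window, announces suppression exactly once, then drops alerts until a fresh window opens. The same state machine as a runnable userspace model:

#include <stdio.h>
#include <time.h>

#define FLOODTIME  10 /* stand-in for CONFIG_GRKERNSEC_FLOODTIME */
#define FLOODBURST 6  /* stand-in for CONFIG_GRKERNSEC_FLOODBURST */

static time_t wtime;  /* window start, like grsec_alert_wtime */
static unsigned fyet; /* alerts so far, like grsec_alert_fyet */

static int alert_allowed(void)
{
    time_t now = time(NULL);

    if (!wtime || now > wtime + FLOODTIME) {
        wtime = now; /* fresh window */
        fyet = 0;
        return 1;
    }
    if (fyet < FLOODBURST) { /* still inside the burst budget */
        fyet++;
        return 1;
    }
    if (fyet == FLOODBURST) { /* announce suppression exactly once */
        wtime = now;
        fyet++;
        printf("more alerts, logging disabled for %d seconds\n",
               FLOODTIME);
    }
    return 0;
}

int main(void)
{
    int i;

    for (i = 0; i < 10; i++)
        printf("alert %d: %s\n", i, alert_allowed() ? "logged" : "dropped");
    return 0;
}
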
68682diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
68683new file mode 100644
68684index 0000000..f536303
68685--- /dev/null
68686+++ b/grsecurity/grsec_mem.c
68687@@ -0,0 +1,40 @@
68688+#include <linux/kernel.h>
68689+#include <linux/sched.h>
68690+#include <linux/mm.h>
68691+#include <linux/mman.h>
68692+#include <linux/grinternal.h>
68693+
68694+void
68695+gr_handle_ioperm(void)
68696+{
68697+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
68698+ return;
68699+}
68700+
68701+void
68702+gr_handle_iopl(void)
68703+{
68704+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
68705+ return;
68706+}
68707+
68708+void
68709+gr_handle_mem_readwrite(u64 from, u64 to)
68710+{
68711+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
68712+ return;
68713+}
68714+
68715+void
68716+gr_handle_vm86(void)
68717+{
68718+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
68719+ return;
68720+}
68721+
68722+void
68723+gr_log_badprocpid(const char *entry)
68724+{
68725+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
68726+ return;
68727+}
68728diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
68729new file mode 100644
68730index 0000000..2131422
68731--- /dev/null
68732+++ b/grsecurity/grsec_mount.c
68733@@ -0,0 +1,62 @@
68734+#include <linux/kernel.h>
68735+#include <linux/sched.h>
68736+#include <linux/mount.h>
68737+#include <linux/grsecurity.h>
68738+#include <linux/grinternal.h>
68739+
68740+void
68741+gr_log_remount(const char *devname, const int retval)
68742+{
68743+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
68744+ if (grsec_enable_mount && (retval >= 0))
68745+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
68746+#endif
68747+ return;
68748+}
68749+
68750+void
68751+gr_log_unmount(const char *devname, const int retval)
68752+{
68753+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
68754+ if (grsec_enable_mount && (retval >= 0))
68755+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
68756+#endif
68757+ return;
68758+}
68759+
68760+void
68761+gr_log_mount(const char *from, const char *to, const int retval)
68762+{
68763+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
68764+ if (grsec_enable_mount && (retval >= 0))
68765+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
68766+#endif
68767+ return;
68768+}
68769+
68770+int
68771+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
68772+{
68773+#ifdef CONFIG_GRKERNSEC_ROFS
68774+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
68775+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
68776+ return -EPERM;
68777+ } else
68778+ return 0;
68779+#endif
68780+ return 0;
68781+}
68782+
68783+int
68784+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
68785+{
68786+#ifdef CONFIG_GRKERNSEC_ROFS
68787+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
68788+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
68789+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
68790+ return -EPERM;
68791+ } else
68792+ return 0;
68793+#endif
68794+ return 0;
68795+}
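
The two ROFS hooks combine into one invariant: once the toggle is set, no new writable mounts and no opening block devices for write, so a system deliberately mounted read-only cannot be remounted or rewritten from below. Modeled as a pair of predicates:

#include <stdbool.h>
#include <stdio.h>

static bool rofs_enabled = true; /* stand-in for grsec_enable_rofs */

/* model of gr_handle_rofs_mount(): new mounts must be read-only */
static int rofs_mount_ok(bool mnt_readonly)
{
    return (!rofs_enabled || mnt_readonly) ? 0 : -1; /* -EPERM */
}

/* model of gr_handle_rofs_blockwrite(): no writes to block devices */
static int rofs_blockwrite_ok(bool want_write, bool is_blockdev)
{
    return (rofs_enabled && want_write && is_blockdev) ? -1 : 0;
}

int main(void)
{
    printf("rw mount: %d, ro mount: %d, blockdev write: %d\n",
           rofs_mount_ok(false), rofs_mount_ok(true),
           rofs_blockwrite_ok(true, true));
    return 0;
}
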
68796diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
68797new file mode 100644
68798index 0000000..6ee9d50
68799--- /dev/null
68800+++ b/grsecurity/grsec_pax.c
68801@@ -0,0 +1,45 @@
68802+#include <linux/kernel.h>
68803+#include <linux/sched.h>
68804+#include <linux/mm.h>
68805+#include <linux/file.h>
68806+#include <linux/grinternal.h>
68807+#include <linux/grsecurity.h>
68808+
68809+void
68810+gr_log_textrel(struct vm_area_struct * vma)
68811+{
68812+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
68813+ if (grsec_enable_log_rwxmaps)
68814+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
68815+#endif
68816+ return;
68817+}
68818+
68819+void gr_log_ptgnustack(struct file *file)
68820+{
68821+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
68822+ if (grsec_enable_log_rwxmaps)
68823+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
68824+#endif
68825+ return;
68826+}
68827+
68828+void
68829+gr_log_rwxmmap(struct file *file)
68830+{
68831+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
68832+ if (grsec_enable_log_rwxmaps)
68833+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
68834+#endif
68835+ return;
68836+}
68837+
68838+void
68839+gr_log_rwxmprotect(struct vm_area_struct *vma)
68840+{
68841+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
68842+ if (grsec_enable_log_rwxmaps)
68843+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
68844+#endif
68845+ return;
68846+}
68847diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
68848new file mode 100644
68849index 0000000..f7f29aa
68850--- /dev/null
68851+++ b/grsecurity/grsec_ptrace.c
68852@@ -0,0 +1,30 @@
68853+#include <linux/kernel.h>
68854+#include <linux/sched.h>
68855+#include <linux/grinternal.h>
68856+#include <linux/security.h>
68857+
68858+void
68859+gr_audit_ptrace(struct task_struct *task)
68860+{
68861+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
68862+ if (grsec_enable_audit_ptrace)
68863+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
68864+#endif
68865+ return;
68866+}
68867+
68868+int
68869+gr_ptrace_readexec(struct file *file, int unsafe_flags)
68870+{
68871+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
68872+ const struct dentry *dentry = file->f_path.dentry;
68873+ const struct vfsmount *mnt = file->f_path.mnt;
68874+
68875+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
68876+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
68877+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
68878+ return -EACCES;
68879+ }
68880+#endif
68881+ return 0;
68882+}
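
gr_ptrace_readexec() above closes a read-via-ptrace hole: a tracer that could not open the traced binary for reading is refused the unsafe attach. A minimal userspace model of the same gate, using access(2) as a stand-in for inode_permission() and a plain flag for LSM_UNSAFE_PTRACE (all names in this sketch are invented for demonstration):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int model_ptrace_readexec(const char *binary, int unsafe_ptrace)
{
        /* tracer may not read the traced image: refuse the attach */
        if (unsafe_ptrace && access(binary, R_OK) != 0)
                return -EACCES;
        return 0;
}

int main(void)
{
        printf("%d\n", model_ptrace_readexec("/bin/ls", 1));
        return 0;
}
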
68883diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
68884new file mode 100644
68885index 0000000..4e29cc7
68886--- /dev/null
68887+++ b/grsecurity/grsec_sig.c
68888@@ -0,0 +1,246 @@
68889+#include <linux/kernel.h>
68890+#include <linux/sched.h>
68891+#include <linux/fs.h>
68892+#include <linux/delay.h>
68893+#include <linux/grsecurity.h>
68894+#include <linux/grinternal.h>
68895+#include <linux/hardirq.h>
68896+
68897+char *signames[] = {
68898+ [SIGSEGV] = "Segmentation fault",
68899+ [SIGILL] = "Illegal instruction",
68900+ [SIGABRT] = "Abort",
68901+ [SIGBUS] = "Invalid alignment/Bus error"
68902+};
68903+
68904+void
68905+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
68906+{
68907+#ifdef CONFIG_GRKERNSEC_SIGNAL
68908+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
68909+ (sig == SIGABRT) || (sig == SIGBUS))) {
68910+ if (task_pid_nr(t) == task_pid_nr(current)) {
68911+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
68912+ } else {
68913+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
68914+ }
68915+ }
68916+#endif
68917+ return;
68918+}
68919+
68920+int
68921+gr_handle_signal(const struct task_struct *p, const int sig)
68922+{
68923+#ifdef CONFIG_GRKERNSEC
68924+ /* ignore the 0 signal for protected task checks */
68925+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
68926+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
68927+ return -EPERM;
68928+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
68929+ return -EPERM;
68930+ }
68931+#endif
68932+ return 0;
68933+}
68934+
68935+#ifdef CONFIG_GRKERNSEC
68936+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
68937+
68938+int gr_fake_force_sig(int sig, struct task_struct *t)
68939+{
68940+ unsigned long int flags;
68941+ int ret, blocked, ignored;
68942+ struct k_sigaction *action;
68943+
68944+ spin_lock_irqsave(&t->sighand->siglock, flags);
68945+ action = &t->sighand->action[sig-1];
68946+ ignored = action->sa.sa_handler == SIG_IGN;
68947+ blocked = sigismember(&t->blocked, sig);
68948+ if (blocked || ignored) {
68949+ action->sa.sa_handler = SIG_DFL;
68950+ if (blocked) {
68951+ sigdelset(&t->blocked, sig);
68952+ recalc_sigpending_and_wake(t);
68953+ }
68954+ }
68955+ if (action->sa.sa_handler == SIG_DFL)
68956+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
68957+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
68958+
68959+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
68960+
68961+ return ret;
68962+}
68963+#endif
68964+
68965+#ifdef CONFIG_GRKERNSEC_BRUTE
68966+#define GR_USER_BAN_TIME (15 * 60)
68967+#define GR_DAEMON_BRUTE_TIME (30 * 60)
68968+
68969+static int __get_dumpable(unsigned long mm_flags)
68970+{
68971+ int ret;
68972+
68973+ ret = mm_flags & MMF_DUMPABLE_MASK;
68974+ return (ret >= 2) ? 2 : ret;
68975+}
68976+#endif
68977+
68978+void gr_handle_brute_attach(unsigned long mm_flags)
68979+{
68980+#ifdef CONFIG_GRKERNSEC_BRUTE
68981+ struct task_struct *p = current;
68982+ kuid_t uid = GLOBAL_ROOT_UID;
68983+ int daemon = 0;
68984+
68985+ if (!grsec_enable_brute)
68986+ return;
68987+
68988+ rcu_read_lock();
68989+ read_lock(&tasklist_lock);
68990+ read_lock(&grsec_exec_file_lock);
68991+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
68992+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
68993+ p->real_parent->brute = 1;
68994+ daemon = 1;
68995+ } else {
68996+ const struct cred *cred = __task_cred(p), *cred2;
68997+ struct task_struct *tsk, *tsk2;
68998+
68999+ if (!__get_dumpable(mm_flags) && gr_is_global_nonroot(cred->uid)) {
69000+ struct user_struct *user;
69001+
69002+ uid = cred->uid;
69003+
69004+ /* this reference is put (released) at exec time once the ban expires */
69005+ user = find_user(uid);
69006+ if (user == NULL)
69007+ goto unlock;
69008+ user->suid_banned = 1;
69009+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
69010+ if (user->suid_ban_expires == ~0UL)
69011+ user->suid_ban_expires--;
69012+
69013+ /* only kill other threads of the same binary, from the same user */
69014+ do_each_thread(tsk2, tsk) {
69015+ cred2 = __task_cred(tsk);
69016+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
69017+ gr_fake_force_sig(SIGKILL, tsk);
69018+ } while_each_thread(tsk2, tsk);
69019+ }
69020+ }
69021+unlock:
69022+ read_unlock(&grsec_exec_file_lock);
69023+ read_unlock(&tasklist_lock);
69024+ rcu_read_unlock();
69025+
69026+ if (gr_is_global_nonroot(uid))
69027+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
69028+ else if (daemon)
69029+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
69030+
69031+#endif
69032+ return;
69033+}
69034+
69035+void gr_handle_brute_check(void)
69036+{
69037+#ifdef CONFIG_GRKERNSEC_BRUTE
69038+ struct task_struct *p = current;
69039+
69040+ if (unlikely(p->brute)) {
69041+ if (!grsec_enable_brute)
69042+ p->brute = 0;
69043+ else if (time_before(get_seconds(), p->brute_expires))
69044+ msleep(30 * 1000);
69045+ }
69046+#endif
69047+ return;
69048+}
69049+
69050+void gr_handle_kernel_exploit(void)
69051+{
69052+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
69053+ const struct cred *cred;
69054+ struct task_struct *tsk, *tsk2;
69055+ struct user_struct *user;
69056+ kuid_t uid;
69057+
69058+ if (in_irq() || in_serving_softirq() || in_nmi())
69059+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
69060+
69061+ uid = current_uid();
69062+
69063+ if (gr_is_global_root(uid))
69064+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
69065+ else {
69066+ /* kill all the processes of this user, hold a reference
69067+ to their creds struct, and prevent them from creating
69068+ another process until system reset
69069+ */
69070+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
69071+ GR_GLOBAL_UID(uid));
69072+ /* we intentionally leak this ref */
69073+ user = get_uid(current->cred->user);
69074+ if (user)
69075+ user->kernel_banned = 1;
69076+
69077+ /* kill all processes of this user */
69078+ read_lock(&tasklist_lock);
69079+ do_each_thread(tsk2, tsk) {
69080+ cred = __task_cred(tsk);
69081+ if (uid_eq(cred->uid, uid))
69082+ gr_fake_force_sig(SIGKILL, tsk);
69083+ } while_each_thread(tsk2, tsk);
69084+ read_unlock(&tasklist_lock);
69085+ }
69086+#endif
69087+}
69088+
69089+#ifdef CONFIG_GRKERNSEC_BRUTE
69090+static bool suid_ban_expired(struct user_struct *user)
69091+{
69092+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
69093+ user->suid_banned = 0;
69094+ user->suid_ban_expires = 0;
69095+ free_uid(user);
69096+ return true;
69097+ }
69098+
69099+ return false;
69100+}
69101+#endif
69102+
69103+int gr_process_kernel_exec_ban(void)
69104+{
69105+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
69106+ if (unlikely(current->cred->user->kernel_banned))
69107+ return -EPERM;
69108+#endif
69109+ return 0;
69110+}
69111+
69112+int gr_process_kernel_setuid_ban(struct user_struct *user)
69113+{
69114+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
69115+ if (unlikely(user->kernel_banned))
69116+ gr_fake_force_sig(SIGKILL, current);
69117+#endif
69118+ return 0;
69119+}
69120+
69121+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
69122+{
69123+#ifdef CONFIG_GRKERNSEC_BRUTE
69124+ struct user_struct *user = current->cred->user;
69125+ if (unlikely(user->suid_banned)) {
69126+ if (suid_ban_expired(user))
69127+ return 0;
69128+ /* disallow execution of suid binaries only */
69129+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
69130+ return -EPERM;
69131+ }
69132+#endif
69133+ return 0;
69134+}
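
The ban bookkeeping above is plain wall-clock arithmetic: a suid ban expires GR_USER_BAN_TIME seconds from now, and ~0UL is reserved as the "banned until reboot" sentinel, so a computed expiry that happens to land on ~0UL is nudged down by one second. A standalone sketch of that arithmetic, with time(2) standing in for get_seconds() (userspace demo only):

#include <stdio.h>
#include <time.h>

#define GR_USER_BAN_TIME (15 * 60)      /* same 15-minute window as the patch */

int main(void)
{
        unsigned long expires = (unsigned long)time(NULL) + GR_USER_BAN_TIME;

        if (expires == ~0UL)            /* keep ~0UL free as the permanent-ban marker */
                expires--;
        printf("ban lifts at %lu\n", expires);
        return 0;
}
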
69135diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
69136new file mode 100644
69137index 0000000..4030d57
69138--- /dev/null
69139+++ b/grsecurity/grsec_sock.c
69140@@ -0,0 +1,244 @@
69141+#include <linux/kernel.h>
69142+#include <linux/module.h>
69143+#include <linux/sched.h>
69144+#include <linux/file.h>
69145+#include <linux/net.h>
69146+#include <linux/in.h>
69147+#include <linux/ip.h>
69148+#include <net/sock.h>
69149+#include <net/inet_sock.h>
69150+#include <linux/grsecurity.h>
69151+#include <linux/grinternal.h>
69152+#include <linux/gracl.h>
69153+
69154+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
69155+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
69156+
69157+EXPORT_SYMBOL(gr_search_udp_recvmsg);
69158+EXPORT_SYMBOL(gr_search_udp_sendmsg);
69159+
69160+#ifdef CONFIG_UNIX_MODULE
69161+EXPORT_SYMBOL(gr_acl_handle_unix);
69162+EXPORT_SYMBOL(gr_acl_handle_mknod);
69163+EXPORT_SYMBOL(gr_handle_chroot_unix);
69164+EXPORT_SYMBOL(gr_handle_create);
69165+#endif
69166+
69167+#ifdef CONFIG_GRKERNSEC
69168+#define gr_conn_table_size 32749
69169+struct conn_table_entry {
69170+ struct conn_table_entry *next;
69171+ struct signal_struct *sig;
69172+};
69173+
69174+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
69175+DEFINE_SPINLOCK(gr_conn_table_lock);
69176+
69177+extern const char * gr_socktype_to_name(unsigned char type);
69178+extern const char * gr_proto_to_name(unsigned char proto);
69179+extern const char * gr_sockfamily_to_name(unsigned char family);
69180+
69181+static __inline__ int
69182+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
69183+{
69184+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
69185+}
69186+
69187+static __inline__ int
69188+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
69189+ __u16 sport, __u16 dport)
69190+{
69191+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
69192+ sig->gr_sport == sport && sig->gr_dport == dport))
69193+ return 1;
69194+ else
69195+ return 0;
69196+}
69197+
69198+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
69199+{
69200+ struct conn_table_entry **match;
69201+ unsigned int index;
69202+
69203+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
69204+ sig->gr_sport, sig->gr_dport,
69205+ gr_conn_table_size);
69206+
69207+ newent->sig = sig;
69208+
69209+ match = &gr_conn_table[index];
69210+ newent->next = *match;
69211+ *match = newent;
69212+
69213+ return;
69214+}
69215+
69216+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
69217+{
69218+ struct conn_table_entry *match, *last = NULL;
69219+ unsigned int index;
69220+
69221+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
69222+ sig->gr_sport, sig->gr_dport,
69223+ gr_conn_table_size);
69224+
69225+ match = gr_conn_table[index];
69226+ while (match && !conn_match(match->sig,
69227+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
69228+ sig->gr_dport)) {
69229+ last = match;
69230+ match = match->next;
69231+ }
69232+
69233+ if (match) {
69234+ if (last)
69235+ last->next = match->next;
69236+ else
69237+ gr_conn_table[index] = NULL;
69238+ kfree(match);
69239+ }
69240+
69241+ return;
69242+}
69243+
69244+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
69245+ __u16 sport, __u16 dport)
69246+{
69247+ struct conn_table_entry *match;
69248+ unsigned int index;
69249+
69250+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
69251+
69252+ match = gr_conn_table[index];
69253+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
69254+ match = match->next;
69255+
69256+ if (match)
69257+ return match->sig;
69258+ else
69259+ return NULL;
69260+}
69261+
69262+#endif
69263+
69264+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
69265+{
69266+#ifdef CONFIG_GRKERNSEC
69267+ struct signal_struct *sig = task->signal;
69268+ struct conn_table_entry *newent;
69269+
69270+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
69271+ if (newent == NULL)
69272+ return;
69273+ /* no bh lock needed since we are called with bh disabled */
69274+ spin_lock(&gr_conn_table_lock);
69275+ gr_del_task_from_ip_table_nolock(sig);
69276+ sig->gr_saddr = inet->inet_rcv_saddr;
69277+ sig->gr_daddr = inet->inet_daddr;
69278+ sig->gr_sport = inet->inet_sport;
69279+ sig->gr_dport = inet->inet_dport;
69280+ gr_add_to_task_ip_table_nolock(sig, newent);
69281+ spin_unlock(&gr_conn_table_lock);
69282+#endif
69283+ return;
69284+}
69285+
69286+void gr_del_task_from_ip_table(struct task_struct *task)
69287+{
69288+#ifdef CONFIG_GRKERNSEC
69289+ spin_lock_bh(&gr_conn_table_lock);
69290+ gr_del_task_from_ip_table_nolock(task->signal);
69291+ spin_unlock_bh(&gr_conn_table_lock);
69292+#endif
69293+ return;
69294+}
69295+
69296+void
69297+gr_attach_curr_ip(const struct sock *sk)
69298+{
69299+#ifdef CONFIG_GRKERNSEC
69300+ struct signal_struct *p, *set;
69301+ const struct inet_sock *inet = inet_sk(sk);
69302+
69303+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
69304+ return;
69305+
69306+ set = current->signal;
69307+
69308+ spin_lock_bh(&gr_conn_table_lock);
69309+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
69310+ inet->inet_dport, inet->inet_sport);
69311+ if (unlikely(p != NULL)) {
69312+ set->curr_ip = p->curr_ip;
69313+ set->used_accept = 1;
69314+ gr_del_task_from_ip_table_nolock(p);
69315+ spin_unlock_bh(&gr_conn_table_lock);
69316+ return;
69317+ }
69318+ spin_unlock_bh(&gr_conn_table_lock);
69319+
69320+ set->curr_ip = inet->inet_daddr;
69321+ set->used_accept = 1;
69322+#endif
69323+ return;
69324+}
69325+
69326+int
69327+gr_handle_sock_all(const int family, const int type, const int protocol)
69328+{
69329+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
69330+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
69331+ (family != AF_UNIX)) {
69332+ if (family == AF_INET)
69333+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
69334+ else
69335+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
69336+ return -EACCES;
69337+ }
69338+#endif
69339+ return 0;
69340+}
69341+
69342+int
69343+gr_handle_sock_server(const struct sockaddr *sck)
69344+{
69345+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
69346+ if (grsec_enable_socket_server &&
69347+ in_group_p(grsec_socket_server_gid) &&
69348+ sck && (sck->sa_family != AF_UNIX) &&
69349+ (sck->sa_family != AF_LOCAL)) {
69350+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
69351+ return -EACCES;
69352+ }
69353+#endif
69354+ return 0;
69355+}
69356+
69357+int
69358+gr_handle_sock_server_other(const struct sock *sck)
69359+{
69360+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
69361+ if (grsec_enable_socket_server &&
69362+ in_group_p(grsec_socket_server_gid) &&
69363+ sck && (sck->sk_family != AF_UNIX) &&
69364+ (sck->sk_family != AF_LOCAL)) {
69365+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
69366+ return -EACCES;
69367+ }
69368+#endif
69369+ return 0;
69370+}
69371+
69372+int
69373+gr_handle_sock_client(const struct sockaddr *sck)
69374+{
69375+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
69376+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
69377+ sck && (sck->sa_family != AF_UNIX) &&
69378+ (sck->sa_family != AF_LOCAL)) {
69379+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
69380+ return -EACCES;
69381+ }
69382+#endif
69383+ return 0;
69384+}
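
The connection table above is a simple chained hash keyed on the 4-tuple; conn_hash() folds both addresses and both ports into one bucket index modulo the (prime) table size. A standalone re-statement of the same formula, with made-up input values (only the formula and table size come from the patch):

#include <stdio.h>

#define GR_CONN_TABLE_SIZE 32749        /* prime, as in the patch */

static unsigned int conn_hash(unsigned int saddr, unsigned int daddr,
                              unsigned short sport, unsigned short dport)
{
        /* shift the ports into distinct byte lanes before summing */
        return (daddr + saddr + ((unsigned int)sport << 8) +
                ((unsigned int)dport << 16)) % GR_CONN_TABLE_SIZE;
}

int main(void)
{
        printf("bucket=%u\n", conn_hash(0x0a000001, 0x0a000002, 4321, 80));
        return 0;
}
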
69385diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
69386new file mode 100644
69387index 0000000..7624d1c
69388--- /dev/null
69389+++ b/grsecurity/grsec_sysctl.c
69390@@ -0,0 +1,460 @@
69391+#include <linux/kernel.h>
69392+#include <linux/sched.h>
69393+#include <linux/sysctl.h>
69394+#include <linux/grsecurity.h>
69395+#include <linux/grinternal.h>
69396+
69397+int
69398+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
69399+{
69400+#ifdef CONFIG_GRKERNSEC_SYSCTL
69401+ if (dirname == NULL || name == NULL)
69402+ return 0;
69403+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
69404+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
69405+ return -EACCES;
69406+ }
69407+#endif
69408+ return 0;
69409+}
69410+
69411+#ifdef CONFIG_GRKERNSEC_ROFS
69412+static int __maybe_unused one = 1;
69413+#endif
69414+
69415+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
69416+struct ctl_table grsecurity_table[] = {
69417+#ifdef CONFIG_GRKERNSEC_SYSCTL
69418+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
69419+#ifdef CONFIG_GRKERNSEC_IO
69420+ {
69421+ .procname = "disable_priv_io",
69422+ .data = &grsec_disable_privio,
69423+ .maxlen = sizeof(int),
69424+ .mode = 0600,
69425+ .proc_handler = &proc_dointvec,
69426+ },
69427+#endif
69428+#endif
69429+#ifdef CONFIG_GRKERNSEC_LINK
69430+ {
69431+ .procname = "linking_restrictions",
69432+ .data = &grsec_enable_link,
69433+ .maxlen = sizeof(int),
69434+ .mode = 0600,
69435+ .proc_handler = &proc_dointvec,
69436+ },
69437+#endif
69438+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
69439+ {
69440+ .procname = "enforce_symlinksifowner",
69441+ .data = &grsec_enable_symlinkown,
69442+ .maxlen = sizeof(int),
69443+ .mode = 0600,
69444+ .proc_handler = &proc_dointvec,
69445+ },
69446+ {
69447+ .procname = "symlinkown_gid",
69448+ .data = &grsec_symlinkown_gid,
69449+ .maxlen = sizeof(int),
69450+ .mode = 0600,
69451+ .proc_handler = &proc_dointvec,
69452+ },
69453+#endif
69454+#ifdef CONFIG_GRKERNSEC_BRUTE
69455+ {
69456+ .procname = "deter_bruteforce",
69457+ .data = &grsec_enable_brute,
69458+ .maxlen = sizeof(int),
69459+ .mode = 0600,
69460+ .proc_handler = &proc_dointvec,
69461+ },
69462+#endif
69463+#ifdef CONFIG_GRKERNSEC_FIFO
69464+ {
69465+ .procname = "fifo_restrictions",
69466+ .data = &grsec_enable_fifo,
69467+ .maxlen = sizeof(int),
69468+ .mode = 0600,
69469+ .proc_handler = &proc_dointvec,
69470+ },
69471+#endif
69472+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
69473+ {
69474+ .procname = "ptrace_readexec",
69475+ .data = &grsec_enable_ptrace_readexec,
69476+ .maxlen = sizeof(int),
69477+ .mode = 0600,
69478+ .proc_handler = &proc_dointvec,
69479+ },
69480+#endif
69481+#ifdef CONFIG_GRKERNSEC_SETXID
69482+ {
69483+ .procname = "consistent_setxid",
69484+ .data = &grsec_enable_setxid,
69485+ .maxlen = sizeof(int),
69486+ .mode = 0600,
69487+ .proc_handler = &proc_dointvec,
69488+ },
69489+#endif
69490+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69491+ {
69492+ .procname = "ip_blackhole",
69493+ .data = &grsec_enable_blackhole,
69494+ .maxlen = sizeof(int),
69495+ .mode = 0600,
69496+ .proc_handler = &proc_dointvec,
69497+ },
69498+ {
69499+ .procname = "lastack_retries",
69500+ .data = &grsec_lastack_retries,
69501+ .maxlen = sizeof(int),
69502+ .mode = 0600,
69503+ .proc_handler = &proc_dointvec,
69504+ },
69505+#endif
69506+#ifdef CONFIG_GRKERNSEC_EXECLOG
69507+ {
69508+ .procname = "exec_logging",
69509+ .data = &grsec_enable_execlog,
69510+ .maxlen = sizeof(int),
69511+ .mode = 0600,
69512+ .proc_handler = &proc_dointvec,
69513+ },
69514+#endif
69515+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
69516+ {
69517+ .procname = "rwxmap_logging",
69518+ .data = &grsec_enable_log_rwxmaps,
69519+ .maxlen = sizeof(int),
69520+ .mode = 0600,
69521+ .proc_handler = &proc_dointvec,
69522+ },
69523+#endif
69524+#ifdef CONFIG_GRKERNSEC_SIGNAL
69525+ {
69526+ .procname = "signal_logging",
69527+ .data = &grsec_enable_signal,
69528+ .maxlen = sizeof(int),
69529+ .mode = 0600,
69530+ .proc_handler = &proc_dointvec,
69531+ },
69532+#endif
69533+#ifdef CONFIG_GRKERNSEC_FORKFAIL
69534+ {
69535+ .procname = "forkfail_logging",
69536+ .data = &grsec_enable_forkfail,
69537+ .maxlen = sizeof(int),
69538+ .mode = 0600,
69539+ .proc_handler = &proc_dointvec,
69540+ },
69541+#endif
69542+#ifdef CONFIG_GRKERNSEC_TIME
69543+ {
69544+ .procname = "timechange_logging",
69545+ .data = &grsec_enable_time,
69546+ .maxlen = sizeof(int),
69547+ .mode = 0600,
69548+ .proc_handler = &proc_dointvec,
69549+ },
69550+#endif
69551+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
69552+ {
69553+ .procname = "chroot_deny_shmat",
69554+ .data = &grsec_enable_chroot_shmat,
69555+ .maxlen = sizeof(int),
69556+ .mode = 0600,
69557+ .proc_handler = &proc_dointvec,
69558+ },
69559+#endif
69560+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
69561+ {
69562+ .procname = "chroot_deny_unix",
69563+ .data = &grsec_enable_chroot_unix,
69564+ .maxlen = sizeof(int),
69565+ .mode = 0600,
69566+ .proc_handler = &proc_dointvec,
69567+ },
69568+#endif
69569+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
69570+ {
69571+ .procname = "chroot_deny_mount",
69572+ .data = &grsec_enable_chroot_mount,
69573+ .maxlen = sizeof(int),
69574+ .mode = 0600,
69575+ .proc_handler = &proc_dointvec,
69576+ },
69577+#endif
69578+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
69579+ {
69580+ .procname = "chroot_deny_fchdir",
69581+ .data = &grsec_enable_chroot_fchdir,
69582+ .maxlen = sizeof(int),
69583+ .mode = 0600,
69584+ .proc_handler = &proc_dointvec,
69585+ },
69586+#endif
69587+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
69588+ {
69589+ .procname = "chroot_deny_chroot",
69590+ .data = &grsec_enable_chroot_double,
69591+ .maxlen = sizeof(int),
69592+ .mode = 0600,
69593+ .proc_handler = &proc_dointvec,
69594+ },
69595+#endif
69596+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
69597+ {
69598+ .procname = "chroot_deny_pivot",
69599+ .data = &grsec_enable_chroot_pivot,
69600+ .maxlen = sizeof(int),
69601+ .mode = 0600,
69602+ .proc_handler = &proc_dointvec,
69603+ },
69604+#endif
69605+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
69606+ {
69607+ .procname = "chroot_enforce_chdir",
69608+ .data = &grsec_enable_chroot_chdir,
69609+ .maxlen = sizeof(int),
69610+ .mode = 0600,
69611+ .proc_handler = &proc_dointvec,
69612+ },
69613+#endif
69614+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
69615+ {
69616+ .procname = "chroot_deny_chmod",
69617+ .data = &grsec_enable_chroot_chmod,
69618+ .maxlen = sizeof(int),
69619+ .mode = 0600,
69620+ .proc_handler = &proc_dointvec,
69621+ },
69622+#endif
69623+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
69624+ {
69625+ .procname = "chroot_deny_mknod",
69626+ .data = &grsec_enable_chroot_mknod,
69627+ .maxlen = sizeof(int),
69628+ .mode = 0600,
69629+ .proc_handler = &proc_dointvec,
69630+ },
69631+#endif
69632+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
69633+ {
69634+ .procname = "chroot_restrict_nice",
69635+ .data = &grsec_enable_chroot_nice,
69636+ .maxlen = sizeof(int),
69637+ .mode = 0600,
69638+ .proc_handler = &proc_dointvec,
69639+ },
69640+#endif
69641+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
69642+ {
69643+ .procname = "chroot_execlog",
69644+ .data = &grsec_enable_chroot_execlog,
69645+ .maxlen = sizeof(int),
69646+ .mode = 0600,
69647+ .proc_handler = &proc_dointvec,
69648+ },
69649+#endif
69650+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
69651+ {
69652+ .procname = "chroot_caps",
69653+ .data = &grsec_enable_chroot_caps,
69654+ .maxlen = sizeof(int),
69655+ .mode = 0600,
69656+ .proc_handler = &proc_dointvec,
69657+ },
69658+#endif
69659+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
69660+ {
69661+ .procname = "chroot_deny_sysctl",
69662+ .data = &grsec_enable_chroot_sysctl,
69663+ .maxlen = sizeof(int),
69664+ .mode = 0600,
69665+ .proc_handler = &proc_dointvec,
69666+ },
69667+#endif
69668+#ifdef CONFIG_GRKERNSEC_TPE
69669+ {
69670+ .procname = "tpe",
69671+ .data = &grsec_enable_tpe,
69672+ .maxlen = sizeof(int),
69673+ .mode = 0600,
69674+ .proc_handler = &proc_dointvec,
69675+ },
69676+ {
69677+ .procname = "tpe_gid",
69678+ .data = &grsec_tpe_gid,
69679+ .maxlen = sizeof(int),
69680+ .mode = 0600,
69681+ .proc_handler = &proc_dointvec,
69682+ },
69683+#endif
69684+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
69685+ {
69686+ .procname = "tpe_invert",
69687+ .data = &grsec_enable_tpe_invert,
69688+ .maxlen = sizeof(int),
69689+ .mode = 0600,
69690+ .proc_handler = &proc_dointvec,
69691+ },
69692+#endif
69693+#ifdef CONFIG_GRKERNSEC_TPE_ALL
69694+ {
69695+ .procname = "tpe_restrict_all",
69696+ .data = &grsec_enable_tpe_all,
69697+ .maxlen = sizeof(int),
69698+ .mode = 0600,
69699+ .proc_handler = &proc_dointvec,
69700+ },
69701+#endif
69702+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
69703+ {
69704+ .procname = "socket_all",
69705+ .data = &grsec_enable_socket_all,
69706+ .maxlen = sizeof(int),
69707+ .mode = 0600,
69708+ .proc_handler = &proc_dointvec,
69709+ },
69710+ {
69711+ .procname = "socket_all_gid",
69712+ .data = &grsec_socket_all_gid,
69713+ .maxlen = sizeof(int),
69714+ .mode = 0600,
69715+ .proc_handler = &proc_dointvec,
69716+ },
69717+#endif
69718+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
69719+ {
69720+ .procname = "socket_client",
69721+ .data = &grsec_enable_socket_client,
69722+ .maxlen = sizeof(int),
69723+ .mode = 0600,
69724+ .proc_handler = &proc_dointvec,
69725+ },
69726+ {
69727+ .procname = "socket_client_gid",
69728+ .data = &grsec_socket_client_gid,
69729+ .maxlen = sizeof(int),
69730+ .mode = 0600,
69731+ .proc_handler = &proc_dointvec,
69732+ },
69733+#endif
69734+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
69735+ {
69736+ .procname = "socket_server",
69737+ .data = &grsec_enable_socket_server,
69738+ .maxlen = sizeof(int),
69739+ .mode = 0600,
69740+ .proc_handler = &proc_dointvec,
69741+ },
69742+ {
69743+ .procname = "socket_server_gid",
69744+ .data = &grsec_socket_server_gid,
69745+ .maxlen = sizeof(int),
69746+ .mode = 0600,
69747+ .proc_handler = &proc_dointvec,
69748+ },
69749+#endif
69750+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
69751+ {
69752+ .procname = "audit_group",
69753+ .data = &grsec_enable_group,
69754+ .maxlen = sizeof(int),
69755+ .mode = 0600,
69756+ .proc_handler = &proc_dointvec,
69757+ },
69758+ {
69759+ .procname = "audit_gid",
69760+ .data = &grsec_audit_gid,
69761+ .maxlen = sizeof(int),
69762+ .mode = 0600,
69763+ .proc_handler = &proc_dointvec,
69764+ },
69765+#endif
69766+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
69767+ {
69768+ .procname = "audit_chdir",
69769+ .data = &grsec_enable_chdir,
69770+ .maxlen = sizeof(int),
69771+ .mode = 0600,
69772+ .proc_handler = &proc_dointvec,
69773+ },
69774+#endif
69775+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
69776+ {
69777+ .procname = "audit_mount",
69778+ .data = &grsec_enable_mount,
69779+ .maxlen = sizeof(int),
69780+ .mode = 0600,
69781+ .proc_handler = &proc_dointvec,
69782+ },
69783+#endif
69784+#ifdef CONFIG_GRKERNSEC_DMESG
69785+ {
69786+ .procname = "dmesg",
69787+ .data = &grsec_enable_dmesg,
69788+ .maxlen = sizeof(int),
69789+ .mode = 0600,
69790+ .proc_handler = &proc_dointvec,
69791+ },
69792+#endif
69793+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
69794+ {
69795+ .procname = "chroot_findtask",
69796+ .data = &grsec_enable_chroot_findtask,
69797+ .maxlen = sizeof(int),
69798+ .mode = 0600,
69799+ .proc_handler = &proc_dointvec,
69800+ },
69801+#endif
69802+#ifdef CONFIG_GRKERNSEC_RESLOG
69803+ {
69804+ .procname = "resource_logging",
69805+ .data = &grsec_resource_logging,
69806+ .maxlen = sizeof(int),
69807+ .mode = 0600,
69808+ .proc_handler = &proc_dointvec,
69809+ },
69810+#endif
69811+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
69812+ {
69813+ .procname = "audit_ptrace",
69814+ .data = &grsec_enable_audit_ptrace,
69815+ .maxlen = sizeof(int),
69816+ .mode = 0600,
69817+ .proc_handler = &proc_dointvec,
69818+ },
69819+#endif
69820+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
69821+ {
69822+ .procname = "harden_ptrace",
69823+ .data = &grsec_enable_harden_ptrace,
69824+ .maxlen = sizeof(int),
69825+ .mode = 0600,
69826+ .proc_handler = &proc_dointvec,
69827+ },
69828+#endif
69829+ {
69830+ .procname = "grsec_lock",
69831+ .data = &grsec_lock,
69832+ .maxlen = sizeof(int),
69833+ .mode = 0600,
69834+ .proc_handler = &proc_dointvec,
69835+ },
69836+#endif
69837+#ifdef CONFIG_GRKERNSEC_ROFS
69838+ {
69839+ .procname = "romount_protect",
69840+ .data = &grsec_enable_rofs,
69841+ .maxlen = sizeof(int),
69842+ .mode = 0600,
69843+ .proc_handler = &proc_dointvec_minmax,
69844+ .extra1 = &one,
69845+ .extra2 = &one,
69846+ },
69847+#endif
69848+ { }
69849+};
69850+#endif
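
Every entry in the table above follows one pattern: a mode-0600 int exposed through proc_dointvec, so each grsecurity feature becomes a root-writable /proc/sys toggle (until grsec_lock freezes the directory). A minimal module sketch of the same pattern against the assumed 3.10-era sysctl API; the "demo" names are invented for illustration and create /proc/sys/demo/demo_enable:

#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/errno.h>

static int demo_enable;

static struct ctl_table demo_table[] = {
        {
                .procname     = "demo_enable",
                .data         = &demo_enable,
                .maxlen       = sizeof(int),
                .mode         = 0600,           /* root read/write only */
                .proc_handler = proc_dointvec,
        },
        { }
};

static struct ctl_table demo_root[] = {
        { .procname = "demo", .mode = 0500, .child = demo_table },
        { }
};

static struct ctl_table_header *hdr;

static int __init demo_init(void)
{
        hdr = register_sysctl_table(demo_root);
        return hdr ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
        unregister_sysctl_table(hdr);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
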
69851diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
69852new file mode 100644
69853index 0000000..0dc13c3
69854--- /dev/null
69855+++ b/grsecurity/grsec_time.c
69856@@ -0,0 +1,16 @@
69857+#include <linux/kernel.h>
69858+#include <linux/sched.h>
69859+#include <linux/grinternal.h>
69860+#include <linux/module.h>
69861+
69862+void
69863+gr_log_timechange(void)
69864+{
69865+#ifdef CONFIG_GRKERNSEC_TIME
69866+ if (grsec_enable_time)
69867+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
69868+#endif
69869+ return;
69870+}
69871+
69872+EXPORT_SYMBOL(gr_log_timechange);
69873diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
69874new file mode 100644
69875index 0000000..ee57dcf
69876--- /dev/null
69877+++ b/grsecurity/grsec_tpe.c
69878@@ -0,0 +1,73 @@
69879+#include <linux/kernel.h>
69880+#include <linux/sched.h>
69881+#include <linux/file.h>
69882+#include <linux/fs.h>
69883+#include <linux/grinternal.h>
69884+
69885+extern int gr_acl_tpe_check(void);
69886+
69887+int
69888+gr_tpe_allow(const struct file *file)
69889+{
69890+#ifdef CONFIG_GRKERNSEC
69891+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
69892+ const struct cred *cred = current_cred();
69893+ char *msg = NULL;
69894+ char *msg2 = NULL;
69895+
69896+ // never restrict root
69897+ if (gr_is_global_root(cred->uid))
69898+ return 1;
69899+
69900+ if (grsec_enable_tpe) {
69901+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
69902+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
69903+ msg = "not being in trusted group";
69904+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
69905+ msg = "being in untrusted group";
69906+#else
69907+ if (in_group_p(grsec_tpe_gid))
69908+ msg = "being in untrusted group";
69909+#endif
69910+ }
69911+ if (!msg && gr_acl_tpe_check())
69912+ msg = "being in untrusted role";
69913+
69914+ // not in any affected group/role
69915+ if (!msg)
69916+ goto next_check;
69917+
69918+ if (gr_is_global_nonroot(inode->i_uid))
69919+ msg2 = "file in non-root-owned directory";
69920+ else if (inode->i_mode & S_IWOTH)
69921+ msg2 = "file in world-writable directory";
69922+ else if (inode->i_mode & S_IWGRP)
69923+ msg2 = "file in group-writable directory";
69924+
69925+ if (msg && msg2) {
69926+ char fullmsg[70] = {0};
69927+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
69928+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
69929+ return 0;
69930+ }
69931+ msg = NULL;
69932+next_check:
69933+#ifdef CONFIG_GRKERNSEC_TPE_ALL
69934+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
69935+ return 1;
69936+
69937+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
69938+ msg = "directory not owned by user";
69939+ else if (inode->i_mode & S_IWOTH)
69940+ msg = "file in world-writable directory";
69941+ else if (inode->i_mode & S_IWGRP)
69942+ msg = "file in group-writable directory";
69943+
69944+ if (msg) {
69945+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
69946+ return 0;
69947+ }
69948+#endif
69949+#endif
69950+ return 1;
69951+}
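
The heart of gr_tpe_allow() is the directory test: execution is refused when the binary's containing directory is not root-owned, or is group- or world-writable. A simplified userspace model of just that test (group/role logic omitted; stat(2) stands in for the parent inode fields, and the message strings mirror the patch):

#include <stdio.h>
#include <sys/stat.h>

static const char *tpe_dir_objection(const char *dir)
{
        struct stat st;

        if (stat(dir, &st) != 0)
                return "cannot stat directory";
        if (st.st_uid != 0)
                return "file in non-root-owned directory";
        if (st.st_mode & S_IWOTH)
                return "file in world-writable directory";
        if (st.st_mode & S_IWGRP)
                return "file in group-writable directory";
        return NULL;                    /* directory passes the TPE test */
}

int main(void)
{
        const char *why = tpe_dir_objection("/tmp");

        printf("%s\n", why ? why : "directory acceptable");
        return 0;
}
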
69952diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
69953new file mode 100644
69954index 0000000..9f7b1ac
69955--- /dev/null
69956+++ b/grsecurity/grsum.c
69957@@ -0,0 +1,61 @@
69958+#include <linux/err.h>
69959+#include <linux/kernel.h>
69960+#include <linux/sched.h>
69961+#include <linux/mm.h>
69962+#include <linux/scatterlist.h>
69963+#include <linux/crypto.h>
69964+#include <linux/gracl.h>
69965+
69966+
69967+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
69968+#error "crypto and sha256 must be built into the kernel"
69969+#endif
69970+
69971+int
69972+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
69973+{
69974+ char *p;
69975+ struct crypto_hash *tfm;
69976+ struct hash_desc desc;
69977+ struct scatterlist sg;
69978+ unsigned char temp_sum[GR_SHA_LEN];
69979+ volatile int retval = 0;
69980+ volatile int dummy = 0;
69981+ unsigned int i;
69982+
69983+ sg_init_table(&sg, 1);
69984+
69985+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
69986+ if (IS_ERR(tfm)) {
69987+ /* should never happen, since sha256 should be built in */
69988+ return 1;
69989+ }
69990+
69991+ desc.tfm = tfm;
69992+ desc.flags = 0;
69993+
69994+ crypto_hash_init(&desc);
69995+
69996+ p = salt;
69997+ sg_set_buf(&sg, p, GR_SALT_LEN);
69998+ crypto_hash_update(&desc, &sg, sg.length);
69999+
70000+ p = entry->pw;
70001+ sg_set_buf(&sg, p, strlen(p));
70002+
70003+ crypto_hash_update(&desc, &sg, sg.length);
70004+
70005+ crypto_hash_final(&desc, temp_sum);
70006+
70007+ memset(entry->pw, 0, GR_PW_LEN);
70008+
70009+ for (i = 0; i < GR_SHA_LEN; i++)
70010+ if (sum[i] != temp_sum[i])
70011+ retval = 1;
70012+ else
70013+ dummy = 1; // waste a cycle
70014+
70015+ crypto_free_hash(tfm);
70016+
70017+ return retval;
70018+}
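
The comparison loop in chkpw() deliberately visits every byte, using the dummy assignment so a mismatch does not shorten the run time and leak how many leading bytes of the hash matched. A standalone branch-free variant of the same constant-time idea (a sketch, not the patch's exact loop):

#include <stdio.h>
#include <stddef.h>

static int const_time_diff(const unsigned char *a, const unsigned char *b,
                           size_t n)
{
        unsigned char acc = 0;
        size_t i;

        for (i = 0; i < n; i++)
                acc |= a[i] ^ b[i];     /* no early exit on mismatch */
        return acc != 0;                /* 1 if the buffers differ */
}

int main(void)
{
        unsigned char x[4] = { 1, 2, 3, 4 }, y[4] = { 1, 2, 3, 5 };

        printf("%d\n", const_time_diff(x, y, sizeof(x)));
        return 0;
}
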
70019diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
70020index 77ff547..181834f 100644
70021--- a/include/asm-generic/4level-fixup.h
70022+++ b/include/asm-generic/4level-fixup.h
70023@@ -13,8 +13,10 @@
70024 #define pmd_alloc(mm, pud, address) \
70025 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
70026 NULL: pmd_offset(pud, address))
70027+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
70028
70029 #define pud_alloc(mm, pgd, address) (pgd)
70030+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
70031 #define pud_offset(pgd, start) (pgd)
70032 #define pud_none(pud) 0
70033 #define pud_bad(pud) 0
70034diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
70035index b7babf0..04ad282 100644
70036--- a/include/asm-generic/atomic-long.h
70037+++ b/include/asm-generic/atomic-long.h
70038@@ -22,6 +22,12 @@
70039
70040 typedef atomic64_t atomic_long_t;
70041
70042+#ifdef CONFIG_PAX_REFCOUNT
70043+typedef atomic64_unchecked_t atomic_long_unchecked_t;
70044+#else
70045+typedef atomic64_t atomic_long_unchecked_t;
70046+#endif
70047+
70048 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
70049
70050 static inline long atomic_long_read(atomic_long_t *l)
70051@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
70052 return (long)atomic64_read(v);
70053 }
70054
70055+#ifdef CONFIG_PAX_REFCOUNT
70056+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
70057+{
70058+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
70059+
70060+ return (long)atomic64_read_unchecked(v);
70061+}
70062+#endif
70063+
70064 static inline void atomic_long_set(atomic_long_t *l, long i)
70065 {
70066 atomic64_t *v = (atomic64_t *)l;
70067@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
70068 atomic64_set(v, i);
70069 }
70070
70071+#ifdef CONFIG_PAX_REFCOUNT
70072+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
70073+{
70074+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
70075+
70076+ atomic64_set_unchecked(v, i);
70077+}
70078+#endif
70079+
70080 static inline void atomic_long_inc(atomic_long_t *l)
70081 {
70082 atomic64_t *v = (atomic64_t *)l;
70083@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
70084 atomic64_inc(v);
70085 }
70086
70087+#ifdef CONFIG_PAX_REFCOUNT
70088+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
70089+{
70090+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
70091+
70092+ atomic64_inc_unchecked(v);
70093+}
70094+#endif
70095+
70096 static inline void atomic_long_dec(atomic_long_t *l)
70097 {
70098 atomic64_t *v = (atomic64_t *)l;
70099@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
70100 atomic64_dec(v);
70101 }
70102
70103+#ifdef CONFIG_PAX_REFCOUNT
70104+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
70105+{
70106+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
70107+
70108+ atomic64_dec_unchecked(v);
70109+}
70110+#endif
70111+
70112 static inline void atomic_long_add(long i, atomic_long_t *l)
70113 {
70114 atomic64_t *v = (atomic64_t *)l;
70115@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
70116 atomic64_add(i, v);
70117 }
70118
70119+#ifdef CONFIG_PAX_REFCOUNT
70120+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
70121+{
70122+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
70123+
70124+ atomic64_add_unchecked(i, v);
70125+}
70126+#endif
70127+
70128 static inline void atomic_long_sub(long i, atomic_long_t *l)
70129 {
70130 atomic64_t *v = (atomic64_t *)l;
70131@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
70132 atomic64_sub(i, v);
70133 }
70134
70135+#ifdef CONFIG_PAX_REFCOUNT
70136+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
70137+{
70138+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
70139+
70140+ atomic64_sub_unchecked(i, v);
70141+}
70142+#endif
70143+
70144 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
70145 {
70146 atomic64_t *v = (atomic64_t *)l;
70147@@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
70148 return (long)atomic64_add_return(i, v);
70149 }
70150
70151+#ifdef CONFIG_PAX_REFCOUNT
70152+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
70153+{
70154+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
70155+
70156+ return (long)atomic64_add_return_unchecked(i, v);
70157+}
70158+#endif
70159+
70160 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
70161 {
70162 atomic64_t *v = (atomic64_t *)l;
70163@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
70164 return (long)atomic64_inc_return(v);
70165 }
70166
70167+#ifdef CONFIG_PAX_REFCOUNT
70168+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
70169+{
70170+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
70171+
70172+ return (long)atomic64_inc_return_unchecked(v);
70173+}
70174+#endif
70175+
70176 static inline long atomic_long_dec_return(atomic_long_t *l)
70177 {
70178 atomic64_t *v = (atomic64_t *)l;
70179@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
70180
70181 typedef atomic_t atomic_long_t;
70182
70183+#ifdef CONFIG_PAX_REFCOUNT
70184+typedef atomic_unchecked_t atomic_long_unchecked_t;
70185+#else
70186+typedef atomic_t atomic_long_unchecked_t;
70187+#endif
70188+
70189 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
70190 static inline long atomic_long_read(atomic_long_t *l)
70191 {
70192@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
70193 return (long)atomic_read(v);
70194 }
70195
70196+#ifdef CONFIG_PAX_REFCOUNT
70197+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
70198+{
70199+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
70200+
70201+ return (long)atomic_read_unchecked(v);
70202+}
70203+#endif
70204+
70205 static inline void atomic_long_set(atomic_long_t *l, long i)
70206 {
70207 atomic_t *v = (atomic_t *)l;
70208@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
70209 atomic_set(v, i);
70210 }
70211
70212+#ifdef CONFIG_PAX_REFCOUNT
70213+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
70214+{
70215+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
70216+
70217+ atomic_set_unchecked(v, i);
70218+}
70219+#endif
70220+
70221 static inline void atomic_long_inc(atomic_long_t *l)
70222 {
70223 atomic_t *v = (atomic_t *)l;
70224@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
70225 atomic_inc(v);
70226 }
70227
70228+#ifdef CONFIG_PAX_REFCOUNT
70229+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
70230+{
70231+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
70232+
70233+ atomic_inc_unchecked(v);
70234+}
70235+#endif
70236+
70237 static inline void atomic_long_dec(atomic_long_t *l)
70238 {
70239 atomic_t *v = (atomic_t *)l;
70240@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
70241 atomic_dec(v);
70242 }
70243
70244+#ifdef CONFIG_PAX_REFCOUNT
70245+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
70246+{
70247+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
70248+
70249+ atomic_dec_unchecked(v);
70250+}
70251+#endif
70252+
70253 static inline void atomic_long_add(long i, atomic_long_t *l)
70254 {
70255 atomic_t *v = (atomic_t *)l;
70256@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
70257 atomic_add(i, v);
70258 }
70259
70260+#ifdef CONFIG_PAX_REFCOUNT
70261+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
70262+{
70263+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
70264+
70265+ atomic_add_unchecked(i, v);
70266+}
70267+#endif
70268+
70269 static inline void atomic_long_sub(long i, atomic_long_t *l)
70270 {
70271 atomic_t *v = (atomic_t *)l;
70272@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
70273 atomic_sub(i, v);
70274 }
70275
70276+#ifdef CONFIG_PAX_REFCOUNT
70277+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
70278+{
70279+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
70280+
70281+ atomic_sub_unchecked(i, v);
70282+}
70283+#endif
70284+
70285 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
70286 {
70287 atomic_t *v = (atomic_t *)l;
70288@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
70289 return (long)atomic_add_return(i, v);
70290 }
70291
70292+#ifdef CONFIG_PAX_REFCOUNT
70293+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
70294+{
70295+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
70296+
70297+ return (long)atomic_add_return_unchecked(i, v);
70298+}
70299+
70300+#endif
70301+
70302 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
70303 {
70304 atomic_t *v = (atomic_t *)l;
70305@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
70306 return (long)atomic_inc_return(v);
70307 }
70308
70309+#ifdef CONFIG_PAX_REFCOUNT
70310+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
70311+{
70312+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
70313+
70314+ return (long)atomic_inc_return_unchecked(v);
70315+}
70316+#endif
70317+
70318 static inline long atomic_long_dec_return(atomic_long_t *l)
70319 {
70320 atomic_t *v = (atomic_t *)l;
70321@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
70322
70323 #endif /* BITS_PER_LONG == 64 */
70324
70325+#ifdef CONFIG_PAX_REFCOUNT
70326+static inline void pax_refcount_needs_these_functions(void)
70327+{
70328+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
70329+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
70330+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
70331+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
70332+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
70333+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
70334+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
70335+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
70336+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
70337+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
70338+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
70339+#ifdef CONFIG_X86
70340+ atomic_clear_mask_unchecked(0, NULL);
70341+ atomic_set_mask_unchecked(0, NULL);
70342+#endif
70343+
70344+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
70345+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
70346+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
70347+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
70348+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
70349+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
70350+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
70351+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
70352+}
70353+#else
70354+#define atomic_read_unchecked(v) atomic_read(v)
70355+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
70356+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
70357+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
70358+#define atomic_inc_unchecked(v) atomic_inc(v)
70359+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
70360+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
70361+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
70362+#define atomic_dec_unchecked(v) atomic_dec(v)
70363+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
70364+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
70365+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
70366+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
70367+
70368+#define atomic_long_read_unchecked(v) atomic_long_read(v)
70369+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
70370+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
70371+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
70372+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
70373+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
70374+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
70375+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
70376+#endif
70377+
70378 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
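
The #else branch above shows the design: with CONFIG_PAX_REFCOUNT off, every *_unchecked name is a plain alias for the checked primitive, so code written against the unchecked API builds either way, and the pax_refcount_needs_these_functions() stub merely forces the arch to provide the whole family. A tiny userspace sketch of the same aliasing idea, with a GCC builtin standing in for the kernel atomic (names illustrative only):

#include <stdio.h>

typedef struct { volatile long counter; } atomic_long_t;
typedef atomic_long_t atomic_long_unchecked_t;   /* the !PAX_REFCOUNT case */

#define atomic_long_inc(v)           __sync_add_and_fetch(&(v)->counter, 1)
#define atomic_long_inc_unchecked(v) atomic_long_inc(v)  /* alias, no overflow check */

int main(void)
{
        atomic_long_unchecked_t n = { 0 };

        atomic_long_inc_unchecked(&n);
        printf("%ld\n", n.counter);
        return 0;
}
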
70379diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
70380index 33bd2de..f31bff97 100644
70381--- a/include/asm-generic/atomic.h
70382+++ b/include/asm-generic/atomic.h
70383@@ -153,7 +153,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
70384 * Atomically clears the bits set in @mask from @v
70385 */
70386 #ifndef atomic_clear_mask
70387-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
70388+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
70389 {
70390 unsigned long flags;
70391
70392diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
70393index b18ce4f..2ee2843 100644
70394--- a/include/asm-generic/atomic64.h
70395+++ b/include/asm-generic/atomic64.h
70396@@ -16,6 +16,8 @@ typedef struct {
70397 long long counter;
70398 } atomic64_t;
70399
70400+typedef atomic64_t atomic64_unchecked_t;
70401+
70402 #define ATOMIC64_INIT(i) { (i) }
70403
70404 extern long long atomic64_read(const atomic64_t *v);
70405@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
70406 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
70407 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
70408
70409+#define atomic64_read_unchecked(v) atomic64_read(v)
70410+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
70411+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
70412+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
70413+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
70414+#define atomic64_inc_unchecked(v) atomic64_inc(v)
70415+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
70416+#define atomic64_dec_unchecked(v) atomic64_dec(v)
70417+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
70418+
70419 #endif /* _ASM_GENERIC_ATOMIC64_H */
70420diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
70421index 1bfcfe5..e04c5c9 100644
70422--- a/include/asm-generic/cache.h
70423+++ b/include/asm-generic/cache.h
70424@@ -6,7 +6,7 @@
70425 * cache lines need to provide their own cache.h.
70426 */
70427
70428-#define L1_CACHE_SHIFT 5
70429-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
70430+#define L1_CACHE_SHIFT 5UL
70431+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
70432
70433 #endif /* __ASM_GENERIC_CACHE_H */
70434diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
70435index 0d68a1e..b74a761 100644
70436--- a/include/asm-generic/emergency-restart.h
70437+++ b/include/asm-generic/emergency-restart.h
70438@@ -1,7 +1,7 @@
70439 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
70440 #define _ASM_GENERIC_EMERGENCY_RESTART_H
70441
70442-static inline void machine_emergency_restart(void)
70443+static inline __noreturn void machine_emergency_restart(void)
70444 {
70445 machine_restart(NULL);
70446 }
70447diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
70448index 90f99c7..00ce236 100644
70449--- a/include/asm-generic/kmap_types.h
70450+++ b/include/asm-generic/kmap_types.h
70451@@ -2,9 +2,9 @@
70452 #define _ASM_GENERIC_KMAP_TYPES_H
70453
70454 #ifdef __WITH_KM_FENCE
70455-# define KM_TYPE_NR 41
70456+# define KM_TYPE_NR 42
70457 #else
70458-# define KM_TYPE_NR 20
70459+# define KM_TYPE_NR 21
70460 #endif
70461
70462 #endif
70463diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
70464index 9ceb03b..62b0b8f 100644
70465--- a/include/asm-generic/local.h
70466+++ b/include/asm-generic/local.h
70467@@ -23,24 +23,37 @@ typedef struct
70468 atomic_long_t a;
70469 } local_t;
70470
70471+typedef struct {
70472+ atomic_long_unchecked_t a;
70473+} local_unchecked_t;
70474+
70475 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
70476
70477 #define local_read(l) atomic_long_read(&(l)->a)
70478+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
70479 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
70480+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
70481 #define local_inc(l) atomic_long_inc(&(l)->a)
70482+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
70483 #define local_dec(l) atomic_long_dec(&(l)->a)
70484+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
70485 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
70486+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
70487 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
70488+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
70489
70490 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
70491 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
70492 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
70493 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
70494 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
70495+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
70496 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
70497 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
70498+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
70499
70500 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
70501+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
70502 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
70503 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
70504 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
70505diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
70506index 725612b..9cc513a 100644
70507--- a/include/asm-generic/pgtable-nopmd.h
70508+++ b/include/asm-generic/pgtable-nopmd.h
70509@@ -1,14 +1,19 @@
70510 #ifndef _PGTABLE_NOPMD_H
70511 #define _PGTABLE_NOPMD_H
70512
70513-#ifndef __ASSEMBLY__
70514-
70515 #include <asm-generic/pgtable-nopud.h>
70516
70517-struct mm_struct;
70518-
70519 #define __PAGETABLE_PMD_FOLDED
70520
70521+#define PMD_SHIFT PUD_SHIFT
70522+#define PTRS_PER_PMD 1
70523+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
70524+#define PMD_MASK (~(PMD_SIZE-1))
70525+
70526+#ifndef __ASSEMBLY__
70527+
70528+struct mm_struct;
70529+
70530 /*
70531 * Having the pmd type consist of a pud gets the size right, and allows
70532 * us to conceptually access the pud entry that this pmd is folded into
70533@@ -16,11 +21,6 @@ struct mm_struct;
70534 */
70535 typedef struct { pud_t pud; } pmd_t;
70536
70537-#define PMD_SHIFT PUD_SHIFT
70538-#define PTRS_PER_PMD 1
70539-#define PMD_SIZE (1UL << PMD_SHIFT)
70540-#define PMD_MASK (~(PMD_SIZE-1))
70541-
70542 /*
70543 * The "pud_xxx()" functions here are trivial for a folded two-level
70544 * setup: the pmd is never bad, and a pmd always exists (as it's folded
70545diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
70546index 810431d..0ec4804f 100644
70547--- a/include/asm-generic/pgtable-nopud.h
70548+++ b/include/asm-generic/pgtable-nopud.h
70549@@ -1,10 +1,15 @@
70550 #ifndef _PGTABLE_NOPUD_H
70551 #define _PGTABLE_NOPUD_H
70552
70553-#ifndef __ASSEMBLY__
70554-
70555 #define __PAGETABLE_PUD_FOLDED
70556
70557+#define PUD_SHIFT PGDIR_SHIFT
70558+#define PTRS_PER_PUD 1
70559+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
70560+#define PUD_MASK (~(PUD_SIZE-1))
70561+
70562+#ifndef __ASSEMBLY__
70563+
70564 /*
70565 * Having the pud type consist of a pgd gets the size right, and allows
70566 * us to conceptually access the pgd entry that this pud is folded into
70567@@ -12,11 +17,6 @@
70568 */
70569 typedef struct { pgd_t pgd; } pud_t;
70570
70571-#define PUD_SHIFT PGDIR_SHIFT
70572-#define PTRS_PER_PUD 1
70573-#define PUD_SIZE (1UL << PUD_SHIFT)
70574-#define PUD_MASK (~(PUD_SIZE-1))
70575-
70576 /*
70577 * The "pgd_xxx()" functions here are trivial for a folded two-level
70578 * setup: the pud is never bad, and a pud always exists (as it's folded
70579@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
70580 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
70581
70582 #define pgd_populate(mm, pgd, pud) do { } while (0)
70583+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
70584 /*
70585 * (puds are folded into pgds so this doesn't get actually called,
70586 * but the define is needed for a generic inline function.)
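
Both pgtable-nopmd.h and pgtable-nopud.h hunks above move the size macros outside the #ifndef __ASSEMBLY__ guard and switch the constant to _AC(1,UL), which is what lets assembly code see PMD_SIZE/PUD_SIZE: _AC() (from the kernel's <uapi/linux/const.h>) pastes the UL suffix in C but drops it for the assembler. A standalone re-creation of that macro for illustration (DEMO_* names are invented):

#include <stdio.h>

#ifdef __ASSEMBLY__
#define _AC(X, Y) X                     /* assembler: no type suffix */
#else
#define __AC(X, Y) (X##Y)
#define _AC(X, Y) __AC(X, Y)            /* C: paste the suffix, e.g. 1UL */
#endif

#define DEMO_SHIFT 21
#define DEMO_SIZE (_AC(1, UL) << DEMO_SHIFT)    /* 1UL << 21 in C, 1 << 21 in asm */

int main(void)
{
        printf("%lu\n", DEMO_SIZE);
        return 0;
}
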
70587diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
70588index a59ff51..2594a70 100644
70589--- a/include/asm-generic/pgtable.h
70590+++ b/include/asm-generic/pgtable.h
70591@@ -688,6 +688,14 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
70592 }
70593 #endif /* CONFIG_NUMA_BALANCING */
70594
70595+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
70596+static inline unsigned long pax_open_kernel(void) { return 0; }
70597+#endif
70598+
70599+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
70600+static inline unsigned long pax_close_kernel(void) { return 0; }
70601+#endif
70602+
70603 #endif /* CONFIG_MMU */
70604
70605 #endif /* !__ASSEMBLY__ */
70606diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
70607index 13821c3..5672d7e 100644
70608--- a/include/asm-generic/tlb.h
70609+++ b/include/asm-generic/tlb.h
70610@@ -112,7 +112,7 @@ struct mmu_gather {
70611
70612 #define HAVE_GENERIC_MMU_GATHER
70613
70614-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
70615+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
70616 void tlb_flush_mmu(struct mmu_gather *tlb);
70617 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
70618 unsigned long end);
70619diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
70620index c184aa8..d049942 100644
70621--- a/include/asm-generic/uaccess.h
70622+++ b/include/asm-generic/uaccess.h
70623@@ -343,4 +343,12 @@ clear_user(void __user *to, unsigned long n)
70624 return __clear_user(to, n);
70625 }
70626
70627+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
70628+//static inline unsigned long pax_open_userland(void) { return 0; }
70629+#endif
70630+
70631+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
70632+//static inline unsigned long pax_close_userland(void) { return 0; }
70633+#endif
70634+
70635 #endif /* __ASM_GENERIC_UACCESS_H */
70636diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
70637index eb58d2d..df131bf 100644
70638--- a/include/asm-generic/vmlinux.lds.h
70639+++ b/include/asm-generic/vmlinux.lds.h
70640@@ -239,6 +239,7 @@
70641 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
70642 VMLINUX_SYMBOL(__start_rodata) = .; \
70643 *(.rodata) *(.rodata.*) \
70644+ *(.data..read_only) \
70645 *(__vermagic) /* Kernel version magic */ \
70646 . = ALIGN(8); \
70647 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
70648@@ -749,17 +750,18 @@
70649 * section in the linker script will go there too. @phdr should have
70650 * a leading colon.
70651 *
70652- * Note that this macros defines __per_cpu_load as an absolute symbol.
70653+ * Note that this macro defines per_cpu_load as an absolute symbol.

70654 * If there is no need to put the percpu section at a predetermined
70655 * address, use PERCPU_SECTION.
70656 */
70657 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
70658- VMLINUX_SYMBOL(__per_cpu_load) = .; \
70659- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
70660+ per_cpu_load = .; \
70661+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
70662 - LOAD_OFFSET) { \
70663+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
70664 PERCPU_INPUT(cacheline) \
70665 } phdr \
70666- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
70667+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
70668
70669 /**
70670 * PERCPU_SECTION - define output section for percpu area, simple version
70671diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
70672index 418d270..bfd2794 100644
70673--- a/include/crypto/algapi.h
70674+++ b/include/crypto/algapi.h
70675@@ -34,7 +34,7 @@ struct crypto_type {
70676 unsigned int maskclear;
70677 unsigned int maskset;
70678 unsigned int tfmsize;
70679-};
70680+} __do_const;
70681
70682 struct crypto_instance {
70683 struct crypto_alg alg;
70684diff --git a/include/drm/drmP.h b/include/drm/drmP.h
70685index 63d17ee..716de2b 100644
70686--- a/include/drm/drmP.h
70687+++ b/include/drm/drmP.h
70688@@ -72,6 +72,7 @@
70689 #include <linux/workqueue.h>
70690 #include <linux/poll.h>
70691 #include <asm/pgalloc.h>
70692+#include <asm/local.h>
70693 #include <drm/drm.h>
70694 #include <drm/drm_sarea.h>
70695
70696@@ -296,10 +297,12 @@ do { \
70697 * \param cmd command.
70698 * \param arg argument.
70699 */
70700-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
70701+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
70702+ struct drm_file *file_priv);
70703+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
70704 struct drm_file *file_priv);
70705
70706-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
70707+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
70708 unsigned long arg);
70709
70710 #define DRM_IOCTL_NR(n) _IOC_NR(n)
70711@@ -314,10 +317,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
70712 struct drm_ioctl_desc {
70713 unsigned int cmd;
70714 int flags;
70715- drm_ioctl_t *func;
70716+ drm_ioctl_t func;
70717 unsigned int cmd_drv;
70718 const char *name;
70719-};
70720+} __do_const;
70721
70722 /**
70723 * Creates a driver or general drm_ioctl_desc array entry for the given
70724@@ -1015,7 +1018,7 @@ struct drm_info_list {
70725 int (*show)(struct seq_file*, void*); /** show callback */
70726 u32 driver_features; /**< Required driver features for this entry */
70727 void *data;
70728-};
70729+} __do_const;
70730
70731 /**
70732 * debugfs node structure. This structure represents a debugfs file.
70733@@ -1088,7 +1091,7 @@ struct drm_device {
70734
70735 /** \name Usage Counters */
70736 /*@{ */
70737- int open_count; /**< Outstanding files open */
70738+ local_t open_count; /**< Outstanding files open */
70739 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
70740 atomic_t vma_count; /**< Outstanding vma areas open */
70741 int buf_use; /**< Buffers in use -- cannot alloc */
70742@@ -1099,7 +1102,7 @@ struct drm_device {
70743 /*@{ */
70744 unsigned long counters;
70745 enum drm_stat_type types[15];
70746- atomic_t counts[15];
70747+ atomic_unchecked_t counts[15];
70748 /*@} */
70749
70750 struct list_head filelist;
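Converting open_count from int to local_t (hence the new <asm/local.h> include at the top of this hunk) routes updates through an explicit atomic-style accessor instead of a bare ++/--. A hedged sketch of what the drm_fops.c call sites look like after such a conversion (the exact call sites are not shown in this excerpt):

	if (local_inc_return(&dev->open_count) == 1)
		retcode = drm_setup(dev);	/* first opener */
	...
	local_dec(&dev->open_count);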
70751diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
70752index f43d556..94d9343 100644
70753--- a/include/drm/drm_crtc_helper.h
70754+++ b/include/drm/drm_crtc_helper.h
70755@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
70756 struct drm_connector *connector);
70757 /* disable encoder when not in use - more explicit than dpms off */
70758 void (*disable)(struct drm_encoder *encoder);
70759-};
70760+} __no_const;
70761
70762 /**
70763 * drm_connector_helper_funcs - helper operations for connectors
70764diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
70765index 72dcbe8..8db58d7 100644
70766--- a/include/drm/ttm/ttm_memory.h
70767+++ b/include/drm/ttm/ttm_memory.h
70768@@ -48,7 +48,7 @@
70769
70770 struct ttm_mem_shrink {
70771 int (*do_shrink) (struct ttm_mem_shrink *);
70772-};
70773+} __no_const;
70774
70775 /**
70776 * struct ttm_mem_global - Global memory accounting structure.
70777diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
70778index 4b840e8..155d235 100644
70779--- a/include/keys/asymmetric-subtype.h
70780+++ b/include/keys/asymmetric-subtype.h
70781@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
70782 /* Verify the signature on a key of this subtype (optional) */
70783 int (*verify_signature)(const struct key *key,
70784 const struct public_key_signature *sig);
70785-};
70786+} __do_const;
70787
70788 /**
70789 * asymmetric_key_subtype - Get the subtype from an asymmetric key
70790diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
70791index c1da539..1dcec55 100644
70792--- a/include/linux/atmdev.h
70793+++ b/include/linux/atmdev.h
70794@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
70795 #endif
70796
70797 struct k_atm_aal_stats {
70798-#define __HANDLE_ITEM(i) atomic_t i
70799+#define __HANDLE_ITEM(i) atomic_unchecked_t i
70800 __AAL_STAT_ITEMS
70801 #undef __HANDLE_ITEM
70802 };
70803@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
70804 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
70805 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
70806 struct module *owner;
70807-};
70808+} __do_const;
70809
70810 struct atmphy_ops {
70811 int (*start)(struct atm_dev *dev);
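This is the first of many atomic_t -> atomic_unchecked_t conversions in this section. Under PAX_REFCOUNT, arithmetic on plain atomic_t is overflow-checked to catch reference-count overflows; counters that are pure statistics, like these AAL stats, may wrap harmlessly and are therefore switched to the unchecked variants. Sketch of the intended split (assuming PAX_REFCOUNT semantics):

	atomic_t refs;			/* refcount: overflow is reported */
	atomic_unchecked_t octets;	/* statistic: wrapping is fine    */

	atomic_inc(&refs);		/* checked   */
	atomic_inc_unchecked(&octets);	/* unchecked */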
70812diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
70813index 70cf138..0418ee2 100644
70814--- a/include/linux/binfmts.h
70815+++ b/include/linux/binfmts.h
70816@@ -73,8 +73,10 @@ struct linux_binfmt {
70817 int (*load_binary)(struct linux_binprm *);
70818 int (*load_shlib)(struct file *);
70819 int (*core_dump)(struct coredump_params *cprm);
70820+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
70821+ void (*handle_mmap)(struct file *);
70822 unsigned long min_coredump; /* minimal dump size */
70823-};
70824+} __do_const;
70825
70826 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
70827
70828diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
70829index 2fdb4a4..54aad7e 100644
70830--- a/include/linux/blkdev.h
70831+++ b/include/linux/blkdev.h
70832@@ -1526,7 +1526,7 @@ struct block_device_operations {
70833 /* this callback is with swap_lock and sometimes page table lock held */
70834 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
70835 struct module *owner;
70836-};
70837+} __do_const;
70838
70839 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
70840 unsigned long);
70841diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
70842index 7c2e030..b72475d 100644
70843--- a/include/linux/blktrace_api.h
70844+++ b/include/linux/blktrace_api.h
70845@@ -23,7 +23,7 @@ struct blk_trace {
70846 struct dentry *dir;
70847 struct dentry *dropped_file;
70848 struct dentry *msg_file;
70849- atomic_t dropped;
70850+ atomic_unchecked_t dropped;
70851 };
70852
70853 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
70854diff --git a/include/linux/cache.h b/include/linux/cache.h
70855index 4c57065..4307975 100644
70856--- a/include/linux/cache.h
70857+++ b/include/linux/cache.h
70858@@ -16,6 +16,10 @@
70859 #define __read_mostly
70860 #endif
70861
70862+#ifndef __read_only
70863+#define __read_only __read_mostly
70864+#endif
70865+
70866 #ifndef ____cacheline_aligned
70867 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
70868 #endif
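__read_only gains a safe fallback to __read_mostly here; on PaX-enabled architectures it instead places the object in the .data..read_only input section that the vmlinux.lds.h hunk above folds into .rodata, so the variable is write-protected after boot and writable only inside the pax_open_kernel()/pax_close_kernel() bracket sketched earlier. Illustration (the variable is hypothetical):

	static unsigned long sysctl_limit __read_only = 4096;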
70869diff --git a/include/linux/capability.h b/include/linux/capability.h
70870index d9a4f7f4..19f77d6 100644
70871--- a/include/linux/capability.h
70872+++ b/include/linux/capability.h
70873@@ -213,8 +213,13 @@ extern bool ns_capable(struct user_namespace *ns, int cap);
70874 extern bool nsown_capable(int cap);
70875 extern bool inode_capable(const struct inode *inode, int cap);
70876 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
70877+extern bool capable_nolog(int cap);
70878+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
70879+extern bool inode_capable_nolog(const struct inode *inode, int cap);
70880
70881 /* audit system wants to get cap info from files as well */
70882 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
70883
70884+extern int is_privileged_binary(const struct dentry *dentry);
70885+
70886 #endif /* !_LINUX_CAPABILITY_H */
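The *_nolog capability helpers mirror capable()/ns_capable()/inode_capable() but skip grsecurity's denial logging, for probes where failing the check is the expected common case. Hypothetical use:

	/* query-style check; a miss should not spam the audit log */
	if (capable_nolog(CAP_SYS_RAWIO))
		allow_raw_access();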
70887diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
70888index 8609d57..86e4d79 100644
70889--- a/include/linux/cdrom.h
70890+++ b/include/linux/cdrom.h
70891@@ -87,7 +87,6 @@ struct cdrom_device_ops {
70892
70893 /* driver specifications */
70894 const int capability; /* capability flags */
70895- int n_minors; /* number of active minor devices */
70896 /* handle uniform packets for scsi type devices (scsi,atapi) */
70897 int (*generic_packet) (struct cdrom_device_info *,
70898 struct packet_command *);
70899diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
70900index 4ce9056..86caac6 100644
70901--- a/include/linux/cleancache.h
70902+++ b/include/linux/cleancache.h
70903@@ -31,7 +31,7 @@ struct cleancache_ops {
70904 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
70905 void (*invalidate_inode)(int, struct cleancache_filekey);
70906 void (*invalidate_fs)(int);
70907-};
70908+} __no_const;
70909
70910 extern struct cleancache_ops *
70911 cleancache_register_ops(struct cleancache_ops *ops);
70912diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
70913index 1186098..f87e53d 100644
70914--- a/include/linux/clk-provider.h
70915+++ b/include/linux/clk-provider.h
70916@@ -132,6 +132,7 @@ struct clk_ops {
70917 unsigned long);
70918 void (*init)(struct clk_hw *hw);
70919 };
70920+typedef struct clk_ops __no_const clk_ops_no_const;
70921
70922 /**
70923 * struct clk_init_data - holds init data that's common to all clocks and is
70924diff --git a/include/linux/compat.h b/include/linux/compat.h
70925index 7f0c1dd..206ac34 100644
70926--- a/include/linux/compat.h
70927+++ b/include/linux/compat.h
70928@@ -312,7 +312,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
70929 compat_size_t __user *len_ptr);
70930
70931 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
70932-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
70933+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
70934 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
70935 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
70936 compat_ssize_t msgsz, int msgflg);
70937@@ -419,7 +419,7 @@ extern int compat_ptrace_request(struct task_struct *child,
70938 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
70939 compat_ulong_t addr, compat_ulong_t data);
70940 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
70941- compat_long_t addr, compat_long_t data);
70942+ compat_ulong_t addr, compat_ulong_t data);
70943
70944 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
70945 /*
70946@@ -669,6 +669,7 @@ asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
70947
70948 int compat_restore_altstack(const compat_stack_t __user *uss);
70949 int __compat_save_altstack(compat_stack_t __user *, unsigned long);
70950+void __compat_save_altstack_ex(compat_stack_t __user *, unsigned long);
70951
70952 asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
70953 struct compat_timespec __user *interval);
70954diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
70955index 842de22..7f3a41f 100644
70956--- a/include/linux/compiler-gcc4.h
70957+++ b/include/linux/compiler-gcc4.h
70958@@ -39,9 +39,29 @@
70959 # define __compiletime_warning(message) __attribute__((warning(message)))
70960 # define __compiletime_error(message) __attribute__((error(message)))
70961 #endif /* __CHECKER__ */
70962+
70963+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
70964+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
70965+#define __bos0(ptr) __bos((ptr), 0)
70966+#define __bos1(ptr) __bos((ptr), 1)
70967 #endif /* GCC_VERSION >= 40300 */
70968
70969 #if GCC_VERSION >= 40500
70970+
70971+#ifdef CONSTIFY_PLUGIN
70972+#define __no_const __attribute__((no_const))
70973+#define __do_const __attribute__((do_const))
70974+#endif
70975+
70976+#ifdef SIZE_OVERFLOW_PLUGIN
70977+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
70978+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
70979+#endif
70980+
70981+#ifdef LATENT_ENTROPY_PLUGIN
70982+#define __latent_entropy __attribute__((latent_entropy))
70983+#endif
70984+
70985 /*
70986 * Mark a position in code as unreachable. This can be used to
70987 * suppress control flow warnings after asm blocks that transfer
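These attribute definitions are the hooks for the grsecurity GCC plugins named in the #ifdefs: the constify plugin makes every instance of a __do_const struct read-only, and __no_const opts a struct (or a typedef alias) back out when some instance must stay writable, as with the clk_ops_no_const and global_attr_no_const typedefs later in this section. A hypothetical example of the pattern:

	struct sample_ops {
		int (*start)(void);
		void (*stop)(void);
	} __do_const;			/* instances forced into rodata */

	typedef struct sample_ops __no_const sample_ops_no_const;
					/* escape hatch for the rare
					   instance filled in at runtime */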
70988diff --git a/include/linux/compiler.h b/include/linux/compiler.h
70989index 92669cd..1771a15 100644
70990--- a/include/linux/compiler.h
70991+++ b/include/linux/compiler.h
70992@@ -5,11 +5,14 @@
70993
70994 #ifdef __CHECKER__
70995 # define __user __attribute__((noderef, address_space(1)))
70996+# define __force_user __force __user
70997 # define __kernel __attribute__((address_space(0)))
70998+# define __force_kernel __force __kernel
70999 # define __safe __attribute__((safe))
71000 # define __force __attribute__((force))
71001 # define __nocast __attribute__((nocast))
71002 # define __iomem __attribute__((noderef, address_space(2)))
71003+# define __force_iomem __force __iomem
71004 # define __must_hold(x) __attribute__((context(x,1,1)))
71005 # define __acquires(x) __attribute__((context(x,0,1)))
71006 # define __releases(x) __attribute__((context(x,1,0)))
71007@@ -17,20 +20,37 @@
71008 # define __release(x) __context__(x,-1)
71009 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
71010 # define __percpu __attribute__((noderef, address_space(3)))
71011+# define __force_percpu __force __percpu
71012 #ifdef CONFIG_SPARSE_RCU_POINTER
71013 # define __rcu __attribute__((noderef, address_space(4)))
71014+# define __force_rcu __force __rcu
71015 #else
71016 # define __rcu
71017+# define __force_rcu
71018 #endif
71019 extern void __chk_user_ptr(const volatile void __user *);
71020 extern void __chk_io_ptr(const volatile void __iomem *);
71021 #else
71022-# define __user
71023-# define __kernel
71024+# ifdef CHECKER_PLUGIN
71025+//# define __user
71026+//# define __force_user
71027+//# define __kernel
71028+//# define __force_kernel
71029+# else
71030+# ifdef STRUCTLEAK_PLUGIN
71031+# define __user __attribute__((user))
71032+# else
71033+# define __user
71034+# endif
71035+# define __force_user
71036+# define __kernel
71037+# define __force_kernel
71038+# endif
71039 # define __safe
71040 # define __force
71041 # define __nocast
71042 # define __iomem
71043+# define __force_iomem
71044 # define __chk_user_ptr(x) (void)0
71045 # define __chk_io_ptr(x) (void)0
71046 # define __builtin_warning(x, y...) (1)
71047@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
71048 # define __release(x) (void)0
71049 # define __cond_lock(x,c) (c)
71050 # define __percpu
71051+# define __force_percpu
71052 # define __rcu
71053+# define __force_rcu
71054 #endif
71055
71056 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
71057@@ -275,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
71058 # define __attribute_const__ /* unimplemented */
71059 #endif
71060
71061+#ifndef __no_const
71062+# define __no_const
71063+#endif
71064+
71065+#ifndef __do_const
71066+# define __do_const
71067+#endif
71068+
71069+#ifndef __size_overflow
71070+# define __size_overflow(...)
71071+#endif
71072+
71073+#ifndef __intentional_overflow
71074+# define __intentional_overflow(...)
71075+#endif
71076+
71077+#ifndef __latent_entropy
71078+# define __latent_entropy
71079+#endif
71080+
71081 /*
71082 * Tell gcc if a function is cold. The compiler will assume any path
71083 * directly leading to the call is unlikely.
71084@@ -284,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
71085 #define __cold
71086 #endif
71087
71088+#ifndef __alloc_size
71089+#define __alloc_size(...)
71090+#endif
71091+
71092+#ifndef __bos
71093+#define __bos(ptr, arg)
71094+#endif
71095+
71096+#ifndef __bos0
71097+#define __bos0(ptr)
71098+#endif
71099+
71100+#ifndef __bos1
71101+#define __bos1(ptr)
71102+#endif
71103+
71104 /* Simple shorthand for a section definition */
71105 #ifndef __section
71106 # define __section(S) __attribute__ ((__section__(#S)))
71107@@ -349,7 +407,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
71108 * use is to mediate communication between process-level code and irq/NMI
71109 * handlers, all running on the same CPU.
71110 */
71111-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
71112+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
71113+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
71114
71115 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
71116 #ifdef CONFIG_KPROBES
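Adding const to the ACCESS_ONCE() cast makes the classic macro read-only: a stray write through it now fails to compile, and intentional volatile writes must use the new ACCESS_ONCE_RW(). Illustration:

	int flag, r;

	r = ACCESS_ONCE(flag);		/* reads work as before        */
	ACCESS_ONCE_RW(flag) = 1;	/* writes are now explicit     */
	/* ACCESS_ONCE(flag) = 1;	   would be a compile error:
					   assignment to a const lvalue */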
71117diff --git a/include/linux/completion.h b/include/linux/completion.h
71118index 33f0280..35c6568 100644
71119--- a/include/linux/completion.h
71120+++ b/include/linux/completion.h
71121@@ -79,15 +79,15 @@ static inline void init_completion(struct completion *x)
71122 extern void wait_for_completion(struct completion *);
71123 extern void wait_for_completion_io(struct completion *);
71124 extern int wait_for_completion_interruptible(struct completion *x);
71125-extern int wait_for_completion_killable(struct completion *x);
71126+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
71127 extern unsigned long wait_for_completion_timeout(struct completion *x,
71128 unsigned long timeout);
71129 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
71130 unsigned long timeout);
71131 extern long wait_for_completion_interruptible_timeout(
71132- struct completion *x, unsigned long timeout);
71133+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
71134 extern long wait_for_completion_killable_timeout(
71135- struct completion *x, unsigned long timeout);
71136+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
71137 extern bool try_wait_for_completion(struct completion *x);
71138 extern bool completion_done(struct completion *x);
71139
71140diff --git a/include/linux/configfs.h b/include/linux/configfs.h
71141index 34025df..d94bbbc 100644
71142--- a/include/linux/configfs.h
71143+++ b/include/linux/configfs.h
71144@@ -125,7 +125,7 @@ struct configfs_attribute {
71145 const char *ca_name;
71146 struct module *ca_owner;
71147 umode_t ca_mode;
71148-};
71149+} __do_const;
71150
71151 /*
71152 * Users often need to create attribute structures for their configurable
71153diff --git a/include/linux/cpu.h b/include/linux/cpu.h
71154index 9f3c7e8..a18c7b6 100644
71155--- a/include/linux/cpu.h
71156+++ b/include/linux/cpu.h
71157@@ -115,7 +115,7 @@ enum {
71158 /* Need to know about CPUs going up/down? */
71159 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
71160 #define cpu_notifier(fn, pri) { \
71161- static struct notifier_block fn##_nb __cpuinitdata = \
71162+ static struct notifier_block fn##_nb = \
71163 { .notifier_call = fn, .priority = pri }; \
71164 register_cpu_notifier(&fn##_nb); \
71165 }
71166diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
71167index 037d36a..ca5fe6e 100644
71168--- a/include/linux/cpufreq.h
71169+++ b/include/linux/cpufreq.h
71170@@ -262,7 +262,7 @@ struct cpufreq_driver {
71171 int (*suspend) (struct cpufreq_policy *policy);
71172 int (*resume) (struct cpufreq_policy *policy);
71173 struct freq_attr **attr;
71174-};
71175+} __do_const;
71176
71177 /* flags */
71178
71179@@ -321,6 +321,7 @@ struct global_attr {
71180 ssize_t (*store)(struct kobject *a, struct attribute *b,
71181 const char *c, size_t count);
71182 };
71183+typedef struct global_attr __no_const global_attr_no_const;
71184
71185 #define define_one_global_ro(_name) \
71186 static struct global_attr _name = \
71187diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
71188index 8f04062..900239a 100644
71189--- a/include/linux/cpuidle.h
71190+++ b/include/linux/cpuidle.h
71191@@ -52,7 +52,8 @@ struct cpuidle_state {
71192 int index);
71193
71194 int (*enter_dead) (struct cpuidle_device *dev, int index);
71195-};
71196+} __do_const;
71197+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
71198
71199 /* Idle State Flags */
71200 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
71201@@ -191,7 +192,7 @@ struct cpuidle_governor {
71202 void (*reflect) (struct cpuidle_device *dev, int index);
71203
71204 struct module *owner;
71205-};
71206+} __do_const;
71207
71208 #ifdef CONFIG_CPU_IDLE
71209
71210diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
71211index d08e4d2..95fad61 100644
71212--- a/include/linux/cpumask.h
71213+++ b/include/linux/cpumask.h
71214@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
71215 }
71216
71217 /* Valid inputs for n are -1 and 0. */
71218-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
71219+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
71220 {
71221 return n+1;
71222 }
71223
71224-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
71225+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
71226 {
71227 return n+1;
71228 }
71229
71230-static inline unsigned int cpumask_next_and(int n,
71231+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
71232 const struct cpumask *srcp,
71233 const struct cpumask *andp)
71234 {
71235@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
71236 *
71237 * Returns >= nr_cpu_ids if no further cpus set.
71238 */
71239-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
71240+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
71241 {
71242 /* -1 is a legal arg here. */
71243 if (n != -1)
71244@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
71245 *
71246 * Returns >= nr_cpu_ids if no further cpus unset.
71247 */
71248-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
71249+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
71250 {
71251 /* -1 is a legal arg here. */
71252 if (n != -1)
71253@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
71254 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
71255 }
71256
71257-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
71258+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
71259 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
71260
71261 /**
71262diff --git a/include/linux/cred.h b/include/linux/cred.h
71263index 04421e8..6bce4ef 100644
71264--- a/include/linux/cred.h
71265+++ b/include/linux/cred.h
71266@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
71267 static inline void validate_process_creds(void)
71268 {
71269 }
71270+static inline void validate_task_creds(struct task_struct *task)
71271+{
71272+}
71273 #endif
71274
71275 /**
71276diff --git a/include/linux/crypto.h b/include/linux/crypto.h
71277index b92eadf..b4ecdc1 100644
71278--- a/include/linux/crypto.h
71279+++ b/include/linux/crypto.h
71280@@ -373,7 +373,7 @@ struct cipher_tfm {
71281 const u8 *key, unsigned int keylen);
71282 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
71283 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
71284-};
71285+} __no_const;
71286
71287 struct hash_tfm {
71288 int (*init)(struct hash_desc *desc);
71289@@ -394,13 +394,13 @@ struct compress_tfm {
71290 int (*cot_decompress)(struct crypto_tfm *tfm,
71291 const u8 *src, unsigned int slen,
71292 u8 *dst, unsigned int *dlen);
71293-};
71294+} __no_const;
71295
71296 struct rng_tfm {
71297 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
71298 unsigned int dlen);
71299 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
71300-};
71301+} __no_const;
71302
71303 #define crt_ablkcipher crt_u.ablkcipher
71304 #define crt_aead crt_u.aead
71305diff --git a/include/linux/ctype.h b/include/linux/ctype.h
71306index 653589e..4ef254a 100644
71307--- a/include/linux/ctype.h
71308+++ b/include/linux/ctype.h
71309@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
71310 * Fast implementation of tolower() for internal usage. Do not use in your
71311 * code.
71312 */
71313-static inline char _tolower(const char c)
71314+static inline unsigned char _tolower(const unsigned char c)
71315 {
71316 return c | 0x20;
71317 }
71318diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
71319index 7925bf0..d5143d2 100644
71320--- a/include/linux/decompress/mm.h
71321+++ b/include/linux/decompress/mm.h
71322@@ -77,7 +77,7 @@ static void free(void *where)
71323 * warnings when not needed (indeed large_malloc / large_free are not
71324 * needed by inflate */
71325
71326-#define malloc(a) kmalloc(a, GFP_KERNEL)
71327+#define malloc(a) kmalloc((a), GFP_KERNEL)
71328 #define free(a) kfree(a)
71329
71330 #define large_malloc(a) vmalloc(a)
71331diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
71332index fe8c447..bdc1f33 100644
71333--- a/include/linux/devfreq.h
71334+++ b/include/linux/devfreq.h
71335@@ -114,7 +114,7 @@ struct devfreq_governor {
71336 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
71337 int (*event_handler)(struct devfreq *devfreq,
71338 unsigned int event, void *data);
71339-};
71340+} __do_const;
71341
71342 /**
71343 * struct devfreq - Device devfreq structure
71344diff --git a/include/linux/device.h b/include/linux/device.h
71345index c0a1261..dba7569 100644
71346--- a/include/linux/device.h
71347+++ b/include/linux/device.h
71348@@ -290,7 +290,7 @@ struct subsys_interface {
71349 struct list_head node;
71350 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
71351 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
71352-};
71353+} __do_const;
71354
71355 int subsys_interface_register(struct subsys_interface *sif);
71356 void subsys_interface_unregister(struct subsys_interface *sif);
71357@@ -473,7 +473,7 @@ struct device_type {
71358 void (*release)(struct device *dev);
71359
71360 const struct dev_pm_ops *pm;
71361-};
71362+} __do_const;
71363
71364 /* interface for exporting device attributes */
71365 struct device_attribute {
71366@@ -483,11 +483,12 @@ struct device_attribute {
71367 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
71368 const char *buf, size_t count);
71369 };
71370+typedef struct device_attribute __no_const device_attribute_no_const;
71371
71372 struct dev_ext_attribute {
71373 struct device_attribute attr;
71374 void *var;
71375-};
71376+} __do_const;
71377
71378 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
71379 char *buf);
71380diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
71381index 94af418..b1ca7a2 100644
71382--- a/include/linux/dma-mapping.h
71383+++ b/include/linux/dma-mapping.h
71384@@ -54,7 +54,7 @@ struct dma_map_ops {
71385 u64 (*get_required_mask)(struct device *dev);
71386 #endif
71387 int is_phys;
71388-};
71389+} __do_const;
71390
71391 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
71392
71393diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
71394index 96d3e4a..dc36433 100644
71395--- a/include/linux/dmaengine.h
71396+++ b/include/linux/dmaengine.h
71397@@ -1035,9 +1035,9 @@ struct dma_pinned_list {
71398 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
71399 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
71400
71401-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
71402+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
71403 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
71404-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
71405+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
71406 struct dma_pinned_list *pinned_list, struct page *page,
71407 unsigned int offset, size_t len);
71408
71409diff --git a/include/linux/efi.h b/include/linux/efi.h
71410index 2bc0ad7..3f7b006 100644
71411--- a/include/linux/efi.h
71412+++ b/include/linux/efi.h
71413@@ -745,6 +745,7 @@ struct efivar_operations {
71414 efi_set_variable_t *set_variable;
71415 efi_query_variable_store_t *query_variable_store;
71416 };
71417+typedef struct efivar_operations __no_const efivar_operations_no_const;
71418
71419 struct efivars {
71420 /*
71421diff --git a/include/linux/elf.h b/include/linux/elf.h
71422index 40a3c0e..4c45a38 100644
71423--- a/include/linux/elf.h
71424+++ b/include/linux/elf.h
71425@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
71426 #define elf_note elf32_note
71427 #define elf_addr_t Elf32_Off
71428 #define Elf_Half Elf32_Half
71429+#define elf_dyn Elf32_Dyn
71430
71431 #else
71432
71433@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
71434 #define elf_note elf64_note
71435 #define elf_addr_t Elf64_Off
71436 #define Elf_Half Elf64_Half
71437+#define elf_dyn Elf64_Dyn
71438
71439 #endif
71440
71441diff --git a/include/linux/err.h b/include/linux/err.h
71442index f2edce2..cc2082c 100644
71443--- a/include/linux/err.h
71444+++ b/include/linux/err.h
71445@@ -19,12 +19,12 @@
71446
71447 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
71448
71449-static inline void * __must_check ERR_PTR(long error)
71450+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
71451 {
71452 return (void *) error;
71453 }
71454
71455-static inline long __must_check PTR_ERR(const void *ptr)
71456+static inline long __must_check __intentional_overflow(-1) PTR_ERR(const void *ptr)
71457 {
71458 return (long) ptr;
71459 }
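ERR_PTR()/PTR_ERR() deliberately round-trip a small negative errno through a pointer, a conversion the size_overflow plugin would otherwise flag; __intentional_overflow(-1) (defined in the compiler-gcc4.h hunk above) marks the wrap as intended. The standard kernel idiom the annotation has to keep working:

	struct dentry *d = lookup_one_len(name, parent, len);
	if (IS_ERR(d))
		return PTR_ERR(d);	/* decode -ENOENT and friends */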
71460diff --git a/include/linux/extcon.h b/include/linux/extcon.h
71461index fcb51c8..bdafcf6 100644
71462--- a/include/linux/extcon.h
71463+++ b/include/linux/extcon.h
71464@@ -134,7 +134,7 @@ struct extcon_dev {
71465 /* /sys/class/extcon/.../mutually_exclusive/... */
71466 struct attribute_group attr_g_muex;
71467 struct attribute **attrs_muex;
71468- struct device_attribute *d_attrs_muex;
71469+ device_attribute_no_const *d_attrs_muex;
71470 };
71471
71472 /**
71473diff --git a/include/linux/fb.h b/include/linux/fb.h
71474index d49c60f..2834fbe 100644
71475--- a/include/linux/fb.h
71476+++ b/include/linux/fb.h
71477@@ -304,7 +304,7 @@ struct fb_ops {
71478 /* called at KDB enter and leave time to prepare the console */
71479 int (*fb_debug_enter)(struct fb_info *info);
71480 int (*fb_debug_leave)(struct fb_info *info);
71481-};
71482+} __do_const;
71483
71484 #ifdef CONFIG_FB_TILEBLITTING
71485 #define FB_TILE_CURSOR_NONE 0
71486diff --git a/include/linux/filter.h b/include/linux/filter.h
71487index f65f5a6..2f4f93a 100644
71488--- a/include/linux/filter.h
71489+++ b/include/linux/filter.h
71490@@ -20,6 +20,7 @@ struct compat_sock_fprog {
71491
71492 struct sk_buff;
71493 struct sock;
71494+struct bpf_jit_work;
71495
71496 struct sk_filter
71497 {
71498@@ -27,6 +28,9 @@ struct sk_filter
71499 unsigned int len; /* Number of filter blocks */
71500 unsigned int (*bpf_func)(const struct sk_buff *skb,
71501 const struct sock_filter *filter);
71502+#ifdef CONFIG_BPF_JIT
71503+ struct bpf_jit_work *work;
71504+#endif
71505 struct rcu_head rcu;
71506 struct sock_filter insns[0];
71507 };
71508diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
71509index 8293262..2b3b8bd 100644
71510--- a/include/linux/frontswap.h
71511+++ b/include/linux/frontswap.h
71512@@ -11,7 +11,7 @@ struct frontswap_ops {
71513 int (*load)(unsigned, pgoff_t, struct page *);
71514 void (*invalidate_page)(unsigned, pgoff_t);
71515 void (*invalidate_area)(unsigned);
71516-};
71517+} __no_const;
71518
71519 extern bool frontswap_enabled;
71520 extern struct frontswap_ops *
71521diff --git a/include/linux/fs.h b/include/linux/fs.h
71522index 65c2be2..4c53f6e 100644
71523--- a/include/linux/fs.h
71524+++ b/include/linux/fs.h
71525@@ -1543,7 +1543,8 @@ struct file_operations {
71526 long (*fallocate)(struct file *file, int mode, loff_t offset,
71527 loff_t len);
71528 int (*show_fdinfo)(struct seq_file *m, struct file *f);
71529-};
71530+} __do_const;
71531+typedef struct file_operations __no_const file_operations_no_const;
71532
71533 struct inode_operations {
71534 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
71535@@ -2688,4 +2689,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
71536 inode->i_flags |= S_NOSEC;
71537 }
71538
71539+static inline bool is_sidechannel_device(const struct inode *inode)
71540+{
71541+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
71542+ umode_t mode = inode->i_mode;
71543+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
71544+#else
71545+ return false;
71546+#endif
71547+}
71548+
71549 #endif /* _LINUX_FS_H */
71550diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
71551index 2b93a9a..855d94a 100644
71552--- a/include/linux/fs_struct.h
71553+++ b/include/linux/fs_struct.h
71554@@ -6,7 +6,7 @@
71555 #include <linux/seqlock.h>
71556
71557 struct fs_struct {
71558- int users;
71559+ atomic_t users;
71560 spinlock_t lock;
71561 seqcount_t seq;
71562 int umask;
71563diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
71564index 5dfa0aa..6acf322 100644
71565--- a/include/linux/fscache-cache.h
71566+++ b/include/linux/fscache-cache.h
71567@@ -112,7 +112,7 @@ struct fscache_operation {
71568 fscache_operation_release_t release;
71569 };
71570
71571-extern atomic_t fscache_op_debug_id;
71572+extern atomic_unchecked_t fscache_op_debug_id;
71573 extern void fscache_op_work_func(struct work_struct *work);
71574
71575 extern void fscache_enqueue_operation(struct fscache_operation *);
71576@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
71577 INIT_WORK(&op->work, fscache_op_work_func);
71578 atomic_set(&op->usage, 1);
71579 op->state = FSCACHE_OP_ST_INITIALISED;
71580- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
71581+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
71582 op->processor = processor;
71583 op->release = release;
71584 INIT_LIST_HEAD(&op->pend_link);
71585diff --git a/include/linux/fscache.h b/include/linux/fscache.h
71586index 7a08623..4c07b0f 100644
71587--- a/include/linux/fscache.h
71588+++ b/include/linux/fscache.h
71589@@ -152,7 +152,7 @@ struct fscache_cookie_def {
71590 * - this is mandatory for any object that may have data
71591 */
71592 void (*now_uncached)(void *cookie_netfs_data);
71593-};
71594+} __do_const;
71595
71596 /*
71597 * fscache cached network filesystem type
71598diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
71599index a78680a..87bd73e 100644
71600--- a/include/linux/fsnotify.h
71601+++ b/include/linux/fsnotify.h
71602@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
71603 struct inode *inode = path->dentry->d_inode;
71604 __u32 mask = FS_ACCESS;
71605
71606+ if (is_sidechannel_device(inode))
71607+ return;
71608+
71609 if (S_ISDIR(inode->i_mode))
71610 mask |= FS_ISDIR;
71611
71612@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
71613 struct inode *inode = path->dentry->d_inode;
71614 __u32 mask = FS_MODIFY;
71615
71616+ if (is_sidechannel_device(inode))
71617+ return;
71618+
71619 if (S_ISDIR(inode->i_mode))
71620 mask |= FS_ISDIR;
71621
71622@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
71623 */
71624 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
71625 {
71626- return kstrdup(name, GFP_KERNEL);
71627+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
71628 }
71629
71630 /*
71631diff --git a/include/linux/genhd.h b/include/linux/genhd.h
71632index 9f3c275..911b591 100644
71633--- a/include/linux/genhd.h
71634+++ b/include/linux/genhd.h
71635@@ -194,7 +194,7 @@ struct gendisk {
71636 struct kobject *slave_dir;
71637
71638 struct timer_rand_state *random;
71639- atomic_t sync_io; /* RAID */
71640+ atomic_unchecked_t sync_io; /* RAID */
71641 struct disk_events *ev;
71642 #ifdef CONFIG_BLK_DEV_INTEGRITY
71643 struct blk_integrity *integrity;
71644diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
71645index 023bc34..b02b46a 100644
71646--- a/include/linux/genl_magic_func.h
71647+++ b/include/linux/genl_magic_func.h
71648@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
71649 },
71650
71651 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
71652-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
71653+static struct genl_ops ZZZ_genl_ops[] = {
71654 #include GENL_MAGIC_INCLUDE_FILE
71655 };
71656
71657diff --git a/include/linux/gfp.h b/include/linux/gfp.h
71658index 0f615eb..5c3832f 100644
71659--- a/include/linux/gfp.h
71660+++ b/include/linux/gfp.h
71661@@ -35,6 +35,13 @@ struct vm_area_struct;
71662 #define ___GFP_NO_KSWAPD 0x400000u
71663 #define ___GFP_OTHER_NODE 0x800000u
71664 #define ___GFP_WRITE 0x1000000u
71665+
71666+#ifdef CONFIG_PAX_USERCOPY_SLABS
71667+#define ___GFP_USERCOPY 0x2000000u
71668+#else
71669+#define ___GFP_USERCOPY 0
71670+#endif
71671+
71672 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
71673
71674 /*
71675@@ -92,6 +99,7 @@ struct vm_area_struct;
71676 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
71677 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
71678 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
71679+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
71680
71681 /*
71682 * This may seem redundant, but it's a way of annotating false positives vs.
71683@@ -99,7 +107,7 @@ struct vm_area_struct;
71684 */
71685 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
71686
71687-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
71688+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
71689 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
71690
71691 /* This equals 0, but use constants in case they ever change */
71692@@ -153,6 +161,8 @@ struct vm_area_struct;
71693 /* 4GB DMA on some platforms */
71694 #define GFP_DMA32 __GFP_DMA32
71695
71696+#define GFP_USERCOPY __GFP_USERCOPY
71697+
71698 /* Convert GFP flags to their corresponding migrate type */
71699 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
71700 {
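___GFP_USERCOPY consumes the next free GFP bit, which is why __GFP_BITS_SHIFT grows from 25 to 26. Under PAX_USERCOPY_SLABS it steers an allocation into slab caches earmarked for user/kernel copies so the USERCOPY checker can whitelist them. Sketch (buffer and length are illustrative):

	buf = kmalloc(len, GFP_KERNEL | GFP_USERCOPY);
	if (!buf)
		return -ENOMEM;
	if (copy_to_user(ubuf, buf, len))	/* whitelisted slab */
		err = -EFAULT;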
71701diff --git a/include/linux/gracl.h b/include/linux/gracl.h
71702new file mode 100644
71703index 0000000..ebe6d72
71704--- /dev/null
71705+++ b/include/linux/gracl.h
71706@@ -0,0 +1,319 @@
71707+#ifndef GR_ACL_H
71708+#define GR_ACL_H
71709+
71710+#include <linux/grdefs.h>
71711+#include <linux/resource.h>
71712+#include <linux/capability.h>
71713+#include <linux/dcache.h>
71714+#include <asm/resource.h>
71715+
71716+/* Major status information */
71717+
71718+#define GR_VERSION "grsecurity 2.9.1"
71719+#define GRSECURITY_VERSION 0x2901
71720+
71721+enum {
71722+ GR_SHUTDOWN = 0,
71723+ GR_ENABLE = 1,
71724+ GR_SPROLE = 2,
71725+ GR_RELOAD = 3,
71726+ GR_SEGVMOD = 4,
71727+ GR_STATUS = 5,
71728+ GR_UNSPROLE = 6,
71729+ GR_PASSSET = 7,
71730+ GR_SPROLEPAM = 8,
71731+};
71732+
71733+/* Password setup definitions
71734+ * kernel/grhash.c */
71735+enum {
71736+ GR_PW_LEN = 128,
71737+ GR_SALT_LEN = 16,
71738+ GR_SHA_LEN = 32,
71739+};
71740+
71741+enum {
71742+ GR_SPROLE_LEN = 64,
71743+};
71744+
71745+enum {
71746+ GR_NO_GLOB = 0,
71747+ GR_REG_GLOB,
71748+ GR_CREATE_GLOB
71749+};
71750+
71751+#define GR_NLIMITS 32
71752+
71753+/* Begin Data Structures */
71754+
71755+struct sprole_pw {
71756+ unsigned char *rolename;
71757+ unsigned char salt[GR_SALT_LEN];
71758+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
71759+};
71760+
71761+struct name_entry {
71762+ __u32 key;
71763+ ino_t inode;
71764+ dev_t device;
71765+ char *name;
71766+ __u16 len;
71767+ __u8 deleted;
71768+ struct name_entry *prev;
71769+ struct name_entry *next;
71770+};
71771+
71772+struct inodev_entry {
71773+ struct name_entry *nentry;
71774+ struct inodev_entry *prev;
71775+ struct inodev_entry *next;
71776+};
71777+
71778+struct acl_role_db {
71779+ struct acl_role_label **r_hash;
71780+ __u32 r_size;
71781+};
71782+
71783+struct inodev_db {
71784+ struct inodev_entry **i_hash;
71785+ __u32 i_size;
71786+};
71787+
71788+struct name_db {
71789+ struct name_entry **n_hash;
71790+ __u32 n_size;
71791+};
71792+
71793+struct crash_uid {
71794+ uid_t uid;
71795+ unsigned long expires;
71796+};
71797+
71798+struct gr_hash_struct {
71799+ void **table;
71800+ void **nametable;
71801+ void *first;
71802+ __u32 table_size;
71803+ __u32 used_size;
71804+ int type;
71805+};
71806+
71807+/* Userspace Grsecurity ACL data structures */
71808+
71809+struct acl_subject_label {
71810+ char *filename;
71811+ ino_t inode;
71812+ dev_t device;
71813+ __u32 mode;
71814+ kernel_cap_t cap_mask;
71815+ kernel_cap_t cap_lower;
71816+ kernel_cap_t cap_invert_audit;
71817+
71818+ struct rlimit res[GR_NLIMITS];
71819+ __u32 resmask;
71820+
71821+ __u8 user_trans_type;
71822+ __u8 group_trans_type;
71823+ uid_t *user_transitions;
71824+ gid_t *group_transitions;
71825+ __u16 user_trans_num;
71826+ __u16 group_trans_num;
71827+
71828+ __u32 sock_families[2];
71829+ __u32 ip_proto[8];
71830+ __u32 ip_type;
71831+ struct acl_ip_label **ips;
71832+ __u32 ip_num;
71833+ __u32 inaddr_any_override;
71834+
71835+ __u32 crashes;
71836+ unsigned long expires;
71837+
71838+ struct acl_subject_label *parent_subject;
71839+ struct gr_hash_struct *hash;
71840+ struct acl_subject_label *prev;
71841+ struct acl_subject_label *next;
71842+
71843+ struct acl_object_label **obj_hash;
71844+ __u32 obj_hash_size;
71845+ __u16 pax_flags;
71846+};
71847+
71848+struct role_allowed_ip {
71849+ __u32 addr;
71850+ __u32 netmask;
71851+
71852+ struct role_allowed_ip *prev;
71853+ struct role_allowed_ip *next;
71854+};
71855+
71856+struct role_transition {
71857+ char *rolename;
71858+
71859+ struct role_transition *prev;
71860+ struct role_transition *next;
71861+};
71862+
71863+struct acl_role_label {
71864+ char *rolename;
71865+ uid_t uidgid;
71866+ __u16 roletype;
71867+
71868+ __u16 auth_attempts;
71869+ unsigned long expires;
71870+
71871+ struct acl_subject_label *root_label;
71872+ struct gr_hash_struct *hash;
71873+
71874+ struct acl_role_label *prev;
71875+ struct acl_role_label *next;
71876+
71877+ struct role_transition *transitions;
71878+ struct role_allowed_ip *allowed_ips;
71879+ uid_t *domain_children;
71880+ __u16 domain_child_num;
71881+
71882+ umode_t umask;
71883+
71884+ struct acl_subject_label **subj_hash;
71885+ __u32 subj_hash_size;
71886+};
71887+
71888+struct user_acl_role_db {
71889+ struct acl_role_label **r_table;
71890+ __u32 num_pointers; /* Number of allocations to track */
71891+ __u32 num_roles; /* Number of roles */
71892+ __u32 num_domain_children; /* Number of domain children */
71893+ __u32 num_subjects; /* Number of subjects */
71894+ __u32 num_objects; /* Number of objects */
71895+};
71896+
71897+struct acl_object_label {
71898+ char *filename;
71899+ ino_t inode;
71900+ dev_t device;
71901+ __u32 mode;
71902+
71903+ struct acl_subject_label *nested;
71904+ struct acl_object_label *globbed;
71905+
71906+ /* next two structures not used */
71907+
71908+ struct acl_object_label *prev;
71909+ struct acl_object_label *next;
71910+};
71911+
71912+struct acl_ip_label {
71913+ char *iface;
71914+ __u32 addr;
71915+ __u32 netmask;
71916+ __u16 low, high;
71917+ __u8 mode;
71918+ __u32 type;
71919+ __u32 proto[8];
71920+
71921+ /* next two structures not used */
71922+
71923+ struct acl_ip_label *prev;
71924+ struct acl_ip_label *next;
71925+};
71926+
71927+struct gr_arg {
71928+ struct user_acl_role_db role_db;
71929+ unsigned char pw[GR_PW_LEN];
71930+ unsigned char salt[GR_SALT_LEN];
71931+ unsigned char sum[GR_SHA_LEN];
71932+ unsigned char sp_role[GR_SPROLE_LEN];
71933+ struct sprole_pw *sprole_pws;
71934+ dev_t segv_device;
71935+ ino_t segv_inode;
71936+ uid_t segv_uid;
71937+ __u16 num_sprole_pws;
71938+ __u16 mode;
71939+};
71940+
71941+struct gr_arg_wrapper {
71942+ struct gr_arg *arg;
71943+ __u32 version;
71944+ __u32 size;
71945+};
71946+
71947+struct subject_map {
71948+ struct acl_subject_label *user;
71949+ struct acl_subject_label *kernel;
71950+ struct subject_map *prev;
71951+ struct subject_map *next;
71952+};
71953+
71954+struct acl_subj_map_db {
71955+ struct subject_map **s_hash;
71956+ __u32 s_size;
71957+};
71958+
71959+/* End Data Structures Section */
71960+
71961+/* Hash functions generated by empirical testing by Brad Spengler.
71962+   They make good use of the low bits of the inode: generally 0-1 loop
71963+   iterations for a successful match, 0-3 for an unsuccessful one.
71964+   Shift/add algorithm with modulus of table size and an XOR. */
71965+
71966+static __inline__ unsigned int
71967+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
71968+{
71969+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
71970+}
71971+
71972+static __inline__ unsigned int
71973+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
71974+{
71975+ return ((const unsigned long)userp % sz);
71976+}
71977+
71978+static __inline__ unsigned int
71979+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
71980+{
71981+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
71982+}
71983+
71984+static __inline__ unsigned int
71985+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
71986+{
71987+ return full_name_hash((const unsigned char *)name, len) % sz;
71988+}
71989+
71990+#define FOR_EACH_ROLE_START(role) \
71991+ role = role_list; \
71992+ while (role) {
71993+
71994+#define FOR_EACH_ROLE_END(role) \
71995+ role = role->prev; \
71996+ }
71997+
71998+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
71999+ subj = NULL; \
72000+ iter = 0; \
72001+ while (iter < role->subj_hash_size) { \
72002+ if (subj == NULL) \
72003+ subj = role->subj_hash[iter]; \
72004+ if (subj == NULL) { \
72005+ iter++; \
72006+ continue; \
72007+ }
72008+
72009+#define FOR_EACH_SUBJECT_END(subj,iter) \
72010+ subj = subj->next; \
72011+ if (subj == NULL) \
72012+ iter++; \
72013+ }
72014+
72015+
72016+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
72017+ subj = role->hash->first; \
72018+ while (subj != NULL) {
72019+
72020+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
72021+ subj = subj->next; \
72022+ }
72023+
72024+#endif
72025+
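The iterator macros above expand to open-coded walks of the chained hash tables; a minimal sketch of scanning a role's subject hash with them (role, ino and dev are assumed to be in scope, an illustration rather than code from the patch):

	struct acl_subject_label *subj;
	unsigned int iter;

	FOR_EACH_SUBJECT_START(role, subj, iter)
		if (subj->inode == ino && subj->device == dev)
			return subj;
	FOR_EACH_SUBJECT_END(subj, iter)

	return NULL;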
72026diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
72027new file mode 100644
72028index 0000000..33ebd1f
72029--- /dev/null
72030+++ b/include/linux/gracl_compat.h
72031@@ -0,0 +1,156 @@
72032+#ifndef GR_ACL_COMPAT_H
72033+#define GR_ACL_COMPAT_H
72034+
72035+#include <linux/resource.h>
72036+#include <asm/resource.h>
72037+
72038+struct sprole_pw_compat {
72039+ compat_uptr_t rolename;
72040+ unsigned char salt[GR_SALT_LEN];
72041+ unsigned char sum[GR_SHA_LEN];
72042+};
72043+
72044+struct gr_hash_struct_compat {
72045+ compat_uptr_t table;
72046+ compat_uptr_t nametable;
72047+ compat_uptr_t first;
72048+ __u32 table_size;
72049+ __u32 used_size;
72050+ int type;
72051+};
72052+
72053+struct acl_subject_label_compat {
72054+ compat_uptr_t filename;
72055+ compat_ino_t inode;
72056+ __u32 device;
72057+ __u32 mode;
72058+ kernel_cap_t cap_mask;
72059+ kernel_cap_t cap_lower;
72060+ kernel_cap_t cap_invert_audit;
72061+
72062+ struct compat_rlimit res[GR_NLIMITS];
72063+ __u32 resmask;
72064+
72065+ __u8 user_trans_type;
72066+ __u8 group_trans_type;
72067+ compat_uptr_t user_transitions;
72068+ compat_uptr_t group_transitions;
72069+ __u16 user_trans_num;
72070+ __u16 group_trans_num;
72071+
72072+ __u32 sock_families[2];
72073+ __u32 ip_proto[8];
72074+ __u32 ip_type;
72075+ compat_uptr_t ips;
72076+ __u32 ip_num;
72077+ __u32 inaddr_any_override;
72078+
72079+ __u32 crashes;
72080+ compat_ulong_t expires;
72081+
72082+ compat_uptr_t parent_subject;
72083+ compat_uptr_t hash;
72084+ compat_uptr_t prev;
72085+ compat_uptr_t next;
72086+
72087+ compat_uptr_t obj_hash;
72088+ __u32 obj_hash_size;
72089+ __u16 pax_flags;
72090+};
72091+
72092+struct role_allowed_ip_compat {
72093+ __u32 addr;
72094+ __u32 netmask;
72095+
72096+ compat_uptr_t prev;
72097+ compat_uptr_t next;
72098+};
72099+
72100+struct role_transition_compat {
72101+ compat_uptr_t rolename;
72102+
72103+ compat_uptr_t prev;
72104+ compat_uptr_t next;
72105+};
72106+
72107+struct acl_role_label_compat {
72108+ compat_uptr_t rolename;
72109+ uid_t uidgid;
72110+ __u16 roletype;
72111+
72112+ __u16 auth_attempts;
72113+ compat_ulong_t expires;
72114+
72115+ compat_uptr_t root_label;
72116+ compat_uptr_t hash;
72117+
72118+ compat_uptr_t prev;
72119+ compat_uptr_t next;
72120+
72121+ compat_uptr_t transitions;
72122+ compat_uptr_t allowed_ips;
72123+ compat_uptr_t domain_children;
72124+ __u16 domain_child_num;
72125+
72126+ umode_t umask;
72127+
72128+ compat_uptr_t subj_hash;
72129+ __u32 subj_hash_size;
72130+};
72131+
72132+struct user_acl_role_db_compat {
72133+ compat_uptr_t r_table;
72134+ __u32 num_pointers;
72135+ __u32 num_roles;
72136+ __u32 num_domain_children;
72137+ __u32 num_subjects;
72138+ __u32 num_objects;
72139+};
72140+
72141+struct acl_object_label_compat {
72142+ compat_uptr_t filename;
72143+ compat_ino_t inode;
72144+ __u32 device;
72145+ __u32 mode;
72146+
72147+ compat_uptr_t nested;
72148+ compat_uptr_t globbed;
72149+
72150+ compat_uptr_t prev;
72151+ compat_uptr_t next;
72152+};
72153+
72154+struct acl_ip_label_compat {
72155+ compat_uptr_t iface;
72156+ __u32 addr;
72157+ __u32 netmask;
72158+ __u16 low, high;
72159+ __u8 mode;
72160+ __u32 type;
72161+ __u32 proto[8];
72162+
72163+ compat_uptr_t prev;
72164+ compat_uptr_t next;
72165+};
72166+
72167+struct gr_arg_compat {
72168+ struct user_acl_role_db_compat role_db;
72169+ unsigned char pw[GR_PW_LEN];
72170+ unsigned char salt[GR_SALT_LEN];
72171+ unsigned char sum[GR_SHA_LEN];
72172+ unsigned char sp_role[GR_SPROLE_LEN];
72173+ compat_uptr_t sprole_pws;
72174+ __u32 segv_device;
72175+ compat_ino_t segv_inode;
72176+ uid_t segv_uid;
72177+ __u16 num_sprole_pws;
72178+ __u16 mode;
72179+};
72180+
72181+struct gr_arg_wrapper_compat {
72182+ compat_uptr_t arg;
72183+ __u32 version;
72184+ __u32 size;
72185+};
72186+
72187+#endif
72188diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
72189new file mode 100644
72190index 0000000..323ecf2
72191--- /dev/null
72192+++ b/include/linux/gralloc.h
72193@@ -0,0 +1,9 @@
72194+#ifndef __GRALLOC_H
72195+#define __GRALLOC_H
72196+
72197+void acl_free_all(void);
72198+int acl_alloc_stack_init(unsigned long size);
72199+void *acl_alloc(unsigned long len);
72200+void *acl_alloc_num(unsigned long num, unsigned long len);
72201+
72202+#endif
72203diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
72204new file mode 100644
72205index 0000000..be66033
72206--- /dev/null
72207+++ b/include/linux/grdefs.h
72208@@ -0,0 +1,140 @@
72209+#ifndef GRDEFS_H
72210+#define GRDEFS_H
72211+
72212+/* Begin grsecurity status declarations */
72213+
72214+enum {
72215+ GR_READY = 0x01,
72216+ GR_STATUS_INIT = 0x00 // disabled state
72217+};
72218+
72219+/* Begin ACL declarations */
72220+
72221+/* Role flags */
72222+
72223+enum {
72224+ GR_ROLE_USER = 0x0001,
72225+ GR_ROLE_GROUP = 0x0002,
72226+ GR_ROLE_DEFAULT = 0x0004,
72227+ GR_ROLE_SPECIAL = 0x0008,
72228+ GR_ROLE_AUTH = 0x0010,
72229+ GR_ROLE_NOPW = 0x0020,
72230+ GR_ROLE_GOD = 0x0040,
72231+ GR_ROLE_LEARN = 0x0080,
72232+ GR_ROLE_TPE = 0x0100,
72233+ GR_ROLE_DOMAIN = 0x0200,
72234+ GR_ROLE_PAM = 0x0400,
72235+ GR_ROLE_PERSIST = 0x0800
72236+};
72237+
72238+/* ACL Subject and Object mode flags */
72239+enum {
72240+ GR_DELETED = 0x80000000
72241+};
72242+
72243+/* ACL Object-only mode flags */
72244+enum {
72245+ GR_READ = 0x00000001,
72246+ GR_APPEND = 0x00000002,
72247+ GR_WRITE = 0x00000004,
72248+ GR_EXEC = 0x00000008,
72249+ GR_FIND = 0x00000010,
72250+ GR_INHERIT = 0x00000020,
72251+ GR_SETID = 0x00000040,
72252+ GR_CREATE = 0x00000080,
72253+ GR_DELETE = 0x00000100,
72254+ GR_LINK = 0x00000200,
72255+ GR_AUDIT_READ = 0x00000400,
72256+ GR_AUDIT_APPEND = 0x00000800,
72257+ GR_AUDIT_WRITE = 0x00001000,
72258+ GR_AUDIT_EXEC = 0x00002000,
72259+ GR_AUDIT_FIND = 0x00004000,
72260+ GR_AUDIT_INHERIT= 0x00008000,
72261+ GR_AUDIT_SETID = 0x00010000,
72262+ GR_AUDIT_CREATE = 0x00020000,
72263+ GR_AUDIT_DELETE = 0x00040000,
72264+ GR_AUDIT_LINK = 0x00080000,
72265+ GR_PTRACERD = 0x00100000,
72266+ GR_NOPTRACE = 0x00200000,
72267+ GR_SUPPRESS = 0x00400000,
72268+ GR_NOLEARN = 0x00800000,
72269+ GR_INIT_TRANSFER= 0x01000000
72270+};
72271+
72272+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
72273+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
72274+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
72275+
72276+/* ACL subject-only mode flags */
72277+enum {
72278+ GR_KILL = 0x00000001,
72279+ GR_VIEW = 0x00000002,
72280+ GR_PROTECTED = 0x00000004,
72281+ GR_LEARN = 0x00000008,
72282+ GR_OVERRIDE = 0x00000010,
72283+ /* just a placeholder, this mode is only used in userspace */
72284+ GR_DUMMY = 0x00000020,
72285+ GR_PROTSHM = 0x00000040,
72286+ GR_KILLPROC = 0x00000080,
72287+ GR_KILLIPPROC = 0x00000100,
72288+ /* just a placeholder, this mode is only used in userspace */
72289+ GR_NOTROJAN = 0x00000200,
72290+ GR_PROTPROCFD = 0x00000400,
72291+ GR_PROCACCT = 0x00000800,
72292+ GR_RELAXPTRACE = 0x00001000,
72293+ //GR_NESTED = 0x00002000,
72294+ GR_INHERITLEARN = 0x00004000,
72295+ GR_PROCFIND = 0x00008000,
72296+ GR_POVERRIDE = 0x00010000,
72297+ GR_KERNELAUTH = 0x00020000,
72298+ GR_ATSECURE = 0x00040000,
72299+ GR_SHMEXEC = 0x00080000
72300+};
72301+
72302+enum {
72303+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
72304+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
72305+ GR_PAX_ENABLE_MPROTECT = 0x0004,
72306+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
72307+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
72308+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
72309+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
72310+ GR_PAX_DISABLE_MPROTECT = 0x0400,
72311+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
72312+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
72313+};
72314+
72315+enum {
72316+ GR_ID_USER = 0x01,
72317+ GR_ID_GROUP = 0x02,
72318+};
72319+
72320+enum {
72321+ GR_ID_ALLOW = 0x01,
72322+ GR_ID_DENY = 0x02,
72323+};
72324+
72325+#define GR_CRASH_RES 31
72326+#define GR_UIDTABLE_MAX 500
72327+
72328+/* begin resource learning section */
72329+enum {
72330+ GR_RLIM_CPU_BUMP = 60,
72331+ GR_RLIM_FSIZE_BUMP = 50000,
72332+ GR_RLIM_DATA_BUMP = 10000,
72333+ GR_RLIM_STACK_BUMP = 1000,
72334+ GR_RLIM_CORE_BUMP = 10000,
72335+ GR_RLIM_RSS_BUMP = 500000,
72336+ GR_RLIM_NPROC_BUMP = 1,
72337+ GR_RLIM_NOFILE_BUMP = 5,
72338+ GR_RLIM_MEMLOCK_BUMP = 50000,
72339+ GR_RLIM_AS_BUMP = 500000,
72340+ GR_RLIM_LOCKS_BUMP = 2,
72341+ GR_RLIM_SIGPENDING_BUMP = 5,
72342+ GR_RLIM_MSGQUEUE_BUMP = 10000,
72343+ GR_RLIM_NICE_BUMP = 1,
72344+ GR_RLIM_RTPRIO_BUMP = 1,
72345+ GR_RLIM_RTTIME_BUMP = 1000000
72346+};
72347+
72348+#endif
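
The object-mode and audit-mode flags above are deliberately laid out ten bits apart (GR_READ is 0x1, GR_AUDIT_READ is 0x400, and so on through GR_LINK/GR_AUDIT_LINK), so a requested access mode can be mapped to its audit counterpart with a single shift. A minimal sketch of that mapping, assuming to_gr_audit() (declared in grinternal.h below) relies on this layout; the helper name and shift constant here are illustrative, not part of the patch:

	#define GR_AUDIT_SHIFT 10	/* illustrative constant */

	static inline __u32 to_gr_audit_sketch(const __u32 reqmode)
	{
		/* keep only the flags that have GR_AUDIT_* twins, then shift up */
		return (reqmode & (GR_READ | GR_APPEND | GR_WRITE | GR_EXEC |
				   GR_FIND | GR_INHERIT | GR_SETID | GR_CREATE |
				   GR_DELETE | GR_LINK)) << GR_AUDIT_SHIFT;
	}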
72349diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
72350new file mode 100644
72351index 0000000..fd8598b
72352--- /dev/null
72353+++ b/include/linux/grinternal.h
72354@@ -0,0 +1,228 @@
72355+#ifndef __GRINTERNAL_H
72356+#define __GRINTERNAL_H
72357+
72358+#ifdef CONFIG_GRKERNSEC
72359+
72360+#include <linux/fs.h>
72361+#include <linux/mnt_namespace.h>
72362+#include <linux/nsproxy.h>
72363+#include <linux/gracl.h>
72364+#include <linux/grdefs.h>
72365+#include <linux/grmsg.h>
72366+
72367+void gr_add_learn_entry(const char *fmt, ...)
72368+ __attribute__ ((format (printf, 1, 2)));
72369+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
72370+ const struct vfsmount *mnt);
72371+__u32 gr_check_create(const struct dentry *new_dentry,
72372+ const struct dentry *parent,
72373+ const struct vfsmount *mnt, const __u32 mode);
72374+int gr_check_protected_task(const struct task_struct *task);
72375+__u32 to_gr_audit(const __u32 reqmode);
72376+int gr_set_acls(const int type);
72377+int gr_apply_subject_to_task(struct task_struct *task);
72378+int gr_acl_is_enabled(void);
72379+char gr_roletype_to_char(void);
72380+
72381+void gr_handle_alertkill(struct task_struct *task);
72382+char *gr_to_filename(const struct dentry *dentry,
72383+ const struct vfsmount *mnt);
72384+char *gr_to_filename1(const struct dentry *dentry,
72385+ const struct vfsmount *mnt);
72386+char *gr_to_filename2(const struct dentry *dentry,
72387+ const struct vfsmount *mnt);
72388+char *gr_to_filename3(const struct dentry *dentry,
72389+ const struct vfsmount *mnt);
72390+
72391+extern int grsec_enable_ptrace_readexec;
72392+extern int grsec_enable_harden_ptrace;
72393+extern int grsec_enable_link;
72394+extern int grsec_enable_fifo;
72395+extern int grsec_enable_execve;
72396+extern int grsec_enable_shm;
72397+extern int grsec_enable_execlog;
72398+extern int grsec_enable_signal;
72399+extern int grsec_enable_audit_ptrace;
72400+extern int grsec_enable_forkfail;
72401+extern int grsec_enable_time;
72402+extern int grsec_enable_rofs;
72403+extern int grsec_enable_chroot_shmat;
72404+extern int grsec_enable_chroot_mount;
72405+extern int grsec_enable_chroot_double;
72406+extern int grsec_enable_chroot_pivot;
72407+extern int grsec_enable_chroot_chdir;
72408+extern int grsec_enable_chroot_chmod;
72409+extern int grsec_enable_chroot_mknod;
72410+extern int grsec_enable_chroot_fchdir;
72411+extern int grsec_enable_chroot_nice;
72412+extern int grsec_enable_chroot_execlog;
72413+extern int grsec_enable_chroot_caps;
72414+extern int grsec_enable_chroot_sysctl;
72415+extern int grsec_enable_chroot_unix;
72416+extern int grsec_enable_symlinkown;
72417+extern kgid_t grsec_symlinkown_gid;
72418+extern int grsec_enable_tpe;
72419+extern kgid_t grsec_tpe_gid;
72420+extern int grsec_enable_tpe_all;
72421+extern int grsec_enable_tpe_invert;
72422+extern int grsec_enable_socket_all;
72423+extern kgid_t grsec_socket_all_gid;
72424+extern int grsec_enable_socket_client;
72425+extern kgid_t grsec_socket_client_gid;
72426+extern int grsec_enable_socket_server;
72427+extern kgid_t grsec_socket_server_gid;
72428+extern kgid_t grsec_audit_gid;
72429+extern int grsec_enable_group;
72430+extern int grsec_enable_log_rwxmaps;
72431+extern int grsec_enable_mount;
72432+extern int grsec_enable_chdir;
72433+extern int grsec_resource_logging;
72434+extern int grsec_enable_blackhole;
72435+extern int grsec_lastack_retries;
72436+extern int grsec_enable_brute;
72437+extern int grsec_lock;
72438+
72439+extern spinlock_t grsec_alert_lock;
72440+extern unsigned long grsec_alert_wtime;
72441+extern unsigned long grsec_alert_fyet;
72442+
72443+extern spinlock_t grsec_audit_lock;
72444+
72445+extern rwlock_t grsec_exec_file_lock;
72446+
72447+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
72448+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
72449+ (tsk)->exec_file->f_path.mnt) : "/")
72450+
72451+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
72452+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
72453+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
72454+
72455+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
72456+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
72457+ (tsk)->exec_file->f_path.mnt) : "/")
72458+
72459+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
72460+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
72461+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
72462+
72463+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
72464+
72465+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
72466+
72467+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
72468+{
72469+ if (file1 && file2) {
72470+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
72471+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
72472+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
72473+ return true;
72474+ }
72475+
72476+ return false;
72477+}
72478+
72479+#define GR_CHROOT_CAPS {{ \
72480+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
72481+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
72482+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
72483+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
72484+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
72485+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
72486+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
72487+
72488+#define security_learn(normal_msg,args...) \
72489+({ \
72490+ read_lock(&grsec_exec_file_lock); \
72491+ gr_add_learn_entry(normal_msg "\n", ## args); \
72492+ read_unlock(&grsec_exec_file_lock); \
72493+})
72494+
72495+enum {
72496+ GR_DO_AUDIT,
72497+ GR_DONT_AUDIT,
72498+	/* non-audit messages that should not get the task killed */
72499+ GR_DONT_AUDIT_GOOD
72500+};
72501+
72502+enum {
72503+ GR_TTYSNIFF,
72504+ GR_RBAC,
72505+ GR_RBAC_STR,
72506+ GR_STR_RBAC,
72507+ GR_RBAC_MODE2,
72508+ GR_RBAC_MODE3,
72509+ GR_FILENAME,
72510+ GR_SYSCTL_HIDDEN,
72511+ GR_NOARGS,
72512+ GR_ONE_INT,
72513+ GR_ONE_INT_TWO_STR,
72514+ GR_ONE_STR,
72515+ GR_STR_INT,
72516+ GR_TWO_STR_INT,
72517+ GR_TWO_INT,
72518+ GR_TWO_U64,
72519+ GR_THREE_INT,
72520+ GR_FIVE_INT_TWO_STR,
72521+ GR_TWO_STR,
72522+ GR_THREE_STR,
72523+ GR_FOUR_STR,
72524+ GR_STR_FILENAME,
72525+ GR_FILENAME_STR,
72526+ GR_FILENAME_TWO_INT,
72527+ GR_FILENAME_TWO_INT_STR,
72528+ GR_TEXTREL,
72529+ GR_PTRACE,
72530+ GR_RESOURCE,
72531+ GR_CAP,
72532+ GR_SIG,
72533+ GR_SIG2,
72534+ GR_CRASH1,
72535+ GR_CRASH2,
72536+ GR_PSACCT,
72537+ GR_RWXMAP,
72538+ GR_RWXMAPVMA
72539+};
72540+
72541+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
72542+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
72543+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
72544+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
72545+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
72546+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
72547+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
72548+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
72549+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
72550+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
72551+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
72552+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
72553+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
72554+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
72555+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
72556+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
72557+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
72558+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
72559+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
72560+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
72561+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
72562+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
72563+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
72564+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
72565+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
72566+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
72567+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
72568+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
72569+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
72570+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
72571+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
72572+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
72573+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
72574+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
72575+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
72576+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
72577+
72578+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
72579+
72580+#endif
72581+
72582+#endif
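
Every gr_log_* wrapper above is a thin veneer over gr_log_varargs(): the GR_* tag tells the logger how to interpret the trailing arguments. An illustrative call site (the function and its dentry/mnt parameters are hypothetical), pairing one wrapper with a format constant from grmsg.h:

	static void example_audit_chdir(const struct dentry *dentry,
					const struct vfsmount *mnt)
	{
		/* expands to gr_log_varargs(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG,
		 *                           GR_FILENAME, dentry, mnt) */
		gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
	}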
72583diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
72584new file mode 100644
72585index 0000000..a4396b5
72586--- /dev/null
72587+++ b/include/linux/grmsg.h
72588@@ -0,0 +1,113 @@
72589+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
72590+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
72591+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
72592+#define GR_STOPMOD_MSG "denied modification of module state by "
72593+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
72594+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
72595+#define GR_IOPERM_MSG "denied use of ioperm() by "
72596+#define GR_IOPL_MSG "denied use of iopl() by "
72597+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
72598+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
72599+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
72600+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
72601+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
72602+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
72603+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
72604+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
72605+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
72606+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
72607+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
72608+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
72609+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
72610+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
72611+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
72612+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
72613+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
72614+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
72615+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
72616+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
72617+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
72618+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
72619+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
72620+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
72621+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
72622+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
72623+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
72624+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
72625+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
72626+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
72627+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
72628+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
72629+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
72630+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
72631+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
72632+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
72633+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
72634+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
72635+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
72636+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
72637+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
72638+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
72639+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
72640+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
72641+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
72642+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
72643+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
72644+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
72645+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
72646+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
72647+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
72648+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
72649+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
72650+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
72651+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
72652+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
72653+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
72654+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
72655+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
72656+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
72657+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
72658+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
72659+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
72660+#define GR_FAILFORK_MSG "failed fork with errno %s by "
72661+#define GR_NICE_CHROOT_MSG "denied priority change by "
72662+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
72663+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
72664+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
72665+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
72666+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
72667+#define GR_TIME_MSG "time set by "
72668+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
72669+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
72670+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
72671+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
72672+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
72673+#define GR_BIND_MSG "denied bind() by "
72674+#define GR_CONNECT_MSG "denied connect() by "
72675+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
72676+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
72677+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
72678+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
72679+#define GR_CAP_ACL_MSG "use of %s denied for "
72680+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
72681+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
72682+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
72683+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
72684+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
72685+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
72686+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
72687+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
72688+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
72689+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
72690+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
72691+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
72692+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
72693+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
72694+#define GR_VM86_MSG "denied use of vm86 by "
72695+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
72696+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
72697+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
72698+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
72699+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
72700+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
72701+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
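
Nearly every format above ends in "by " (or embeds DEFAULTSECMSG) because the identity of the acting task is appended at log time rather than written into each message. The mechanism is plain string-literal concatenation; how the suffix combines is shown here with GR_TIME_MSG (the declaration itself is illustrative):

	/* "time set by " + DEFAULTSECMSG pastes together at compile time: */
	static const char example_fmt[] = GR_TIME_MSG DEFAULTSECMSG;
	/* -> "time set by %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, ..." */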
72702diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
72703new file mode 100644
72704index 0000000..3676b0b
72705--- /dev/null
72706+++ b/include/linux/grsecurity.h
72707@@ -0,0 +1,242 @@
72708+#ifndef GR_SECURITY_H
72709+#define GR_SECURITY_H
72710+#include <linux/fs.h>
72711+#include <linux/fs_struct.h>
72712+#include <linux/binfmts.h>
72713+#include <linux/gracl.h>
72714+
72715+/* notify of brain-dead configs */
72716+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72717+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
72718+#endif
72719+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
72720+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
72721+#endif
72722+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
72723+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
72724+#endif
72725+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
72726+#error "CONFIG_PAX enabled, but no PaX options are enabled."
72727+#endif
72728+
72729+void gr_handle_brute_attach(unsigned long mm_flags);
72730+void gr_handle_brute_check(void);
72731+void gr_handle_kernel_exploit(void);
72732+
72733+char gr_roletype_to_char(void);
72734+
72735+int gr_acl_enable_at_secure(void);
72736+
72737+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
72738+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
72739+
72740+void gr_del_task_from_ip_table(struct task_struct *p);
72741+
72742+int gr_pid_is_chrooted(struct task_struct *p);
72743+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
72744+int gr_handle_chroot_nice(void);
72745+int gr_handle_chroot_sysctl(const int op);
72746+int gr_handle_chroot_setpriority(struct task_struct *p,
72747+ const int niceval);
72748+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
72749+int gr_handle_chroot_chroot(const struct dentry *dentry,
72750+ const struct vfsmount *mnt);
72751+void gr_handle_chroot_chdir(const struct path *path);
72752+int gr_handle_chroot_chmod(const struct dentry *dentry,
72753+ const struct vfsmount *mnt, const int mode);
72754+int gr_handle_chroot_mknod(const struct dentry *dentry,
72755+ const struct vfsmount *mnt, const int mode);
72756+int gr_handle_chroot_mount(const struct dentry *dentry,
72757+ const struct vfsmount *mnt,
72758+ const char *dev_name);
72759+int gr_handle_chroot_pivot(void);
72760+int gr_handle_chroot_unix(const pid_t pid);
72761+
72762+int gr_handle_rawio(const struct inode *inode);
72763+
72764+void gr_handle_ioperm(void);
72765+void gr_handle_iopl(void);
72766+
72767+umode_t gr_acl_umask(void);
72768+
72769+int gr_tpe_allow(const struct file *file);
72770+
72771+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
72772+void gr_clear_chroot_entries(struct task_struct *task);
72773+
72774+void gr_log_forkfail(const int retval);
72775+void gr_log_timechange(void);
72776+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
72777+void gr_log_chdir(const struct dentry *dentry,
72778+ const struct vfsmount *mnt);
72779+void gr_log_chroot_exec(const struct dentry *dentry,
72780+ const struct vfsmount *mnt);
72781+void gr_log_remount(const char *devname, const int retval);
72782+void gr_log_unmount(const char *devname, const int retval);
72783+void gr_log_mount(const char *from, const char *to, const int retval);
72784+void gr_log_textrel(struct vm_area_struct *vma);
72785+void gr_log_ptgnustack(struct file *file);
72786+void gr_log_rwxmmap(struct file *file);
72787+void gr_log_rwxmprotect(struct vm_area_struct *vma);
72788+
72789+int gr_handle_follow_link(const struct inode *parent,
72790+ const struct inode *inode,
72791+ const struct dentry *dentry,
72792+ const struct vfsmount *mnt);
72793+int gr_handle_fifo(const struct dentry *dentry,
72794+ const struct vfsmount *mnt,
72795+ const struct dentry *dir, const int flag,
72796+ const int acc_mode);
72797+int gr_handle_hardlink(const struct dentry *dentry,
72798+ const struct vfsmount *mnt,
72799+ struct inode *inode,
72800+ const int mode, const struct filename *to);
72801+
72802+int gr_is_capable(const int cap);
72803+int gr_is_capable_nolog(const int cap);
72804+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
72805+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
72806+
72807+void gr_copy_label(struct task_struct *tsk);
72808+void gr_handle_crash(struct task_struct *task, const int sig);
72809+int gr_handle_signal(const struct task_struct *p, const int sig);
72810+int gr_check_crash_uid(const kuid_t uid);
72811+int gr_check_protected_task(const struct task_struct *task);
72812+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
72813+int gr_acl_handle_mmap(const struct file *file,
72814+ const unsigned long prot);
72815+int gr_acl_handle_mprotect(const struct file *file,
72816+ const unsigned long prot);
72817+int gr_check_hidden_task(const struct task_struct *tsk);
72818+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
72819+ const struct vfsmount *mnt);
72820+__u32 gr_acl_handle_utime(const struct dentry *dentry,
72821+ const struct vfsmount *mnt);
72822+__u32 gr_acl_handle_access(const struct dentry *dentry,
72823+ const struct vfsmount *mnt, const int fmode);
72824+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
72825+ const struct vfsmount *mnt, umode_t *mode);
72826+__u32 gr_acl_handle_chown(const struct dentry *dentry,
72827+ const struct vfsmount *mnt);
72828+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
72829+ const struct vfsmount *mnt);
72830+int gr_handle_ptrace(struct task_struct *task, const long request);
72831+int gr_handle_proc_ptrace(struct task_struct *task);
72832+__u32 gr_acl_handle_execve(const struct dentry *dentry,
72833+ const struct vfsmount *mnt);
72834+int gr_check_crash_exec(const struct file *filp);
72835+int gr_acl_is_enabled(void);
72836+void gr_set_kernel_label(struct task_struct *task);
72837+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
72838+ const kgid_t gid);
72839+int gr_set_proc_label(const struct dentry *dentry,
72840+ const struct vfsmount *mnt,
72841+ const int unsafe_flags);
72842+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
72843+ const struct vfsmount *mnt);
72844+__u32 gr_acl_handle_open(const struct dentry *dentry,
72845+ const struct vfsmount *mnt, int acc_mode);
72846+__u32 gr_acl_handle_creat(const struct dentry *dentry,
72847+ const struct dentry *p_dentry,
72848+ const struct vfsmount *p_mnt,
72849+ int open_flags, int acc_mode, const int imode);
72850+void gr_handle_create(const struct dentry *dentry,
72851+ const struct vfsmount *mnt);
72852+void gr_handle_proc_create(const struct dentry *dentry,
72853+ const struct inode *inode);
72854+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
72855+ const struct dentry *parent_dentry,
72856+ const struct vfsmount *parent_mnt,
72857+ const int mode);
72858+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
72859+ const struct dentry *parent_dentry,
72860+ const struct vfsmount *parent_mnt);
72861+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
72862+ const struct vfsmount *mnt);
72863+void gr_handle_delete(const ino_t ino, const dev_t dev);
72864+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
72865+ const struct vfsmount *mnt);
72866+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
72867+ const struct dentry *parent_dentry,
72868+ const struct vfsmount *parent_mnt,
72869+ const struct filename *from);
72870+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
72871+ const struct dentry *parent_dentry,
72872+ const struct vfsmount *parent_mnt,
72873+ const struct dentry *old_dentry,
72874+ const struct vfsmount *old_mnt, const struct filename *to);
72875+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
72876+int gr_acl_handle_rename(struct dentry *new_dentry,
72877+ struct dentry *parent_dentry,
72878+ const struct vfsmount *parent_mnt,
72879+ struct dentry *old_dentry,
72880+ struct inode *old_parent_inode,
72881+ struct vfsmount *old_mnt, const struct filename *newname);
72882+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
72883+ struct dentry *old_dentry,
72884+ struct dentry *new_dentry,
72885+ struct vfsmount *mnt, const __u8 replace);
72886+__u32 gr_check_link(const struct dentry *new_dentry,
72887+ const struct dentry *parent_dentry,
72888+ const struct vfsmount *parent_mnt,
72889+ const struct dentry *old_dentry,
72890+ const struct vfsmount *old_mnt);
72891+int gr_acl_handle_filldir(const struct file *file, const char *name,
72892+ const unsigned int namelen, const ino_t ino);
72893+
72894+__u32 gr_acl_handle_unix(const struct dentry *dentry,
72895+ const struct vfsmount *mnt);
72896+void gr_acl_handle_exit(void);
72897+void gr_acl_handle_psacct(struct task_struct *task, const long code);
72898+int gr_acl_handle_procpidmem(const struct task_struct *task);
72899+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
72900+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
72901+void gr_audit_ptrace(struct task_struct *task);
72902+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
72903+void gr_put_exec_file(struct task_struct *task);
72904+
72905+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
72906+
72907+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
72908+extern void gr_learn_resource(const struct task_struct *task, const int res,
72909+ const unsigned long wanted, const int gt);
72910+#else
72911+static inline void gr_learn_resource(const struct task_struct *task, const int res,
72912+ const unsigned long wanted, const int gt)
72913+{
72914+}
72915+#endif
72916+
72917+#ifdef CONFIG_GRKERNSEC_RESLOG
72918+extern void gr_log_resource(const struct task_struct *task, const int res,
72919+ const unsigned long wanted, const int gt);
72920+#else
72921+static inline void gr_log_resource(const struct task_struct *task, const int res,
72922+ const unsigned long wanted, const int gt)
72923+{
72924+}
72925+#endif
72926+
72927+#ifdef CONFIG_GRKERNSEC
72928+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
72929+void gr_handle_vm86(void);
72930+void gr_handle_mem_readwrite(u64 from, u64 to);
72931+
72932+void gr_log_badprocpid(const char *entry);
72933+
72934+extern int grsec_enable_dmesg;
72935+extern int grsec_disable_privio;
72936+
72937+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72938+extern kgid_t grsec_proc_gid;
72939+#endif
72940+
72941+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
72942+extern int grsec_enable_chroot_findtask;
72943+#endif
72944+#ifdef CONFIG_GRKERNSEC_SETXID
72945+extern int grsec_enable_setxid;
72946+#endif
72947+#endif
72948+
72949+#endif
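
The #ifdef ladders at the end of the header follow the usual kernel stub pattern: when a feature is compiled out, the empty static inline lets call sites stay unconditional while costing nothing at runtime. A hypothetical call site (the function and the particular limit checked are illustrative, not from the patch):

	static inline void example_bump_check(unsigned long wanted)
	{
		/* both compile to nothing unless resource learning/logging is on */
		gr_learn_resource(current, RLIMIT_NOFILE, wanted, 1);
		gr_log_resource(current, RLIMIT_NOFILE, wanted, 1);
	}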
72950diff --git a/include/linux/grsock.h b/include/linux/grsock.h
72951new file mode 100644
72952index 0000000..e7ffaaf
72953--- /dev/null
72954+++ b/include/linux/grsock.h
72955@@ -0,0 +1,19 @@
72956+#ifndef __GRSOCK_H
72957+#define __GRSOCK_H
72958+
72959+extern void gr_attach_curr_ip(const struct sock *sk);
72960+extern int gr_handle_sock_all(const int family, const int type,
72961+ const int protocol);
72962+extern int gr_handle_sock_server(const struct sockaddr *sck);
72963+extern int gr_handle_sock_server_other(const struct sock *sck);
72964+extern int gr_handle_sock_client(const struct sockaddr *sck);
72965+extern int gr_search_connect(struct socket * sock,
72966+ struct sockaddr_in * addr);
72967+extern int gr_search_bind(struct socket * sock,
72968+ struct sockaddr_in * addr);
72969+extern int gr_search_listen(struct socket * sock);
72970+extern int gr_search_accept(struct socket * sock);
72971+extern int gr_search_socket(const int domain, const int type,
72972+ const int protocol);
72973+
72974+#endif
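
A plausible shape for the socket-creation hook built on these declarations, assuming gr_search_socket() returns nonzero to allow (the return convention and the wrapper name are assumptions, not shown in this patch hunk):

	static int example_socket_gate(int family, int type, int protocol)
	{
		if (!gr_search_socket(family, type, protocol))
			return -EACCES;	/* denied by RBAC policy */
		return 0;
	}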
72975diff --git a/include/linux/highmem.h b/include/linux/highmem.h
72976index 7fb31da..08b5114 100644
72977--- a/include/linux/highmem.h
72978+++ b/include/linux/highmem.h
72979@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
72980 kunmap_atomic(kaddr);
72981 }
72982
72983+static inline void sanitize_highpage(struct page *page)
72984+{
72985+ void *kaddr;
72986+ unsigned long flags;
72987+
72988+ local_irq_save(flags);
72989+ kaddr = kmap_atomic(page);
72990+ clear_page(kaddr);
72991+ kunmap_atomic(kaddr);
72992+ local_irq_restore(flags);
72993+}
72994+
72995 static inline void zero_user_segments(struct page *page,
72996 unsigned start1, unsigned end1,
72997 unsigned start2, unsigned end2)
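
sanitize_highpage() is clear_highpage() wrapped in local_irq_save()/local_irq_restore(); presumably it exists for the page-free scrubbing path (PaX memory sanitization), where the clear must not be interleaved with interrupt-context kmap activity. An illustrative caller (the function name and its placement are assumptions):

	static void example_scrub_on_free(struct page *page)
	{
		sanitize_highpage(page);	/* zero freed data, IRQs off */
	}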
72998diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
72999index 1c7b89a..7f52502 100644
73000--- a/include/linux/hwmon-sysfs.h
73001+++ b/include/linux/hwmon-sysfs.h
73002@@ -25,7 +25,8 @@
73003 struct sensor_device_attribute{
73004 struct device_attribute dev_attr;
73005 int index;
73006-};
73007+} __do_const;
73008+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
73009 #define to_sensor_dev_attr(_dev_attr) \
73010 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
73011
73012@@ -41,7 +42,7 @@ struct sensor_device_attribute_2 {
73013 struct device_attribute dev_attr;
73014 u8 index;
73015 u8 nr;
73016-};
73017+} __do_const;
73018 #define to_sensor_dev_attr_2(_dev_attr) \
73019 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
73020
73021diff --git a/include/linux/i2c.h b/include/linux/i2c.h
73022index e988fa9..ff9f17e 100644
73023--- a/include/linux/i2c.h
73024+++ b/include/linux/i2c.h
73025@@ -366,6 +366,7 @@ struct i2c_algorithm {
73026 /* To determine what the adapter supports */
73027 u32 (*functionality) (struct i2c_adapter *);
73028 };
73029+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
73030
73031 /**
73032 * struct i2c_bus_recovery_info - I2C bus recovery information
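
The __do_const / __no_const pair seen here and throughout this patch is the constify gcc plugin's contract: structures made up of function pointers are forced into read-only memory, and the *_no_const typedef is the explicit escape hatch for the rare object that must be written at runtime. An illustrative driver-side use (all names below are hypothetical):

	static i2c_algorithm_no_const example_algo;	/* stays writable */

	static void example_probe_fixup(u32 (*fn)(struct i2c_adapter *))
	{
		/* patching a callback at probe time, which a constified
		 * struct i2c_algorithm would fault on under KERNEXEC */
		example_algo.functionality = fn;
	}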
73033diff --git a/include/linux/i2o.h b/include/linux/i2o.h
73034index d23c3c2..eb63c81 100644
73035--- a/include/linux/i2o.h
73036+++ b/include/linux/i2o.h
73037@@ -565,7 +565,7 @@ struct i2o_controller {
73038 struct i2o_device *exec; /* Executive */
73039 #if BITS_PER_LONG == 64
73040 spinlock_t context_list_lock; /* lock for context_list */
73041- atomic_t context_list_counter; /* needed for unique contexts */
73042+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
73043 struct list_head context_list; /* list of context id's
73044 and pointers */
73045 #endif
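
atomic_unchecked_t is the PAX_REFCOUNT opt-out: operations on it behave like their atomic_t counterparts but are exempt from the overflow trap, which is the right choice for context_list_counter above since it only generates unique IDs and may wrap harmlessly. A sketch of the idiom (names are illustrative):

	static atomic_unchecked_t example_ctx_ctr = ATOMIC_INIT(0);

	static u32 example_next_ctx(void)
	{
		/* wrap-around is fine for an ID generator */
		return (u32)atomic_inc_return_unchecked(&example_ctx_ctr);
	}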
73046diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
73047index aff7ad8..3942bbd 100644
73048--- a/include/linux/if_pppox.h
73049+++ b/include/linux/if_pppox.h
73050@@ -76,7 +76,7 @@ struct pppox_proto {
73051 int (*ioctl)(struct socket *sock, unsigned int cmd,
73052 unsigned long arg);
73053 struct module *owner;
73054-};
73055+} __do_const;
73056
73057 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
73058 extern void unregister_pppox_proto(int proto_num);
73059diff --git a/include/linux/init.h b/include/linux/init.h
73060index 8618147..0821126 100644
73061--- a/include/linux/init.h
73062+++ b/include/linux/init.h
73063@@ -39,9 +39,36 @@
73064 * Also note, that this data cannot be "const".
73065 */
73066
73067+#ifdef MODULE
73068+#define add_init_latent_entropy
73069+#define add_devinit_latent_entropy
73070+#define add_cpuinit_latent_entropy
73071+#define add_meminit_latent_entropy
73072+#else
73073+#define add_init_latent_entropy __latent_entropy
73074+
73075+#ifdef CONFIG_HOTPLUG
73076+#define add_devinit_latent_entropy
73077+#else
73078+#define add_devinit_latent_entropy __latent_entropy
73079+#endif
73080+
73081+#ifdef CONFIG_HOTPLUG_CPU
73082+#define add_cpuinit_latent_entropy
73083+#else
73084+#define add_cpuinit_latent_entropy __latent_entropy
73085+#endif
73086+
73087+#ifdef CONFIG_MEMORY_HOTPLUG
73088+#define add_meminit_latent_entropy
73089+#else
73090+#define add_meminit_latent_entropy __latent_entropy
73091+#endif
73092+#endif
73093+
73094 /* These are for everybody (although not all archs will actually
73095 discard it in modules) */
73096-#define __init __section(.init.text) __cold notrace
73097+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
73098 #define __initdata __section(.init.data)
73099 #define __initconst __constsection(.init.rodata)
73100 #define __exitdata __section(.exit.data)
73101@@ -94,7 +121,7 @@
73102 #define __exit __section(.exit.text) __exitused __cold notrace
73103
73104 /* Used for HOTPLUG_CPU */
73105-#define __cpuinit __section(.cpuinit.text) __cold notrace
73106+#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
73107 #define __cpuinitdata __section(.cpuinit.data)
73108 #define __cpuinitconst __constsection(.cpuinit.rodata)
73109 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
73110@@ -102,7 +129,7 @@
73111 #define __cpuexitconst __constsection(.cpuexit.rodata)
73112
73113 /* Used for MEMORY_HOTPLUG */
73114-#define __meminit __section(.meminit.text) __cold notrace
73115+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
73116 #define __meminitdata __section(.meminit.data)
73117 #define __meminitconst __constsection(.meminit.rodata)
73118 #define __memexit __section(.memexit.text) __exitused __cold notrace
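
The net effect of the block above: for built-in code, __init (and, when the corresponding hotplug support is absent, __cpuinit/__meminit) grows a __latent_entropy attribute, so the latent-entropy gcc plugin instruments boot-time functions to stir unpredictable state into the entropy pool; module code is excluded because it can load long after boot. Illustratively, after this change a plain initcall is picked up with no source modification (the function below is hypothetical):

	static int __init example_setup(void)	/* now carries __latent_entropy */
	{
		return 0;
	}
	early_initcall(example_setup);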
73119diff --git a/include/linux/init_task.h b/include/linux/init_task.h
73120index 5cd0f09..c9f67cc 100644
73121--- a/include/linux/init_task.h
73122+++ b/include/linux/init_task.h
73123@@ -154,6 +154,12 @@ extern struct task_group root_task_group;
73124
73125 #define INIT_TASK_COMM "swapper"
73126
73127+#ifdef CONFIG_X86
73128+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
73129+#else
73130+#define INIT_TASK_THREAD_INFO
73131+#endif
73132+
73133 /*
73134 * INIT_TASK is used to set up the first task table, touch at
73135 * your own risk!. Base=0, limit=0x1fffff (=2MB)
73136@@ -193,6 +199,7 @@ extern struct task_group root_task_group;
73137 RCU_POINTER_INITIALIZER(cred, &init_cred), \
73138 .comm = INIT_TASK_COMM, \
73139 .thread = INIT_THREAD, \
73140+ INIT_TASK_THREAD_INFO \
73141 .fs = &init_fs, \
73142 .files = &init_files, \
73143 .signal = &init_signals, \
73144diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
73145index 5fa5afe..ac55b25 100644
73146--- a/include/linux/interrupt.h
73147+++ b/include/linux/interrupt.h
73148@@ -430,7 +430,7 @@ enum
73149 /* map softirq index to softirq name. update 'softirq_to_name' in
73150 * kernel/softirq.c when adding a new softirq.
73151 */
73152-extern char *softirq_to_name[NR_SOFTIRQS];
73153+extern const char * const softirq_to_name[NR_SOFTIRQS];
73154
73155 /* softirq mask and active fields moved to irq_cpustat_t in
73156 * asm/hardirq.h to get better cache usage. KAO
73157@@ -438,12 +438,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
73158
73159 struct softirq_action
73160 {
73161- void (*action)(struct softirq_action *);
73162-};
73163+ void (*action)(void);
73164+} __no_const;
73165
73166 asmlinkage void do_softirq(void);
73167 asmlinkage void __do_softirq(void);
73168-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
73169+extern void open_softirq(int nr, void (*action)(void));
73170 extern void softirq_init(void);
73171 extern void __raise_softirq_irqoff(unsigned int nr);
73172
73173diff --git a/include/linux/iommu.h b/include/linux/iommu.h
73174index 3aeb730..2177f39 100644
73175--- a/include/linux/iommu.h
73176+++ b/include/linux/iommu.h
73177@@ -113,7 +113,7 @@ struct iommu_ops {
73178 u32 (*domain_get_windows)(struct iommu_domain *domain);
73179
73180 unsigned long pgsize_bitmap;
73181-};
73182+} __do_const;
73183
73184 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
73185 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
73186diff --git a/include/linux/ioport.h b/include/linux/ioport.h
73187index 89b7c24..382af74 100644
73188--- a/include/linux/ioport.h
73189+++ b/include/linux/ioport.h
73190@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
73191 int adjust_resource(struct resource *res, resource_size_t start,
73192 resource_size_t size);
73193 resource_size_t resource_alignment(struct resource *res);
73194-static inline resource_size_t resource_size(const struct resource *res)
73195+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
73196 {
73197 return res->end - res->start + 1;
73198 }
73199diff --git a/include/linux/irq.h b/include/linux/irq.h
73200index bc4e066..50468a9 100644
73201--- a/include/linux/irq.h
73202+++ b/include/linux/irq.h
73203@@ -328,7 +328,8 @@ struct irq_chip {
73204 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
73205
73206 unsigned long flags;
73207-};
73208+} __do_const;
73209+typedef struct irq_chip __no_const irq_chip_no_const;
73210
73211 /*
73212 * irq_chip specific flags
73213diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
73214index 3e203eb..3fe68d0 100644
73215--- a/include/linux/irqchip/arm-gic.h
73216+++ b/include/linux/irqchip/arm-gic.h
73217@@ -59,9 +59,11 @@
73218
73219 #ifndef __ASSEMBLY__
73220
73221+#include <linux/irq.h>
73222+
73223 struct device_node;
73224
73225-extern struct irq_chip gic_arch_extn;
73226+extern irq_chip_no_const gic_arch_extn;
73227
73228 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
73229 u32 offset, struct device_node *);
73230diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
73231index 6883e19..e854fcb 100644
73232--- a/include/linux/kallsyms.h
73233+++ b/include/linux/kallsyms.h
73234@@ -15,7 +15,8 @@
73235
73236 struct module;
73237
73238-#ifdef CONFIG_KALLSYMS
73239+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
73240+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
73241 /* Lookup the address for a symbol. Returns 0 if not found. */
73242 unsigned long kallsyms_lookup_name(const char *name);
73243
73244@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
73245 /* Stupid that this does nothing, but I didn't create this mess. */
73246 #define __print_symbol(fmt, addr)
73247 #endif /*CONFIG_KALLSYMS*/
73248+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
73249+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
73250+extern unsigned long kallsyms_lookup_name(const char *name);
73251+extern void __print_symbol(const char *fmt, unsigned long address);
73252+extern int sprint_backtrace(char *buffer, unsigned long address);
73253+extern int sprint_symbol(char *buffer, unsigned long address);
73254+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
73255+const char *kallsyms_lookup(unsigned long addr,
73256+ unsigned long *symbolsize,
73257+ unsigned long *offset,
73258+ char **modname, char *namebuf);
73259+extern int kallsyms_lookup_size_offset(unsigned long addr,
73260+ unsigned long *symbolsize,
73261+ unsigned long *offset);
73262+#endif
73263
73264 /* This macro allows us to keep printk typechecking */
73265 static __printf(1, 2)
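
The preprocessor dance above means: with GRKERNSEC_HIDESYM enabled, ordinary code sees only the stubbed-out kallsyms API, while the handful of files that legitimately need real lookups (kallsyms.c, vsnprintf.c, kprobes.c, dumpstack.c) opt in to the extern declarations. Presumably each of those files defines the guard before the include, along these lines (the exact opt-in pattern is an assumption):

	/* at the top of e.g. kernel/kallsyms.c: */
	#define __INCLUDED_BY_HIDESYM 1
	#include <linux/kallsyms.h>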
73266diff --git a/include/linux/key-type.h b/include/linux/key-type.h
73267index 518a53a..5e28358 100644
73268--- a/include/linux/key-type.h
73269+++ b/include/linux/key-type.h
73270@@ -125,7 +125,7 @@ struct key_type {
73271 /* internal fields */
73272 struct list_head link; /* link in types list */
73273 struct lock_class_key lock_class; /* key->sem lock class */
73274-};
73275+} __do_const;
73276
73277 extern struct key_type key_type_keyring;
73278
73279diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
73280index c6e091b..a940adf 100644
73281--- a/include/linux/kgdb.h
73282+++ b/include/linux/kgdb.h
73283@@ -52,7 +52,7 @@ extern int kgdb_connected;
73284 extern int kgdb_io_module_registered;
73285
73286 extern atomic_t kgdb_setting_breakpoint;
73287-extern atomic_t kgdb_cpu_doing_single_step;
73288+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
73289
73290 extern struct task_struct *kgdb_usethread;
73291 extern struct task_struct *kgdb_contthread;
73292@@ -254,7 +254,7 @@ struct kgdb_arch {
73293 void (*correct_hw_break)(void);
73294
73295 void (*enable_nmi)(bool on);
73296-};
73297+} __do_const;
73298
73299 /**
73300 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
73301@@ -279,7 +279,7 @@ struct kgdb_io {
73302 void (*pre_exception) (void);
73303 void (*post_exception) (void);
73304 int is_console;
73305-};
73306+} __do_const;
73307
73308 extern struct kgdb_arch arch_kgdb_ops;
73309
73310diff --git a/include/linux/kmod.h b/include/linux/kmod.h
73311index 0555cc6..b16a7a4 100644
73312--- a/include/linux/kmod.h
73313+++ b/include/linux/kmod.h
73314@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
73315 * usually useless though. */
73316 extern __printf(2, 3)
73317 int __request_module(bool wait, const char *name, ...);
73318+extern __printf(3, 4)
73319+int ___request_module(bool wait, char *param_name, const char *name, ...);
73320 #define request_module(mod...) __request_module(true, mod)
73321 #define request_module_nowait(mod...) __request_module(false, mod)
73322 #define try_then_request_module(x, mod...) \
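
___request_module() adds a module-parameter slot in front of the usual format string; a reasonable guess is that __request_module() forwards with NULL there, and that the module auto-load hardening code supplies a real parameter when re-probing on behalf of an unprivileged user. An illustrative direct call under that assumption:

	static int example_autoload(void)
	{
		/* NULL: no extra module parameter injected (assumption) */
		return ___request_module(true, NULL, "fb%d", 0);
	}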
73323diff --git a/include/linux/kobject.h b/include/linux/kobject.h
73324index 939b112..ed6ed51 100644
73325--- a/include/linux/kobject.h
73326+++ b/include/linux/kobject.h
73327@@ -111,7 +111,7 @@ struct kobj_type {
73328 struct attribute **default_attrs;
73329 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
73330 const void *(*namespace)(struct kobject *kobj);
73331-};
73332+} __do_const;
73333
73334 struct kobj_uevent_env {
73335 char *envp[UEVENT_NUM_ENVP];
73336@@ -134,6 +134,7 @@ struct kobj_attribute {
73337 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
73338 const char *buf, size_t count);
73339 };
73340+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
73341
73342 extern const struct sysfs_ops kobj_sysfs_ops;
73343
73344diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
73345index f66b065..c2c29b4 100644
73346--- a/include/linux/kobject_ns.h
73347+++ b/include/linux/kobject_ns.h
73348@@ -43,7 +43,7 @@ struct kobj_ns_type_operations {
73349 const void *(*netlink_ns)(struct sock *sk);
73350 const void *(*initial_ns)(void);
73351 void (*drop_ns)(void *);
73352-};
73353+} __do_const;
73354
73355 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
73356 int kobj_ns_type_registered(enum kobj_ns_type type);
73357diff --git a/include/linux/kref.h b/include/linux/kref.h
73358index 484604d..0f6c5b6 100644
73359--- a/include/linux/kref.h
73360+++ b/include/linux/kref.h
73361@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
73362 static inline int kref_sub(struct kref *kref, unsigned int count,
73363 void (*release)(struct kref *kref))
73364 {
73365- WARN_ON(release == NULL);
73366+ BUG_ON(release == NULL);
73367
73368 if (atomic_sub_and_test((int) count, &kref->refcount)) {
73369 release(kref);
73370diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
73371index 8db53cf..c21121d 100644
73372--- a/include/linux/kvm_host.h
73373+++ b/include/linux/kvm_host.h
73374@@ -444,7 +444,7 @@ static inline void kvm_irqfd_exit(void)
73375 {
73376 }
73377 #endif
73378-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
73379+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
73380 struct module *module);
73381 void kvm_exit(void);
73382
73383@@ -616,7 +616,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
73384 struct kvm_guest_debug *dbg);
73385 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
73386
73387-int kvm_arch_init(void *opaque);
73388+int kvm_arch_init(const void *opaque);
73389 void kvm_arch_exit(void);
73390
73391 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
73392diff --git a/include/linux/libata.h b/include/linux/libata.h
73393index eae7a05..2cdd875 100644
73394--- a/include/linux/libata.h
73395+++ b/include/linux/libata.h
73396@@ -919,7 +919,7 @@ struct ata_port_operations {
73397 * fields must be pointers.
73398 */
73399 const struct ata_port_operations *inherits;
73400-};
73401+} __do_const;
73402
73403 struct ata_port_info {
73404 unsigned long flags;
73405diff --git a/include/linux/list.h b/include/linux/list.h
73406index b83e565..baa6c1d 100644
73407--- a/include/linux/list.h
73408+++ b/include/linux/list.h
73409@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
73410 extern void list_del(struct list_head *entry);
73411 #endif
73412
73413+extern void __pax_list_add(struct list_head *new,
73414+ struct list_head *prev,
73415+ struct list_head *next);
73416+static inline void pax_list_add(struct list_head *new, struct list_head *head)
73417+{
73418+ __pax_list_add(new, head, head->next);
73419+}
73420+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
73421+{
73422+ __pax_list_add(new, head->prev, head);
73423+}
73424+extern void pax_list_del(struct list_head *entry);
73425+
73426 /**
73427 * list_replace - replace old entry by new one
73428 * @old : the element to be replaced
73429@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
73430 INIT_LIST_HEAD(entry);
73431 }
73432
73433+extern void pax_list_del_init(struct list_head *entry);
73434+
73435 /**
73436 * list_move - delete from one list and add as another's head
73437 * @list: the entry to move
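
pax_list_add()/pax_list_del() exist because the constify and KERNEXEC features can place list nodes in read-only memory; the out-of-line __pax_list_* helpers are expected to bracket the pointer updates with pax_open_kernel()/pax_close_kernel(). A sketch of the assumed lib-side implementation (the real one, not shown in this hunk, would also carry the usual list-debug sanity checks):

	void __pax_list_add(struct list_head *new, struct list_head *prev,
			    struct list_head *next)
	{
		pax_open_kernel();
		next->prev = new;
		new->next = next;
		new->prev = prev;
		prev->next = new;
		pax_close_kernel();
	}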
73438diff --git a/include/linux/math64.h b/include/linux/math64.h
73439index 2913b86..8dcbb1e 100644
73440--- a/include/linux/math64.h
73441+++ b/include/linux/math64.h
73442@@ -15,7 +15,7 @@
73443 * This is commonly provided by 32bit archs to provide an optimized 64bit
73444 * divide.
73445 */
73446-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
73447+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
73448 {
73449 *remainder = dividend % divisor;
73450 return dividend / divisor;
73451@@ -33,7 +33,7 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
73452 /**
73453 * div64_u64 - unsigned 64bit divide with 64bit divisor
73454 */
73455-static inline u64 div64_u64(u64 dividend, u64 divisor)
73456+static inline u64 __intentional_overflow(0) div64_u64(u64 dividend, u64 divisor)
73457 {
73458 return dividend / divisor;
73459 }
73460@@ -52,7 +52,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
73461 #define div64_ul(x, y) div_u64((x), (y))
73462
73463 #ifndef div_u64_rem
73464-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
73465+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
73466 {
73467 *remainder = do_div(dividend, divisor);
73468 return dividend;
73469@@ -81,7 +81,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
73470 * divide.
73471 */
73472 #ifndef div_u64
73473-static inline u64 div_u64(u64 dividend, u32 divisor)
73474+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
73475 {
73476 u32 remainder;
73477 return div_u64_rem(dividend, divisor, &remainder);
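
__intentional_overflow() is an annotation consumed by the size_overflow gcc plugin; by convention the argument names the parameter whose overflow is deliberate, with -1 denoting the return value (this reading is an assumption). Marking the 64-bit division helpers keeps the plugin from instrumenting arithmetic that is well-defined by design, so an ordinary caller stays unflagged (example function is hypothetical):

	static inline u64 example_mean(u64 total, u32 samples)
	{
		/* division cannot overflow; the annotation records that */
		return samples ? div_u64(total, samples) : 0;
	}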
73478diff --git a/include/linux/mm.h b/include/linux/mm.h
73479index e0c8528..bcf0c29 100644
73480--- a/include/linux/mm.h
73481+++ b/include/linux/mm.h
73482@@ -104,6 +104,11 @@ extern unsigned int kobjsize(const void *objp);
73483 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
73484 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
73485 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
73486+
73487+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
73488+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
73489+#endif
73490+
73491 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
73492
73493 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
73494@@ -205,8 +210,8 @@ struct vm_operations_struct {
73495 /* called by access_process_vm when get_user_pages() fails, typically
73496 * for use by special VMAs that can switch between memory and hardware
73497 */
73498- int (*access)(struct vm_area_struct *vma, unsigned long addr,
73499- void *buf, int len, int write);
73500+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
73501+ void *buf, size_t len, int write);
73502 #ifdef CONFIG_NUMA
73503 /*
73504 * set_policy() op must add a reference to any non-NULL @new mempolicy
73505@@ -236,6 +241,7 @@ struct vm_operations_struct {
73506 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
73507 unsigned long size, pgoff_t pgoff);
73508 };
73509+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
73510
73511 struct mmu_gather;
73512 struct inode;
73513@@ -980,8 +986,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
73514 unsigned long *pfn);
73515 int follow_phys(struct vm_area_struct *vma, unsigned long address,
73516 unsigned int flags, unsigned long *prot, resource_size_t *phys);
73517-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
73518- void *buf, int len, int write);
73519+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
73520+ void *buf, size_t len, int write);
73521
73522 static inline void unmap_shared_mapping_range(struct address_space *mapping,
73523 loff_t const holebegin, loff_t const holelen)
73524@@ -1020,9 +1026,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
73525 }
73526 #endif
73527
73528-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
73529-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
73530- void *buf, int len, int write);
73531+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
73532+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
73533+ void *buf, size_t len, int write);
73534
73535 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
73536 unsigned long start, unsigned long nr_pages,
73537@@ -1053,34 +1059,6 @@ int set_page_dirty(struct page *page);
73538 int set_page_dirty_lock(struct page *page);
73539 int clear_page_dirty_for_io(struct page *page);
73540
73541-/* Is the vma a continuation of the stack vma above it? */
73542-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
73543-{
73544- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
73545-}
73546-
73547-static inline int stack_guard_page_start(struct vm_area_struct *vma,
73548- unsigned long addr)
73549-{
73550- return (vma->vm_flags & VM_GROWSDOWN) &&
73551- (vma->vm_start == addr) &&
73552- !vma_growsdown(vma->vm_prev, addr);
73553-}
73554-
73555-/* Is the vma a continuation of the stack vma below it? */
73556-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
73557-{
73558- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
73559-}
73560-
73561-static inline int stack_guard_page_end(struct vm_area_struct *vma,
73562- unsigned long addr)
73563-{
73564- return (vma->vm_flags & VM_GROWSUP) &&
73565- (vma->vm_end == addr) &&
73566- !vma_growsup(vma->vm_next, addr);
73567-}
73568-
73569 extern pid_t
73570 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
73571
73572@@ -1180,6 +1158,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
73573 }
73574 #endif
73575
73576+#ifdef CONFIG_MMU
73577+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
73578+#else
73579+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
73580+{
73581+ return __pgprot(0);
73582+}
73583+#endif
73584+
73585 int vma_wants_writenotify(struct vm_area_struct *vma);
73586
73587 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
73588@@ -1198,8 +1185,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
73589 {
73590 return 0;
73591 }
73592+
73593+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
73594+ unsigned long address)
73595+{
73596+ return 0;
73597+}
73598 #else
73599 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
73600+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
73601 #endif
73602
73603 #ifdef __PAGETABLE_PMD_FOLDED
73604@@ -1208,8 +1202,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
73605 {
73606 return 0;
73607 }
73608+
73609+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
73610+ unsigned long address)
73611+{
73612+ return 0;
73613+}
73614 #else
73615 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
73616+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
73617 #endif
73618
73619 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
73620@@ -1227,11 +1228,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
73621 NULL: pud_offset(pgd, address);
73622 }
73623
73624+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
73625+{
73626+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
73627+ NULL: pud_offset(pgd, address);
73628+}
73629+
73630 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
73631 {
73632 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
73633 NULL: pmd_offset(pud, address);
73634 }
73635+
73636+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
73637+{
73638+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
73639+ NULL: pmd_offset(pud, address);
73640+}
73641 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
73642
73643 #if USE_SPLIT_PTLOCKS
73644@@ -1517,6 +1530,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
73645 unsigned long len, unsigned long prot, unsigned long flags,
73646 unsigned long pgoff, unsigned long *populate);
73647 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
73648+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
73649
73650 #ifdef CONFIG_MMU
73651 extern int __mm_populate(unsigned long addr, unsigned long len,
73652@@ -1545,10 +1559,11 @@ struct vm_unmapped_area_info {
73653 unsigned long high_limit;
73654 unsigned long align_mask;
73655 unsigned long align_offset;
73656+ unsigned long threadstack_offset;
73657 };
73658
73659-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
73660-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
73661+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
73662+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
73663
73664 /*
73665 * Search for an unmapped address range.
73666@@ -1560,7 +1575,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
73667 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
73668 */
73669 static inline unsigned long
73670-vm_unmapped_area(struct vm_unmapped_area_info *info)
73671+vm_unmapped_area(const struct vm_unmapped_area_info *info)
73672 {
73673 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
73674 return unmapped_area(info);
73675@@ -1623,6 +1638,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
73676 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
73677 struct vm_area_struct **pprev);
73678
73679+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
73680+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
73681+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
73682+
73683 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
73684 NULL if none. Assume start_addr < end_addr. */
73685 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
73686@@ -1651,15 +1670,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
73687 return vma;
73688 }
73689
73690-#ifdef CONFIG_MMU
73691-pgprot_t vm_get_page_prot(unsigned long vm_flags);
73692-#else
73693-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
73694-{
73695- return __pgprot(0);
73696-}
73697-#endif
73698-
73699 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
73700 unsigned long change_prot_numa(struct vm_area_struct *vma,
73701 unsigned long start, unsigned long end);
73702@@ -1711,6 +1721,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
73703 static inline void vm_stat_account(struct mm_struct *mm,
73704 unsigned long flags, struct file *file, long pages)
73705 {
73706+
73707+#ifdef CONFIG_PAX_RANDMMAP
73708+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
73709+#endif
73710+
73711 mm->total_vm += pages;
73712 }
73713 #endif /* CONFIG_PROC_FS */
73714@@ -1791,7 +1806,7 @@ extern int unpoison_memory(unsigned long pfn);
73715 extern int sysctl_memory_failure_early_kill;
73716 extern int sysctl_memory_failure_recovery;
73717 extern void shake_page(struct page *p, int access);
73718-extern atomic_long_t num_poisoned_pages;
73719+extern atomic_long_unchecked_t num_poisoned_pages;
73720 extern int soft_offline_page(struct page *page, int flags);
73721
73722 extern void dump_page(struct page *page);
73723@@ -1828,5 +1843,11 @@ void __init setup_nr_node_ids(void);
73724 static inline void setup_nr_node_ids(void) {}
73725 #endif
73726
73727+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
73728+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
73729+#else
73730+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
73731+#endif
73732+
73733 #endif /* __KERNEL__ */
73734 #endif /* _LINUX_MM_H */
73735diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
73736index ace9a5f..81bdb59 100644
73737--- a/include/linux/mm_types.h
73738+++ b/include/linux/mm_types.h
73739@@ -289,6 +289,8 @@ struct vm_area_struct {
73740 #ifdef CONFIG_NUMA
73741 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
73742 #endif
73743+
73744+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
73745 };
73746
73747 struct core_thread {
73748@@ -437,6 +439,24 @@ struct mm_struct {
73749 int first_nid;
73750 #endif
73751 struct uprobes_state uprobes_state;
73752+
73753+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
73754+ unsigned long pax_flags;
73755+#endif
73756+
73757+#ifdef CONFIG_PAX_DLRESOLVE
73758+ unsigned long call_dl_resolve;
73759+#endif
73760+
73761+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
73762+ unsigned long call_syscall;
73763+#endif
73764+
73765+#ifdef CONFIG_PAX_ASLR
73766+ unsigned long delta_mmap; /* randomized offset */
73767+ unsigned long delta_stack; /* randomized offset */
73768+#endif
73769+
73770 };
73771
73772 /* first nid will either be a valid NID or one of these values */
73773diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
73774index c5d5278..f0b68c8 100644
73775--- a/include/linux/mmiotrace.h
73776+++ b/include/linux/mmiotrace.h
73777@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
73778 /* Called from ioremap.c */
73779 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
73780 void __iomem *addr);
73781-extern void mmiotrace_iounmap(volatile void __iomem *addr);
73782+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
73783
73784 /* For anyone to insert markers. Remember trailing newline. */
73785 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
73786@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
73787 {
73788 }
73789
73790-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
73791+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
73792 {
73793 }
73794
73795diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
73796index 5c76737..61f518e 100644
73797--- a/include/linux/mmzone.h
73798+++ b/include/linux/mmzone.h
73799@@ -396,7 +396,7 @@ struct zone {
73800 unsigned long flags; /* zone flags, see below */
73801
73802 /* Zone statistics */
73803- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
73804+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
73805
73806 /*
73807 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
73808diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
73809index b508016..237cfe5 100644
73810--- a/include/linux/mod_devicetable.h
73811+++ b/include/linux/mod_devicetable.h
73812@@ -13,7 +13,7 @@
73813 typedef unsigned long kernel_ulong_t;
73814 #endif
73815
73816-#define PCI_ANY_ID (~0)
73817+#define PCI_ANY_ID ((__u16)~0)
73818
73819 struct pci_device_id {
73820 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
73821@@ -139,7 +139,7 @@ struct usb_device_id {
73822 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
73823 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
73824
73825-#define HID_ANY_ID (~0)
73826+#define HID_ANY_ID (~0U)
73827 #define HID_BUS_ANY 0xffff
73828 #define HID_GROUP_ANY 0x0000
73829
73830@@ -465,7 +465,7 @@ struct dmi_system_id {
73831 const char *ident;
73832 struct dmi_strmatch matches[4];
73833 void *driver_data;
73834-};
73835+} __do_const;
73836 /*
73837 * struct dmi_device_id appears during expansion of
73838 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
73839diff --git a/include/linux/module.h b/include/linux/module.h
73840index 46f1ea0..a34ca37 100644
73841--- a/include/linux/module.h
73842+++ b/include/linux/module.h
73843@@ -17,9 +17,11 @@
73844 #include <linux/moduleparam.h>
73845 #include <linux/tracepoint.h>
73846 #include <linux/export.h>
73847+#include <linux/fs.h>
73848
73849 #include <linux/percpu.h>
73850 #include <asm/module.h>
73851+#include <asm/pgtable.h>
73852
73853 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
73854 #define MODULE_SIG_STRING "~Module signature appended~\n"
73855@@ -54,12 +56,13 @@ struct module_attribute {
73856 int (*test)(struct module *);
73857 void (*free)(struct module *);
73858 };
73859+typedef struct module_attribute __no_const module_attribute_no_const;
73860
73861 struct module_version_attribute {
73862 struct module_attribute mattr;
73863 const char *module_name;
73864 const char *version;
73865-} __attribute__ ((__aligned__(sizeof(void *))));
73866+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
73867
73868 extern ssize_t __modver_version_show(struct module_attribute *,
73869 struct module_kobject *, char *);
73870@@ -232,7 +235,7 @@ struct module
73871
73872 /* Sysfs stuff. */
73873 struct module_kobject mkobj;
73874- struct module_attribute *modinfo_attrs;
73875+ module_attribute_no_const *modinfo_attrs;
73876 const char *version;
73877 const char *srcversion;
73878 struct kobject *holders_dir;
73879@@ -281,19 +284,16 @@ struct module
73880 int (*init)(void);
73881
73882 /* If this is non-NULL, vfree after init() returns */
73883- void *module_init;
73884+ void *module_init_rx, *module_init_rw;
73885
73886 /* Here is the actual code + data, vfree'd on unload. */
73887- void *module_core;
73888+ void *module_core_rx, *module_core_rw;
73889
73890 /* Here are the sizes of the init and core sections */
73891- unsigned int init_size, core_size;
73892+ unsigned int init_size_rw, core_size_rw;
73893
73894 /* The size of the executable code in each section. */
73895- unsigned int init_text_size, core_text_size;
73896-
73897- /* Size of RO sections of the module (text+rodata) */
73898- unsigned int init_ro_size, core_ro_size;
73899+ unsigned int init_size_rx, core_size_rx;
73900
73901 /* Arch-specific module values */
73902 struct mod_arch_specific arch;
73903@@ -349,6 +349,10 @@ struct module
73904 #ifdef CONFIG_EVENT_TRACING
73905 struct ftrace_event_call **trace_events;
73906 unsigned int num_trace_events;
73907+ struct file_operations trace_id;
73908+ struct file_operations trace_enable;
73909+ struct file_operations trace_format;
73910+ struct file_operations trace_filter;
73911 #endif
73912 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
73913 unsigned int num_ftrace_callsites;
73914@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
73915 bool is_module_percpu_address(unsigned long addr);
73916 bool is_module_text_address(unsigned long addr);
73917
73918+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
73919+{
73920+
73921+#ifdef CONFIG_PAX_KERNEXEC
73922+ if (ktla_ktva(addr) >= (unsigned long)start &&
73923+ ktla_ktva(addr) < (unsigned long)start + size)
73924+ return 1;
73925+#endif
73926+
73927+ return ((void *)addr >= start && (void *)addr < start + size);
73928+}
73929+
73930+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
73931+{
73932+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
73933+}
73934+
73935+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
73936+{
73937+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
73938+}
73939+
73940+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
73941+{
73942+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
73943+}
73944+
73945+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
73946+{
73947+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
73948+}
73949+
73950 static inline int within_module_core(unsigned long addr, const struct module *mod)
73951 {
73952- return (unsigned long)mod->module_core <= addr &&
73953- addr < (unsigned long)mod->module_core + mod->core_size;
73954+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
73955 }
73956
73957 static inline int within_module_init(unsigned long addr, const struct module *mod)
73958 {
73959- return (unsigned long)mod->module_init <= addr &&
73960- addr < (unsigned long)mod->module_init + mod->init_size;
73961+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
73962 }
73963
73964 /* Search for module by name: must hold module_mutex. */
73965diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
73966index 560ca53..ef621ef 100644
73967--- a/include/linux/moduleloader.h
73968+++ b/include/linux/moduleloader.h
73969@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
73970 sections. Returns NULL on failure. */
73971 void *module_alloc(unsigned long size);
73972
73973+#ifdef CONFIG_PAX_KERNEXEC
73974+void *module_alloc_exec(unsigned long size);
73975+#else
73976+#define module_alloc_exec(x) module_alloc(x)
73977+#endif
73978+
73979 /* Free memory returned from module_alloc. */
73980 void module_free(struct module *mod, void *module_region);
73981
73982+#ifdef CONFIG_PAX_KERNEXEC
73983+void module_free_exec(struct module *mod, void *module_region);
73984+#else
73985+#define module_free_exec(x, y) module_free((x), (y))
73986+#endif
73987+
73988 /*
73989 * Apply the given relocation to the (simplified) ELF. Return -error
73990 * or 0.
73991@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
73992 unsigned int relsec,
73993 struct module *me)
73994 {
73995+#ifdef CONFIG_MODULES
73996 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
73997+#endif
73998 return -ENOEXEC;
73999 }
74000 #endif
74001@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
74002 unsigned int relsec,
74003 struct module *me)
74004 {
74005+#ifdef CONFIG_MODULES
74006 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
74007+#endif
74008 return -ENOEXEC;
74009 }
74010 #endif
74011diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
74012index 137b419..fe663ec 100644
74013--- a/include/linux/moduleparam.h
74014+++ b/include/linux/moduleparam.h
74015@@ -284,7 +284,7 @@ static inline void __kernel_param_unlock(void)
74016 * @len is usually just sizeof(string).
74017 */
74018 #define module_param_string(name, string, len, perm) \
74019- static const struct kparam_string __param_string_##name \
74020+ static const struct kparam_string __param_string_##name __used \
74021 = { len, string }; \
74022 __module_param_call(MODULE_PARAM_PREFIX, name, \
74023 &param_ops_string, \
74024@@ -423,7 +423,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
74025 */
74026 #define module_param_array_named(name, array, type, nump, perm) \
74027 param_check_##type(name, &(array)[0]); \
74028- static const struct kparam_array __param_arr_##name \
74029+ static const struct kparam_array __param_arr_##name __used \
74030 = { .max = ARRAY_SIZE(array), .num = nump, \
74031 .ops = &param_ops_##type, \
74032 .elemsize = sizeof(array[0]), .elem = array }; \
74033diff --git a/include/linux/namei.h b/include/linux/namei.h
74034index 5a5ff57..5ae5070 100644
74035--- a/include/linux/namei.h
74036+++ b/include/linux/namei.h
74037@@ -19,7 +19,7 @@ struct nameidata {
74038 unsigned seq;
74039 int last_type;
74040 unsigned depth;
74041- char *saved_names[MAX_NESTED_LINKS + 1];
74042+ const char *saved_names[MAX_NESTED_LINKS + 1];
74043 };
74044
74045 /*
74046@@ -84,12 +84,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
74047
74048 extern void nd_jump_link(struct nameidata *nd, struct path *path);
74049
74050-static inline void nd_set_link(struct nameidata *nd, char *path)
74051+static inline void nd_set_link(struct nameidata *nd, const char *path)
74052 {
74053 nd->saved_names[nd->depth] = path;
74054 }
74055
74056-static inline char *nd_get_link(struct nameidata *nd)
74057+static inline const char *nd_get_link(const struct nameidata *nd)
74058 {
74059 return nd->saved_names[nd->depth];
74060 }
74061diff --git a/include/linux/net.h b/include/linux/net.h
74062index 99c9f0c..e1cf296 100644
74063--- a/include/linux/net.h
74064+++ b/include/linux/net.h
74065@@ -183,7 +183,7 @@ struct net_proto_family {
74066 int (*create)(struct net *net, struct socket *sock,
74067 int protocol, int kern);
74068 struct module *owner;
74069-};
74070+} __do_const;
74071
74072 struct iovec;
74073 struct kvec;
74074diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
74075index 96e4c21..9cc8278 100644
74076--- a/include/linux/netdevice.h
74077+++ b/include/linux/netdevice.h
74078@@ -1026,6 +1026,7 @@ struct net_device_ops {
74079 int (*ndo_change_carrier)(struct net_device *dev,
74080 bool new_carrier);
74081 };
74082+typedef struct net_device_ops __no_const net_device_ops_no_const;
74083
74084 /*
74085 * The DEVICE structure.
74086@@ -1094,7 +1095,7 @@ struct net_device {
74087 int iflink;
74088
74089 struct net_device_stats stats;
74090- atomic_long_t rx_dropped; /* dropped packets by core network
74091+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
74092 * Do not use this in drivers.
74093 */
74094
74095diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
74096index 0060fde..481c6ae 100644
74097--- a/include/linux/netfilter.h
74098+++ b/include/linux/netfilter.h
74099@@ -82,7 +82,7 @@ struct nf_sockopt_ops {
74100 #endif
74101 /* Use the module struct to lock set/get code in place */
74102 struct module *owner;
74103-};
74104+} __do_const;
74105
74106 /* Function to register/unregister hook points. */
74107 int nf_register_hook(struct nf_hook_ops *reg);
74108diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
74109index d80e275..c3510b8 100644
74110--- a/include/linux/netfilter/ipset/ip_set.h
74111+++ b/include/linux/netfilter/ipset/ip_set.h
74112@@ -124,7 +124,7 @@ struct ip_set_type_variant {
74113 /* Return true if "b" set is the same as "a"
74114 * according to the create set parameters */
74115 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
74116-};
74117+} __do_const;
74118
74119 /* The core set type structure */
74120 struct ip_set_type {
74121diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
74122index cadb740..d7c37c0 100644
74123--- a/include/linux/netfilter/nfnetlink.h
74124+++ b/include/linux/netfilter/nfnetlink.h
74125@@ -16,7 +16,7 @@ struct nfnl_callback {
74126 const struct nlattr * const cda[]);
74127 const struct nla_policy *policy; /* netlink attribute policy */
74128 const u_int16_t attr_count; /* number of nlattr's */
74129-};
74130+} __do_const;
74131
74132 struct nfnetlink_subsystem {
74133 const char *name;
74134diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
74135new file mode 100644
74136index 0000000..33f4af8
74137--- /dev/null
74138+++ b/include/linux/netfilter/xt_gradm.h
74139@@ -0,0 +1,9 @@
74140+#ifndef _LINUX_NETFILTER_XT_GRADM_H
74141+#define _LINUX_NETFILTER_XT_GRADM_H 1
74142+
74143+struct xt_gradm_mtinfo {
74144+ __u16 flags;
74145+ __u16 invflags;
74146+};
74147+
74148+#endif
74149diff --git a/include/linux/nls.h b/include/linux/nls.h
74150index 5dc635f..35f5e11 100644
74151--- a/include/linux/nls.h
74152+++ b/include/linux/nls.h
74153@@ -31,7 +31,7 @@ struct nls_table {
74154 const unsigned char *charset2upper;
74155 struct module *owner;
74156 struct nls_table *next;
74157-};
74158+} __do_const;
74159
74160 /* this value hold the maximum octet of charset */
74161 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
74162diff --git a/include/linux/notifier.h b/include/linux/notifier.h
74163index d14a4c3..a078786 100644
74164--- a/include/linux/notifier.h
74165+++ b/include/linux/notifier.h
74166@@ -54,7 +54,8 @@ struct notifier_block {
74167 notifier_fn_t notifier_call;
74168 struct notifier_block __rcu *next;
74169 int priority;
74170-};
74171+} __do_const;
74172+typedef struct notifier_block __no_const notifier_block_no_const;
74173
74174 struct atomic_notifier_head {
74175 spinlock_t lock;
74176diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
74177index a4c5624..79d6d88 100644
74178--- a/include/linux/oprofile.h
74179+++ b/include/linux/oprofile.h
74180@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
74181 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
74182 char const * name, ulong * val);
74183
74184-/** Create a file for read-only access to an atomic_t. */
74185+/** Create a file for read-only access to an atomic_unchecked_t. */
74186 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
74187- char const * name, atomic_t * val);
74188+ char const * name, atomic_unchecked_t * val);
74189
74190 /** create a directory */
74191 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
74192diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
74193index 8db71dc..a76bf2c 100644
74194--- a/include/linux/pci_hotplug.h
74195+++ b/include/linux/pci_hotplug.h
74196@@ -80,7 +80,8 @@ struct hotplug_slot_ops {
74197 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
74198 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
74199 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
74200-};
74201+} __do_const;
74202+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
74203
74204 /**
74205 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
74206diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
74207index c5b6dbf..b124155 100644
74208--- a/include/linux/perf_event.h
74209+++ b/include/linux/perf_event.h
74210@@ -318,8 +318,8 @@ struct perf_event {
74211
74212 enum perf_event_active_state state;
74213 unsigned int attach_state;
74214- local64_t count;
74215- atomic64_t child_count;
74216+ local64_t count; /* PaX: fix it one day */
74217+ atomic64_unchecked_t child_count;
74218
74219 /*
74220 * These are the total time in nanoseconds that the event
74221@@ -370,8 +370,8 @@ struct perf_event {
74222 * These accumulate total time (in nanoseconds) that children
74223 * events have been enabled and running, respectively.
74224 */
74225- atomic64_t child_total_time_enabled;
74226- atomic64_t child_total_time_running;
74227+ atomic64_unchecked_t child_total_time_enabled;
74228+ atomic64_unchecked_t child_total_time_running;
74229
74230 /*
74231 * Protect attach/detach and child_list:
74232@@ -692,7 +692,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
74233 entry->ip[entry->nr++] = ip;
74234 }
74235
74236-extern int sysctl_perf_event_paranoid;
74237+extern int sysctl_perf_event_legitimately_concerned;
74238 extern int sysctl_perf_event_mlock;
74239 extern int sysctl_perf_event_sample_rate;
74240
74241@@ -700,19 +700,24 @@ extern int perf_proc_update_handler(struct ctl_table *table, int write,
74242 void __user *buffer, size_t *lenp,
74243 loff_t *ppos);
74244
74245+static inline bool perf_paranoid_any(void)
74246+{
74247+ return sysctl_perf_event_legitimately_concerned > 2;
74248+}
74249+
74250 static inline bool perf_paranoid_tracepoint_raw(void)
74251 {
74252- return sysctl_perf_event_paranoid > -1;
74253+ return sysctl_perf_event_legitimately_concerned > -1;
74254 }
74255
74256 static inline bool perf_paranoid_cpu(void)
74257 {
74258- return sysctl_perf_event_paranoid > 0;
74259+ return sysctl_perf_event_legitimately_concerned > 0;
74260 }
74261
74262 static inline bool perf_paranoid_kernel(void)
74263 {
74264- return sysctl_perf_event_paranoid > 1;
74265+ return sysctl_perf_event_legitimately_concerned > 1;
74266 }
74267
74268 extern void perf_event_init(void);
74269@@ -806,7 +811,7 @@ static inline void perf_restore_debug_store(void) { }
74270 */
74271 #define perf_cpu_notifier(fn) \
74272 do { \
74273- static struct notifier_block fn##_nb __cpuinitdata = \
74274+ static struct notifier_block fn##_nb = \
74275 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
74276 unsigned long cpu = smp_processor_id(); \
74277 unsigned long flags; \
74278@@ -826,7 +831,7 @@ struct perf_pmu_events_attr {
74279 struct device_attribute attr;
74280 u64 id;
74281 const char *event_str;
74282-};
74283+} __do_const;
74284
74285 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
74286 static struct perf_pmu_events_attr _var = { \
74287diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
74288index b8809fe..ae4ccd0 100644
74289--- a/include/linux/pipe_fs_i.h
74290+++ b/include/linux/pipe_fs_i.h
74291@@ -47,10 +47,10 @@ struct pipe_inode_info {
74292 struct mutex mutex;
74293 wait_queue_head_t wait;
74294 unsigned int nrbufs, curbuf, buffers;
74295- unsigned int readers;
74296- unsigned int writers;
74297- unsigned int files;
74298- unsigned int waiting_writers;
74299+ atomic_t readers;
74300+ atomic_t writers;
74301+ atomic_t files;
74302+ atomic_t waiting_writers;
74303 unsigned int r_counter;
74304 unsigned int w_counter;
74305 struct page *tmp_page;
74306diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
74307index 5f28cae..3d23723 100644
74308--- a/include/linux/platform_data/usb-ehci-s5p.h
74309+++ b/include/linux/platform_data/usb-ehci-s5p.h
74310@@ -14,7 +14,7 @@
74311 struct s5p_ehci_platdata {
74312 int (*phy_init)(struct platform_device *pdev, int type);
74313 int (*phy_exit)(struct platform_device *pdev, int type);
74314-};
74315+} __no_const;
74316
74317 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
74318
74319diff --git a/include/linux/platform_data/usb-ohci-exynos.h b/include/linux/platform_data/usb-ohci-exynos.h
74320index c256c59..8ea94c7 100644
74321--- a/include/linux/platform_data/usb-ohci-exynos.h
74322+++ b/include/linux/platform_data/usb-ohci-exynos.h
74323@@ -14,7 +14,7 @@
74324 struct exynos4_ohci_platdata {
74325 int (*phy_init)(struct platform_device *pdev, int type);
74326 int (*phy_exit)(struct platform_device *pdev, int type);
74327-};
74328+} __no_const;
74329
74330 extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
74331
74332diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
74333index 7c1d252..c5c773e 100644
74334--- a/include/linux/pm_domain.h
74335+++ b/include/linux/pm_domain.h
74336@@ -48,7 +48,7 @@ struct gpd_dev_ops {
74337
74338 struct gpd_cpu_data {
74339 unsigned int saved_exit_latency;
74340- struct cpuidle_state *idle_state;
74341+ cpuidle_state_no_const *idle_state;
74342 };
74343
74344 struct generic_pm_domain {
74345diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
74346index 7d7e09e..8671ef8 100644
74347--- a/include/linux/pm_runtime.h
74348+++ b/include/linux/pm_runtime.h
74349@@ -104,7 +104,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
74350
74351 static inline void pm_runtime_mark_last_busy(struct device *dev)
74352 {
74353- ACCESS_ONCE(dev->power.last_busy) = jiffies;
74354+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
74355 }
74356
74357 #else /* !CONFIG_PM_RUNTIME */
74358diff --git a/include/linux/pnp.h b/include/linux/pnp.h
74359index 195aafc..49a7bc2 100644
74360--- a/include/linux/pnp.h
74361+++ b/include/linux/pnp.h
74362@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
74363 struct pnp_fixup {
74364 char id[7];
74365 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
74366-};
74367+} __do_const;
74368
74369 /* config parameters */
74370 #define PNP_CONFIG_NORMAL 0x0001
74371diff --git a/include/linux/poison.h b/include/linux/poison.h
74372index 2110a81..13a11bb 100644
74373--- a/include/linux/poison.h
74374+++ b/include/linux/poison.h
74375@@ -19,8 +19,8 @@
74376 * under normal circumstances, used to verify that nobody uses
74377 * non-initialized list entries.
74378 */
74379-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
74380-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
74381+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
74382+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
74383
74384 /********** include/linux/timer.h **********/
74385 /*
74386diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
74387index c0f44c2..1572583 100644
74388--- a/include/linux/power/smartreflex.h
74389+++ b/include/linux/power/smartreflex.h
74390@@ -238,7 +238,7 @@ struct omap_sr_class_data {
74391 int (*notify)(struct omap_sr *sr, u32 status);
74392 u8 notify_flags;
74393 u8 class_type;
74394-};
74395+} __do_const;
74396
74397 /**
74398 * struct omap_sr_nvalue_table - Smartreflex n-target value info
74399diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
74400index 4ea1d37..80f4b33 100644
74401--- a/include/linux/ppp-comp.h
74402+++ b/include/linux/ppp-comp.h
74403@@ -84,7 +84,7 @@ struct compressor {
74404 struct module *owner;
74405 /* Extra skb space needed by the compressor algorithm */
74406 unsigned int comp_extra;
74407-};
74408+} __do_const;
74409
74410 /*
74411 * The return value from decompress routine is the length of the
74412diff --git a/include/linux/preempt.h b/include/linux/preempt.h
74413index f5d4723..a6ea2fa 100644
74414--- a/include/linux/preempt.h
74415+++ b/include/linux/preempt.h
74416@@ -18,8 +18,13 @@
74417 # define sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
74418 #endif
74419
74420+#define raw_add_preempt_count(val) do { preempt_count() += (val); } while (0)
74421+#define raw_sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
74422+
74423 #define inc_preempt_count() add_preempt_count(1)
74424+#define raw_inc_preempt_count() raw_add_preempt_count(1)
74425 #define dec_preempt_count() sub_preempt_count(1)
74426+#define raw_dec_preempt_count() raw_sub_preempt_count(1)
74427
74428 #define preempt_count() (current_thread_info()->preempt_count)
74429
74430@@ -64,6 +69,12 @@ do { \
74431 barrier(); \
74432 } while (0)
74433
74434+#define raw_preempt_disable() \
74435+do { \
74436+ raw_inc_preempt_count(); \
74437+ barrier(); \
74438+} while (0)
74439+
74440 #define sched_preempt_enable_no_resched() \
74441 do { \
74442 barrier(); \
74443@@ -72,6 +83,12 @@ do { \
74444
74445 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
74446
74447+#define raw_preempt_enable_no_resched() \
74448+do { \
74449+ barrier(); \
74450+ raw_dec_preempt_count(); \
74451+} while (0)
74452+
74453 #define preempt_enable() \
74454 do { \
74455 preempt_enable_no_resched(); \
74456@@ -116,8 +133,10 @@ do { \
74457 * region.
74458 */
74459 #define preempt_disable() barrier()
74460+#define raw_preempt_disable() barrier()
74461 #define sched_preempt_enable_no_resched() barrier()
74462 #define preempt_enable_no_resched() barrier()
74463+#define raw_preempt_enable_no_resched() barrier()
74464 #define preempt_enable() barrier()
74465
74466 #define preempt_disable_notrace() barrier()
74467diff --git a/include/linux/printk.h b/include/linux/printk.h
74468index 22c7052..ad3fa0a 100644
74469--- a/include/linux/printk.h
74470+++ b/include/linux/printk.h
74471@@ -106,6 +106,8 @@ static inline __printf(1, 2) __cold
74472 void early_printk(const char *s, ...) { }
74473 #endif
74474
74475+extern int kptr_restrict;
74476+
74477 #ifdef CONFIG_PRINTK
74478 asmlinkage __printf(5, 0)
74479 int vprintk_emit(int facility, int level,
74480@@ -140,7 +142,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
74481
74482 extern int printk_delay_msec;
74483 extern int dmesg_restrict;
74484-extern int kptr_restrict;
74485
74486 extern void wake_up_klogd(void);
74487
74488diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
74489index 608e60a..c26f864 100644
74490--- a/include/linux/proc_fs.h
74491+++ b/include/linux/proc_fs.h
74492@@ -34,6 +34,19 @@ static inline struct proc_dir_entry *proc_create(
74493 return proc_create_data(name, mode, parent, proc_fops, NULL);
74494 }
74495
74496+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
74497+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
74498+{
74499+#ifdef CONFIG_GRKERNSEC_PROC_USER
74500+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
74501+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74502+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
74503+#else
74504+ return proc_create_data(name, mode, parent, proc_fops, NULL);
74505+#endif
74506+}
74507+
74508+
74509 extern void proc_set_size(struct proc_dir_entry *, loff_t);
74510 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
74511 extern void *PDE_DATA(const struct inode *);
74512diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
74513index 34a1e10..03a6d03 100644
74514--- a/include/linux/proc_ns.h
74515+++ b/include/linux/proc_ns.h
74516@@ -14,7 +14,7 @@ struct proc_ns_operations {
74517 void (*put)(void *ns);
74518 int (*install)(struct nsproxy *nsproxy, void *ns);
74519 unsigned int (*inum)(void *ns);
74520-};
74521+} __do_const;
74522
74523 struct proc_ns {
74524 void *ns;
74525diff --git a/include/linux/random.h b/include/linux/random.h
74526index 3b9377d..61b506a 100644
74527--- a/include/linux/random.h
74528+++ b/include/linux/random.h
74529@@ -32,6 +32,11 @@ void prandom_seed(u32 seed);
74530 u32 prandom_u32_state(struct rnd_state *);
74531 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
74532
74533+static inline unsigned long pax_get_random_long(void)
74534+{
74535+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
74536+}
74537+
74538 /*
74539 * Handle minimum values for seeds
74540 */
74541diff --git a/include/linux/rculist.h b/include/linux/rculist.h
74542index f4b1001..8ddb2b6 100644
74543--- a/include/linux/rculist.h
74544+++ b/include/linux/rculist.h
74545@@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
74546 struct list_head *prev, struct list_head *next);
74547 #endif
74548
74549+extern void __pax_list_add_rcu(struct list_head *new,
74550+ struct list_head *prev, struct list_head *next);
74551+
74552 /**
74553 * list_add_rcu - add a new entry to rcu-protected list
74554 * @new: new entry to be added
74555@@ -65,6 +68,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
74556 __list_add_rcu(new, head, head->next);
74557 }
74558
74559+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
74560+{
74561+ __pax_list_add_rcu(new, head, head->next);
74562+}
74563+
74564 /**
74565 * list_add_tail_rcu - add a new entry to rcu-protected list
74566 * @new: new entry to be added
74567@@ -87,6 +95,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
74568 __list_add_rcu(new, head->prev, head);
74569 }
74570
74571+static inline void pax_list_add_tail_rcu(struct list_head *new,
74572+ struct list_head *head)
74573+{
74574+ __pax_list_add_rcu(new, head->prev, head);
74575+}
74576+
74577 /**
74578 * list_del_rcu - deletes entry from list without re-initialization
74579 * @entry: the element to delete from the list.
74580@@ -117,6 +131,8 @@ static inline void list_del_rcu(struct list_head *entry)
74581 entry->prev = LIST_POISON2;
74582 }
74583
74584+extern void pax_list_del_rcu(struct list_head *entry);
74585+
74586 /**
74587 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
74588 * @n: the element to delete from the hash list.
74589diff --git a/include/linux/reboot.h b/include/linux/reboot.h
74590index 23b3630..e1bc12b 100644
74591--- a/include/linux/reboot.h
74592+++ b/include/linux/reboot.h
74593@@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
74594 * Architecture-specific implementations of sys_reboot commands.
74595 */
74596
74597-extern void machine_restart(char *cmd);
74598-extern void machine_halt(void);
74599-extern void machine_power_off(void);
74600+extern void machine_restart(char *cmd) __noreturn;
74601+extern void machine_halt(void) __noreturn;
74602+extern void machine_power_off(void) __noreturn;
74603
74604 extern void machine_shutdown(void);
74605 struct pt_regs;
74606@@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
74607 */
74608
74609 extern void kernel_restart_prepare(char *cmd);
74610-extern void kernel_restart(char *cmd);
74611-extern void kernel_halt(void);
74612-extern void kernel_power_off(void);
74613+extern void kernel_restart(char *cmd) __noreturn;
74614+extern void kernel_halt(void) __noreturn;
74615+extern void kernel_power_off(void) __noreturn;
74616
74617 extern int C_A_D; /* for sysctl */
74618 void ctrl_alt_del(void);
74619@@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
74620 * Emergency restart, callable from an interrupt handler.
74621 */
74622
74623-extern void emergency_restart(void);
74624+extern void emergency_restart(void) __noreturn;
74625 #include <asm/emergency-restart.h>
74626
74627 #endif /* _LINUX_REBOOT_H */
74628diff --git a/include/linux/regset.h b/include/linux/regset.h
74629index 8e0c9fe..ac4d221 100644
74630--- a/include/linux/regset.h
74631+++ b/include/linux/regset.h
74632@@ -161,7 +161,8 @@ struct user_regset {
74633 unsigned int align;
74634 unsigned int bias;
74635 unsigned int core_note_type;
74636-};
74637+} __do_const;
74638+typedef struct user_regset __no_const user_regset_no_const;
74639
74640 /**
74641 * struct user_regset_view - available regsets
74642diff --git a/include/linux/relay.h b/include/linux/relay.h
74643index d7c8359..818daf5 100644
74644--- a/include/linux/relay.h
74645+++ b/include/linux/relay.h
74646@@ -157,7 +157,7 @@ struct rchan_callbacks
74647 * The callback should return 0 if successful, negative if not.
74648 */
74649 int (*remove_buf_file)(struct dentry *dentry);
74650-};
74651+} __no_const;
74652
74653 /*
74654 * CONFIG_RELAY kernel API, kernel/relay.c
74655diff --git a/include/linux/rio.h b/include/linux/rio.h
74656index 18e0993..8ab5b21 100644
74657--- a/include/linux/rio.h
74658+++ b/include/linux/rio.h
74659@@ -345,7 +345,7 @@ struct rio_ops {
74660 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
74661 u64 rstart, u32 size, u32 flags);
74662 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
74663-};
74664+} __no_const;
74665
74666 #define RIO_RESOURCE_MEM 0x00000100
74667 #define RIO_RESOURCE_DOORBELL 0x00000200
74668diff --git a/include/linux/rmap.h b/include/linux/rmap.h
74669index 6dacb93..6174423 100644
74670--- a/include/linux/rmap.h
74671+++ b/include/linux/rmap.h
74672@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
74673 void anon_vma_init(void); /* create anon_vma_cachep */
74674 int anon_vma_prepare(struct vm_area_struct *);
74675 void unlink_anon_vmas(struct vm_area_struct *);
74676-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
74677-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
74678+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
74679+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
74680
74681 static inline void anon_vma_merge(struct vm_area_struct *vma,
74682 struct vm_area_struct *next)
74683diff --git a/include/linux/sched.h b/include/linux/sched.h
74684index 178a8d9..450bf11 100644
74685--- a/include/linux/sched.h
74686+++ b/include/linux/sched.h
74687@@ -62,6 +62,7 @@ struct bio_list;
74688 struct fs_struct;
74689 struct perf_event_context;
74690 struct blk_plug;
74691+struct linux_binprm;
74692
74693 /*
74694 * List of flags we want to share for kernel threads,
74695@@ -303,7 +304,7 @@ extern char __sched_text_start[], __sched_text_end[];
74696 extern int in_sched_functions(unsigned long addr);
74697
74698 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
74699-extern signed long schedule_timeout(signed long timeout);
74700+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
74701 extern signed long schedule_timeout_interruptible(signed long timeout);
74702 extern signed long schedule_timeout_killable(signed long timeout);
74703 extern signed long schedule_timeout_uninterruptible(signed long timeout);
74704@@ -314,6 +315,19 @@ struct nsproxy;
74705 struct user_namespace;
74706
74707 #ifdef CONFIG_MMU
74708+
74709+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
74710+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
74711+#else
74712+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
74713+{
74714+ return 0;
74715+}
74716+#endif
74717+
74718+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
74719+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
74720+
74721 extern void arch_pick_mmap_layout(struct mm_struct *mm);
74722 extern unsigned long
74723 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
74724@@ -591,6 +605,17 @@ struct signal_struct {
74725 #ifdef CONFIG_TASKSTATS
74726 struct taskstats *stats;
74727 #endif
74728+
74729+#ifdef CONFIG_GRKERNSEC
74730+ u32 curr_ip;
74731+ u32 saved_ip;
74732+ u32 gr_saddr;
74733+ u32 gr_daddr;
74734+ u16 gr_sport;
74735+ u16 gr_dport;
74736+ u8 used_accept:1;
74737+#endif
74738+
74739 #ifdef CONFIG_AUDIT
74740 unsigned audit_tty;
74741 unsigned audit_tty_log_passwd;
74742@@ -671,6 +696,14 @@ struct user_struct {
74743 struct key *session_keyring; /* UID's default session keyring */
74744 #endif
74745
74746+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
74747+ unsigned char kernel_banned;
74748+#endif
74749+#ifdef CONFIG_GRKERNSEC_BRUTE
74750+ unsigned char suid_banned;
74751+ unsigned long suid_ban_expires;
74752+#endif
74753+
74754 /* Hash table maintenance information */
74755 struct hlist_node uidhash_node;
74756 kuid_t uid;
74757@@ -1158,8 +1191,8 @@ struct task_struct {
74758 struct list_head thread_group;
74759
74760 struct completion *vfork_done; /* for vfork() */
74761- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
74762- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
74763+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
74764+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
74765
74766 cputime_t utime, stime, utimescaled, stimescaled;
74767 cputime_t gtime;
74768@@ -1184,11 +1217,6 @@ struct task_struct {
74769 struct task_cputime cputime_expires;
74770 struct list_head cpu_timers[3];
74771
74772-/* process credentials */
74773- const struct cred __rcu *real_cred; /* objective and real subjective task
74774- * credentials (COW) */
74775- const struct cred __rcu *cred; /* effective (overridable) subjective task
74776- * credentials (COW) */
74777 char comm[TASK_COMM_LEN]; /* executable name excluding path
74778 - access with [gs]et_task_comm (which lock
74779 it with task_lock())
74780@@ -1205,6 +1233,10 @@ struct task_struct {
74781 #endif
74782 /* CPU-specific state of this task */
74783 struct thread_struct thread;
74784+/* thread_info moved to task_struct */
74785+#ifdef CONFIG_X86
74786+ struct thread_info tinfo;
74787+#endif
74788 /* filesystem information */
74789 struct fs_struct *fs;
74790 /* open file information */
74791@@ -1278,6 +1310,10 @@ struct task_struct {
74792 gfp_t lockdep_reclaim_gfp;
74793 #endif
74794
74795+/* process credentials */
74796+ const struct cred __rcu *real_cred; /* objective and real subjective task
74797+ * credentials (COW) */
74798+
74799 /* journalling filesystem info */
74800 void *journal_info;
74801
74802@@ -1316,6 +1352,10 @@ struct task_struct {
74803 /* cg_list protected by css_set_lock and tsk->alloc_lock */
74804 struct list_head cg_list;
74805 #endif
74806+
74807+ const struct cred __rcu *cred; /* effective (overridable) subjective task
74808+ * credentials (COW) */
74809+
74810 #ifdef CONFIG_FUTEX
74811 struct robust_list_head __user *robust_list;
74812 #ifdef CONFIG_COMPAT
74813@@ -1416,8 +1456,76 @@ struct task_struct {
74814 unsigned int sequential_io;
74815 unsigned int sequential_io_avg;
74816 #endif
74817+
74818+#ifdef CONFIG_GRKERNSEC
74819+ /* grsecurity */
74820+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
74821+ u64 exec_id;
74822+#endif
74823+#ifdef CONFIG_GRKERNSEC_SETXID
74824+ const struct cred *delayed_cred;
74825+#endif
74826+ struct dentry *gr_chroot_dentry;
74827+ struct acl_subject_label *acl;
74828+ struct acl_role_label *role;
74829+ struct file *exec_file;
74830+ unsigned long brute_expires;
74831+ u16 acl_role_id;
74832+ /* is this the task that authenticated to the special role */
74833+ u8 acl_sp_role;
74834+ u8 is_writable;
74835+ u8 brute;
74836+ u8 gr_is_chrooted;
74837+#endif
74838+
74839 };
74840
74841+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
74842+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
74843+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
74844+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
74845+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
74846+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
74847+
74848+#ifdef CONFIG_PAX_SOFTMODE
74849+extern int pax_softmode;
74850+#endif
74851+
74852+extern int pax_check_flags(unsigned long *);
74853+
74854+/* if tsk != current then task_lock must be held on it */
74855+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
74856+static inline unsigned long pax_get_flags(struct task_struct *tsk)
74857+{
74858+ if (likely(tsk->mm))
74859+ return tsk->mm->pax_flags;
74860+ else
74861+ return 0UL;
74862+}
74863+
74864+/* if tsk != current then task_lock must be held on it */
74865+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
74866+{
74867+ if (likely(tsk->mm)) {
74868+ tsk->mm->pax_flags = flags;
74869+ return 0;
74870+ }
74871+ return -EINVAL;
74872+}
74873+#endif
74874+
74875+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
74876+extern void pax_set_initial_flags(struct linux_binprm *bprm);
74877+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
74878+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
74879+#endif
74880+
74881+struct path;
74882+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
74883+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
74884+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
74885+extern void pax_report_refcount_overflow(struct pt_regs *regs);
74886+
74887 /* Future-safe accessor for struct task_struct's cpus_allowed. */
74888 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
74889
74890@@ -1476,7 +1584,7 @@ struct pid_namespace;
74891 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
74892 struct pid_namespace *ns);
74893
74894-static inline pid_t task_pid_nr(struct task_struct *tsk)
74895+static inline pid_t task_pid_nr(const struct task_struct *tsk)
74896 {
74897 return tsk->pid;
74898 }
74899@@ -1919,7 +2027,9 @@ void yield(void);
74900 extern struct exec_domain default_exec_domain;
74901
74902 union thread_union {
74903+#ifndef CONFIG_X86
74904 struct thread_info thread_info;
74905+#endif
74906 unsigned long stack[THREAD_SIZE/sizeof(long)];
74907 };
74908
74909@@ -1952,6 +2062,7 @@ extern struct pid_namespace init_pid_ns;
74910 */
74911
74912 extern struct task_struct *find_task_by_vpid(pid_t nr);
74913+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
74914 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
74915 struct pid_namespace *ns);
74916
74917@@ -2118,7 +2229,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
74918 extern void exit_itimers(struct signal_struct *);
74919 extern void flush_itimer_signals(void);
74920
74921-extern void do_group_exit(int);
74922+extern __noreturn void do_group_exit(int);
74923
74924 extern int allow_signal(int);
74925 extern int disallow_signal(int);
74926@@ -2309,9 +2420,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
74927
74928 #endif
74929
74930-static inline int object_is_on_stack(void *obj)
74931+static inline int object_starts_on_stack(void *obj)
74932 {
74933- void *stack = task_stack_page(current);
74934+ const void *stack = task_stack_page(current);
74935
74936 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
74937 }
74938diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
74939index bf8086b..962b035 100644
74940--- a/include/linux/sched/sysctl.h
74941+++ b/include/linux/sched/sysctl.h
74942@@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
74943 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
74944
74945 extern int sysctl_max_map_count;
74946+extern unsigned long sysctl_heap_stack_gap;
74947
74948 extern unsigned int sysctl_sched_latency;
74949 extern unsigned int sysctl_sched_min_granularity;
74950diff --git a/include/linux/security.h b/include/linux/security.h
74951index 4686491..2bd210e 100644
74952--- a/include/linux/security.h
74953+++ b/include/linux/security.h
74954@@ -26,6 +26,7 @@
74955 #include <linux/capability.h>
74956 #include <linux/slab.h>
74957 #include <linux/err.h>
74958+#include <linux/grsecurity.h>
74959
74960 struct linux_binprm;
74961 struct cred;
74962diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
74963index 2da29ac..aac448ec 100644
74964--- a/include/linux/seq_file.h
74965+++ b/include/linux/seq_file.h
74966@@ -26,6 +26,9 @@ struct seq_file {
74967 struct mutex lock;
74968 const struct seq_operations *op;
74969 int poll_event;
74970+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
74971+ u64 exec_id;
74972+#endif
74973 #ifdef CONFIG_USER_NS
74974 struct user_namespace *user_ns;
74975 #endif
74976@@ -38,6 +41,7 @@ struct seq_operations {
74977 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
74978 int (*show) (struct seq_file *m, void *v);
74979 };
74980+typedef struct seq_operations __no_const seq_operations_no_const;
74981
74982 #define SEQ_SKIP 1
74983
74984diff --git a/include/linux/shm.h b/include/linux/shm.h
74985index 429c199..4d42e38 100644
74986--- a/include/linux/shm.h
74987+++ b/include/linux/shm.h
74988@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
74989
74990 /* The task created the shm object. NULL if the task is dead. */
74991 struct task_struct *shm_creator;
74992+#ifdef CONFIG_GRKERNSEC
74993+ time_t shm_createtime;
74994+ pid_t shm_lapid;
74995+#endif
74996 };
74997
74998 /* shm_mode upper byte flags */
74999diff --git a/include/linux/signal.h b/include/linux/signal.h
75000index d897484..323ba98 100644
75001--- a/include/linux/signal.h
75002+++ b/include/linux/signal.h
75003@@ -433,6 +433,7 @@ void signals_init(void);
75004
75005 int restore_altstack(const stack_t __user *);
75006 int __save_altstack(stack_t __user *, unsigned long);
75007+void __save_altstack_ex(stack_t __user *, unsigned long);
75008
75009 #ifdef CONFIG_PROC_FS
75010 struct seq_file;
75011diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
75012index dec1748..112c1f9 100644
75013--- a/include/linux/skbuff.h
75014+++ b/include/linux/skbuff.h
75015@@ -640,7 +640,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
75016 extern struct sk_buff *__alloc_skb(unsigned int size,
75017 gfp_t priority, int flags, int node);
75018 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
75019-static inline struct sk_buff *alloc_skb(unsigned int size,
75020+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
75021 gfp_t priority)
75022 {
75023 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
75024@@ -756,7 +756,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
75025 */
75026 static inline int skb_queue_empty(const struct sk_buff_head *list)
75027 {
75028- return list->next == (struct sk_buff *)list;
75029+ return list->next == (const struct sk_buff *)list;
75030 }
75031
75032 /**
75033@@ -769,7 +769,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
75034 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
75035 const struct sk_buff *skb)
75036 {
75037- return skb->next == (struct sk_buff *)list;
75038+ return skb->next == (const struct sk_buff *)list;
75039 }
75040
75041 /**
75042@@ -782,7 +782,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
75043 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
75044 const struct sk_buff *skb)
75045 {
75046- return skb->prev == (struct sk_buff *)list;
75047+ return skb->prev == (const struct sk_buff *)list;
75048 }
75049
75050 /**
75051@@ -1848,7 +1848,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
75052 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
75053 */
75054 #ifndef NET_SKB_PAD
75055-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
75056+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
75057 #endif
75058
75059 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
75060@@ -2443,7 +2443,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
75061 int noblock, int *err);
75062 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
75063 struct poll_table_struct *wait);
75064-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
75065+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
75066 int offset, struct iovec *to,
75067 int size);
75068 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
75069@@ -2733,6 +2733,9 @@ static inline void nf_reset(struct sk_buff *skb)
75070 nf_bridge_put(skb->nf_bridge);
75071 skb->nf_bridge = NULL;
75072 #endif
75073+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
75074+ skb->nf_trace = 0;
75075+#endif
75076 }
75077
75078 static inline void nf_reset_trace(struct sk_buff *skb)
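
The NET_SKB_PAD hunk above swaps a bare 32 for _AC(32,UL): the kernel's max() rejects operands of different types, and L1_CACHE_BYTES expands to an unsigned long on some configs, so the literal needs a matching UL suffix (spelled via _AC so the same header still works from assembly, where suffixes are invalid). A minimal userspace sketch of the 3.10-era type check; kmax() and the L1_CACHE_BYTES value are assumptions for the demo:

#include <stdio.h>

/* Userspace model of the kernel's type-checking max(). */
#define kmax(x, y) ({					\
	typeof(x) _x = (x);				\
	typeof(y) _y = (y);				\
	(void)(&_x == &_y);	/* warns when operand types differ */ \
	_x > _y ? _x : _y; })

#define L1_CACHE_BYTES 64UL	/* assumed value for the sketch */

int main(void)
{
	/* kmax(32, L1_CACHE_BYTES) compares int* with unsigned long*
	 * and draws a warning; _AC(32,UL) turns 32 into 32UL, which
	 * matches and compiles cleanly. */
	unsigned long pad = kmax(32UL, L1_CACHE_BYTES);

	printf("NET_SKB_PAD = %lu\n", pad);
	return 0;
}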
75079diff --git a/include/linux/slab.h b/include/linux/slab.h
75080index 0c62175..f016ac1 100644
75081--- a/include/linux/slab.h
75082+++ b/include/linux/slab.h
75083@@ -12,15 +12,29 @@
75084 #include <linux/gfp.h>
75085 #include <linux/types.h>
75086 #include <linux/workqueue.h>
75087-
75088+#include <linux/err.h>
75089
75090 /*
75091 * Flags to pass to kmem_cache_create().
75092 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
75093 */
75094 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
75095+
75096+#ifdef CONFIG_PAX_USERCOPY_SLABS
75097+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
75098+#else
75099+#define SLAB_USERCOPY 0x00000000UL
75100+#endif
75101+
75102 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
75103 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
75104+
75105+#ifdef CONFIG_PAX_MEMORY_SANITIZE
75106+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
75107+#else
75108+#define SLAB_NO_SANITIZE 0x00000000UL
75109+#endif
75110+
75111 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
75112 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
75113 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
75114@@ -89,10 +103,13 @@
75115 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
75116 * Both make kfree a no-op.
75117 */
75118-#define ZERO_SIZE_PTR ((void *)16)
75119+#define ZERO_SIZE_PTR \
75120+({ \
75121+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
75122+ (void *)(-MAX_ERRNO-1L); \
75123+})
75124
75125-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
75126- (unsigned long)ZERO_SIZE_PTR)
75127+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
75128
75129
75130 struct mem_cgroup;
75131@@ -132,6 +149,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
75132 void kfree(const void *);
75133 void kzfree(const void *);
75134 size_t ksize(const void *);
75135+const char *check_heap_object(const void *ptr, unsigned long n);
75136+bool is_usercopy_object(const void *ptr);
75137
75138 /*
75139 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
75140@@ -164,7 +183,7 @@ struct kmem_cache {
75141 unsigned int align; /* Alignment as calculated */
75142 unsigned long flags; /* Active flags on the slab */
75143 const char *name; /* Slab name for sysfs */
75144- int refcount; /* Use counter */
75145+ atomic_t refcount; /* Use counter */
75146 void (*ctor)(void *); /* Called on object slot creation */
75147 struct list_head list; /* List of all slab caches on the system */
75148 };
75149@@ -226,6 +245,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
75150 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
75151 #endif
75152
75153+#ifdef CONFIG_PAX_USERCOPY_SLABS
75154+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
75155+#endif
75156+
75157 /*
75158 * Figure out which kmalloc slab an allocation of a certain size
75159 * belongs to.
75160@@ -234,7 +257,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
75161 * 2 = 120 .. 192 bytes
75162 * n = 2^(n-1) .. 2^n -1
75163 */
75164-static __always_inline int kmalloc_index(size_t size)
75165+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
75166 {
75167 if (!size)
75168 return 0;
75169@@ -406,6 +429,7 @@ void print_slabinfo_header(struct seq_file *m);
75170 * for general use, and so are not documented here. For a full list of
75171 * potential flags, always refer to linux/gfp.h.
75172 */
75173+
75174 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
75175 {
75176 if (size != 0 && n > SIZE_MAX / size)
75177@@ -465,7 +489,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
75178 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
75179 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
75180 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
75181-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
75182+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
75183 #define kmalloc_track_caller(size, flags) \
75184 __kmalloc_track_caller(size, flags, _RET_IP_)
75185 #else
75186@@ -485,7 +509,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
75187 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
75188 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
75189 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
75190-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
75191+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
75192 #define kmalloc_node_track_caller(size, flags, node) \
75193 __kmalloc_node_track_caller(size, flags, node, \
75194 _RET_IP_)
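
The ZERO_SIZE_PTR change above moves the zero-length-kmalloc poison pointer from low address 16 up to just below the ERR_PTR range at the top of the address space, which can never be mapped, and rewrites ZERO_OR_NULL_PTR() as a single unsigned comparison that also catches NULL via wraparound. A standalone sketch of the new test, with the MAX_ERRNO value taken from the kernel:

#include <stdio.h>

#define MAX_ERRNO	4095UL
#define ZERO_SIZE_PTR	((void *)(-MAX_ERRNO - 1L))

/* One unsigned comparison: NULL wraps to ULONG_MAX via the "- 1", and
 * anything at or above ZERO_SIZE_PTR stays above the threshold. */
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
	char obj[16];

	printf("NULL          -> %d\n", ZERO_OR_NULL_PTR((void *)0));
	printf("ZERO_SIZE_PTR -> %d\n", ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));
	printf("real object   -> %d\n", ZERO_OR_NULL_PTR(obj));
	return 0;
}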
75195diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
75196index cd40158..4e2f7af 100644
75197--- a/include/linux/slab_def.h
75198+++ b/include/linux/slab_def.h
75199@@ -50,7 +50,7 @@ struct kmem_cache {
75200 /* 4) cache creation/removal */
75201 const char *name;
75202 struct list_head list;
75203- int refcount;
75204+ atomic_t refcount;
75205 int object_size;
75206 int align;
75207
75208@@ -66,10 +66,14 @@ struct kmem_cache {
75209 unsigned long node_allocs;
75210 unsigned long node_frees;
75211 unsigned long node_overflow;
75212- atomic_t allochit;
75213- atomic_t allocmiss;
75214- atomic_t freehit;
75215- atomic_t freemiss;
75216+ atomic_unchecked_t allochit;
75217+ atomic_unchecked_t allocmiss;
75218+ atomic_unchecked_t freehit;
75219+ atomic_unchecked_t freemiss;
75220+#ifdef CONFIG_PAX_MEMORY_SANITIZE
75221+ atomic_unchecked_t sanitized;
75222+ atomic_unchecked_t not_sanitized;
75223+#endif
75224
75225 /*
75226 * If debugging is enabled, then the allocator can add additional
75227@@ -103,7 +107,7 @@ struct kmem_cache {
75228 };
75229
75230 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
75231-void *__kmalloc(size_t size, gfp_t flags);
75232+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
75233
75234 #ifdef CONFIG_TRACING
75235 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
75236@@ -136,6 +140,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
75237 cachep = kmalloc_dma_caches[i];
75238 else
75239 #endif
75240+
75241+#ifdef CONFIG_PAX_USERCOPY_SLABS
75242+ if (flags & GFP_USERCOPY)
75243+ cachep = kmalloc_usercopy_caches[i];
75244+ else
75245+#endif
75246+
75247 cachep = kmalloc_caches[i];
75248
75249 ret = kmem_cache_alloc_trace(cachep, flags, size);
75250@@ -146,7 +157,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
75251 }
75252
75253 #ifdef CONFIG_NUMA
75254-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
75255+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
75256 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
75257
75258 #ifdef CONFIG_TRACING
75259@@ -185,6 +196,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
75260 cachep = kmalloc_dma_caches[i];
75261 else
75262 #endif
75263+
75264+#ifdef CONFIG_PAX_USERCOPY_SLABS
75265+ if (flags & GFP_USERCOPY)
75266+ cachep = kmalloc_usercopy_caches[i];
75267+ else
75268+#endif
75269+
75270 cachep = kmalloc_caches[i];
75271
75272 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
75273diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
75274index f28e14a..7831211 100644
75275--- a/include/linux/slob_def.h
75276+++ b/include/linux/slob_def.h
75277@@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
75278 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
75279 }
75280
75281-void *__kmalloc_node(size_t size, gfp_t flags, int node);
75282+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
75283
75284 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
75285 {
75286@@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
75287 return __kmalloc_node(size, flags, NUMA_NO_NODE);
75288 }
75289
75290-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
75291+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
75292 {
75293 return kmalloc(size, flags);
75294 }
75295diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
75296index 027276f..092bfe8 100644
75297--- a/include/linux/slub_def.h
75298+++ b/include/linux/slub_def.h
75299@@ -80,7 +80,7 @@ struct kmem_cache {
75300 struct kmem_cache_order_objects max;
75301 struct kmem_cache_order_objects min;
75302 gfp_t allocflags; /* gfp flags to use on each alloc */
75303- int refcount; /* Refcount for slab cache destroy */
75304+ atomic_t refcount; /* Refcount for slab cache destroy */
75305 void (*ctor)(void *);
75306 int inuse; /* Offset to metadata */
75307 int align; /* Alignment */
75308@@ -105,7 +105,7 @@ struct kmem_cache {
75309 };
75310
75311 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
75312-void *__kmalloc(size_t size, gfp_t flags);
75313+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
75314
75315 static __always_inline void *
75316 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
75317@@ -149,7 +149,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
75318 }
75319 #endif
75320
75321-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
75322+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
75323 {
75324 unsigned int order = get_order(size);
75325 return kmalloc_order_trace(size, flags, order);
75326@@ -175,7 +175,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
75327 }
75328
75329 #ifdef CONFIG_NUMA
75330-void *__kmalloc_node(size_t size, gfp_t flags, int node);
75331+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
75332 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
75333
75334 #ifdef CONFIG_TRACING
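
The __size_overflow(1) annotations scattered through the slab headers above flag the size parameter for grsecurity's size_overflow gcc plugin, which instruments the expressions feeding that argument and traps when they wrap. The kmalloc_array() guard visible in the slab.h hunk is the hand-written form of the same check; lifted into a standalone userspace helper:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Refuse n * size when the product would wrap a size_t. */
static void *alloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return malloc(n * size);
}

int main(void)
{
	void *ok = alloc_array(4096, 16);
	void *bad = alloc_array(SIZE_MAX, 2);	/* would wrap: rejected */

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return 0;
}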
75335diff --git a/include/linux/smp.h b/include/linux/smp.h
75336index c848876..11e8a84 100644
75337--- a/include/linux/smp.h
75338+++ b/include/linux/smp.h
75339@@ -221,7 +221,9 @@ static inline void kick_all_cpus_sync(void) { }
75340 #endif
75341
75342 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
75343+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
75344 #define put_cpu() preempt_enable()
75345+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
75346
75347 /*
75348 * Callback to arch code if there's nosmp or maxcpus=0 on the
75349diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
75350index 54f91d3..be2c379 100644
75351--- a/include/linux/sock_diag.h
75352+++ b/include/linux/sock_diag.h
75353@@ -11,7 +11,7 @@ struct sock;
75354 struct sock_diag_handler {
75355 __u8 family;
75356 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
75357-};
75358+} __do_const;
75359
75360 int sock_diag_register(const struct sock_diag_handler *h);
75361 void sock_diag_unregister(const struct sock_diag_handler *h);
75362diff --git a/include/linux/sonet.h b/include/linux/sonet.h
75363index 680f9a3..f13aeb0 100644
75364--- a/include/linux/sonet.h
75365+++ b/include/linux/sonet.h
75366@@ -7,7 +7,7 @@
75367 #include <uapi/linux/sonet.h>
75368
75369 struct k_sonet_stats {
75370-#define __HANDLE_ITEM(i) atomic_t i
75371+#define __HANDLE_ITEM(i) atomic_unchecked_t i
75372 __SONET_ITEMS
75373 #undef __HANDLE_ITEM
75374 };
75375diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
75376index 07d8e53..dc934c9 100644
75377--- a/include/linux/sunrpc/addr.h
75378+++ b/include/linux/sunrpc/addr.h
75379@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
75380 {
75381 switch (sap->sa_family) {
75382 case AF_INET:
75383- return ntohs(((struct sockaddr_in *)sap)->sin_port);
75384+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
75385 case AF_INET6:
75386- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
75387+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
75388 }
75389 return 0;
75390 }
75391@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
75392 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
75393 const struct sockaddr *src)
75394 {
75395- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
75396+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
75397 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
75398
75399 dsin->sin_family = ssin->sin_family;
75400@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
75401 if (sa->sa_family != AF_INET6)
75402 return 0;
75403
75404- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
75405+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
75406 }
75407
75408 #endif /* _LINUX_SUNRPC_ADDR_H */
75409diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
75410index bfe11be..12bc8c4 100644
75411--- a/include/linux/sunrpc/clnt.h
75412+++ b/include/linux/sunrpc/clnt.h
75413@@ -96,7 +96,7 @@ struct rpc_procinfo {
75414 unsigned int p_timer; /* Which RTT timer to use */
75415 u32 p_statidx; /* Which procedure to account */
75416 const char * p_name; /* name of procedure */
75417-};
75418+} __do_const;
75419
75420 #ifdef __KERNEL__
75421
75422diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
75423index 1f0216b..6a4fa50 100644
75424--- a/include/linux/sunrpc/svc.h
75425+++ b/include/linux/sunrpc/svc.h
75426@@ -411,7 +411,7 @@ struct svc_procedure {
75427 unsigned int pc_count; /* call count */
75428 unsigned int pc_cachetype; /* cache info (NFS) */
75429 unsigned int pc_xdrressize; /* maximum size of XDR reply */
75430-};
75431+} __do_const;
75432
75433 /*
75434 * Function prototypes.
75435diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
75436index 0b8e3e6..33e0a01 100644
75437--- a/include/linux/sunrpc/svc_rdma.h
75438+++ b/include/linux/sunrpc/svc_rdma.h
75439@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
75440 extern unsigned int svcrdma_max_requests;
75441 extern unsigned int svcrdma_max_req_size;
75442
75443-extern atomic_t rdma_stat_recv;
75444-extern atomic_t rdma_stat_read;
75445-extern atomic_t rdma_stat_write;
75446-extern atomic_t rdma_stat_sq_starve;
75447-extern atomic_t rdma_stat_rq_starve;
75448-extern atomic_t rdma_stat_rq_poll;
75449-extern atomic_t rdma_stat_rq_prod;
75450-extern atomic_t rdma_stat_sq_poll;
75451-extern atomic_t rdma_stat_sq_prod;
75452+extern atomic_unchecked_t rdma_stat_recv;
75453+extern atomic_unchecked_t rdma_stat_read;
75454+extern atomic_unchecked_t rdma_stat_write;
75455+extern atomic_unchecked_t rdma_stat_sq_starve;
75456+extern atomic_unchecked_t rdma_stat_rq_starve;
75457+extern atomic_unchecked_t rdma_stat_rq_poll;
75458+extern atomic_unchecked_t rdma_stat_rq_prod;
75459+extern atomic_unchecked_t rdma_stat_sq_poll;
75460+extern atomic_unchecked_t rdma_stat_sq_prod;
75461
75462 #define RPCRDMA_VERSION 1
75463
75464diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
75465index ff374ab..7fd2ecb 100644
75466--- a/include/linux/sunrpc/svcauth.h
75467+++ b/include/linux/sunrpc/svcauth.h
75468@@ -109,7 +109,7 @@ struct auth_ops {
75469 int (*release)(struct svc_rqst *rq);
75470 void (*domain_release)(struct auth_domain *);
75471 int (*set_client)(struct svc_rqst *rq);
75472-};
75473+} __do_const;
75474
75475 #define SVC_GARBAGE 1
75476 #define SVC_SYSERR 2
75477diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
75478index a5ffd32..0935dea 100644
75479--- a/include/linux/swiotlb.h
75480+++ b/include/linux/swiotlb.h
75481@@ -60,7 +60,8 @@ extern void
75482
75483 extern void
75484 swiotlb_free_coherent(struct device *hwdev, size_t size,
75485- void *vaddr, dma_addr_t dma_handle);
75486+ void *vaddr, dma_addr_t dma_handle,
75487+ struct dma_attrs *attrs);
75488
75489 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
75490 unsigned long offset, size_t size,
75491diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
75492index 4147d70..d356a10 100644
75493--- a/include/linux/syscalls.h
75494+++ b/include/linux/syscalls.h
75495@@ -97,8 +97,12 @@ struct sigaltstack;
75496 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
75497
75498 #define __SC_DECL(t, a) t a
75499-#define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
75500-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
75501+#define __TYPE_IS_SL(t) (__same_type((t)0, 0L))
75502+#define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
75503+#define __TYPE_IS_SLL(t) (__same_type((t)0, 0LL))
75504+#define __TYPE_IS_ULL(t) (__same_type((t)0, 0ULL))
75505+#define __TYPE_IS_LL(t) (__TYPE_IS_SLL(t) || __TYPE_IS_ULL(t))
75506+#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), __builtin_choose_expr(__TYPE_IS_ULL(t), 0ULL, 0LL), __builtin_choose_expr(__TYPE_IS_UL(t), 0UL, 0L))) a
75507 #define __SC_CAST(t, a) (t) a
75508 #define __SC_ARGS(t, a) a
75509 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
75510@@ -362,11 +366,11 @@ asmlinkage long sys_sync(void);
75511 asmlinkage long sys_fsync(unsigned int fd);
75512 asmlinkage long sys_fdatasync(unsigned int fd);
75513 asmlinkage long sys_bdflush(int func, long data);
75514-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
75515- char __user *type, unsigned long flags,
75516+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
75517+ const char __user *type, unsigned long flags,
75518 void __user *data);
75519-asmlinkage long sys_umount(char __user *name, int flags);
75520-asmlinkage long sys_oldumount(char __user *name);
75521+asmlinkage long sys_umount(const char __user *name, int flags);
75522+asmlinkage long sys_oldumount(const char __user *name);
75523 asmlinkage long sys_truncate(const char __user *path, long length);
75524 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
75525 asmlinkage long sys_stat(const char __user *filename,
75526@@ -578,7 +582,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
75527 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
75528 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
75529 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
75530- struct sockaddr __user *, int);
75531+ struct sockaddr __user *, int) __intentional_overflow(0);
75532 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
75533 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
75534 unsigned int vlen, unsigned flags);
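
The reworked __SC_LONG above widens a syscall argument to register width while preserving its signedness; the old macro always picked a signed long, which could sign-extend unsigned values. This presumably keeps the widened types honest for the size_overflow instrumentation. A sketch showing the type selection, with the macros copied from the hunk and the printf harness assumed:

#include <stdio.h>

#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#define __TYPE_IS_UL(t)  (__same_type((t)0, 0UL))
#define __TYPE_IS_SLL(t) (__same_type((t)0, 0LL))
#define __TYPE_IS_ULL(t) (__same_type((t)0, 0ULL))
#define __TYPE_IS_LL(t)  (__TYPE_IS_SLL(t) || __TYPE_IS_ULL(t))
#define __SC_LONG(t) typeof(__builtin_choose_expr(__TYPE_IS_LL(t),	\
	__builtin_choose_expr(__TYPE_IS_ULL(t), 0ULL, 0LL),		\
	__builtin_choose_expr(__TYPE_IS_UL(t), 0UL, 0L)))

int main(void)
{
	/* An unsigned argument now widens to an unsigned type, so it is
	 * zero- rather than sign-extended. */
	printf("unsigned long stays unsigned: %d\n",
	       __same_type((__SC_LONG(unsigned long))0, 0UL));
	printf("int stays signed long:        %d\n",
	       __same_type((__SC_LONG(int))0, 0L));
	return 0;
}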
75535diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
75536index 27b3b0b..e093dd9 100644
75537--- a/include/linux/syscore_ops.h
75538+++ b/include/linux/syscore_ops.h
75539@@ -16,7 +16,7 @@ struct syscore_ops {
75540 int (*suspend)(void);
75541 void (*resume)(void);
75542 void (*shutdown)(void);
75543-};
75544+} __do_const;
75545
75546 extern void register_syscore_ops(struct syscore_ops *ops);
75547 extern void unregister_syscore_ops(struct syscore_ops *ops);
75548diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
75549index 14a8ff2..af52bad 100644
75550--- a/include/linux/sysctl.h
75551+++ b/include/linux/sysctl.h
75552@@ -34,13 +34,13 @@ struct ctl_table_root;
75553 struct ctl_table_header;
75554 struct ctl_dir;
75555
75556-typedef struct ctl_table ctl_table;
75557-
75558 typedef int proc_handler (struct ctl_table *ctl, int write,
75559 void __user *buffer, size_t *lenp, loff_t *ppos);
75560
75561 extern int proc_dostring(struct ctl_table *, int,
75562 void __user *, size_t *, loff_t *);
75563+extern int proc_dostring_modpriv(struct ctl_table *, int,
75564+ void __user *, size_t *, loff_t *);
75565 extern int proc_dointvec(struct ctl_table *, int,
75566 void __user *, size_t *, loff_t *);
75567 extern int proc_dointvec_minmax(struct ctl_table *, int,
75568@@ -115,7 +115,9 @@ struct ctl_table
75569 struct ctl_table_poll *poll;
75570 void *extra1;
75571 void *extra2;
75572-};
75573+} __do_const;
75574+typedef struct ctl_table __no_const ctl_table_no_const;
75575+typedef struct ctl_table ctl_table;
75576
75577 struct ctl_node {
75578 struct rb_node node;
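
__do_const and the *_no_const typedefs added here, and on the many ops structures throughout this patch, belong to grsecurity's constify gcc plugin: instances of a __do_const struct are forced const into .rodata so their function pointers cannot be overwritten, while the __no_const typedef opts out for tables that must be filled in at runtime (as later hunks do with ctl_table_no_const in ip_vs and conntrack). A rough model with plain const standing in for the plugin:

#include <stdio.h>

/* 'struct ops' plays a __do_const ops table; 'ops_no_const' models the
 * __no_const escape hatch for tables built at runtime. */
struct ops {
	int (*show)(void);
};
typedef struct ops ops_no_const;

static int answer(void) { return 42; }

static const struct ops fixed_ops = { .show = answer };  /* in .rodata */

int main(void)
{
	ops_no_const runtime_ops = fixed_ops;	/* mutable working copy */

	runtime_ops.show = answer;		/* legal only on the copy */
	printf("%d\n", runtime_ops.show());
	return 0;
}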
75579diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
75580index e2cee22..3ddb921 100644
75581--- a/include/linux/sysfs.h
75582+++ b/include/linux/sysfs.h
75583@@ -31,7 +31,8 @@ struct attribute {
75584 struct lock_class_key *key;
75585 struct lock_class_key skey;
75586 #endif
75587-};
75588+} __do_const;
75589+typedef struct attribute __no_const attribute_no_const;
75590
75591 /**
75592 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
75593@@ -59,8 +60,8 @@ struct attribute_group {
75594 umode_t (*is_visible)(struct kobject *,
75595 struct attribute *, int);
75596 struct attribute **attrs;
75597-};
75598-
75599+} __do_const;
75600+typedef struct attribute_group __no_const attribute_group_no_const;
75601
75602
75603 /**
75604@@ -107,7 +108,8 @@ struct bin_attribute {
75605 char *, loff_t, size_t);
75606 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
75607 struct vm_area_struct *vma);
75608-};
75609+} __do_const;
75610+typedef struct bin_attribute __no_const bin_attribute_no_const;
75611
75612 /**
75613 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
75614diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
75615index 7faf933..9b85a0c 100644
75616--- a/include/linux/sysrq.h
75617+++ b/include/linux/sysrq.h
75618@@ -16,6 +16,7 @@
75619
75620 #include <linux/errno.h>
75621 #include <linux/types.h>
75622+#include <linux/compiler.h>
75623
75624 /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
75625 #define SYSRQ_DEFAULT_ENABLE 1
75626@@ -36,7 +37,7 @@ struct sysrq_key_op {
75627 char *help_msg;
75628 char *action_msg;
75629 int enable_mask;
75630-};
75631+} __do_const;
75632
75633 #ifdef CONFIG_MAGIC_SYSRQ
75634
75635diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
75636index e7e0473..7989295 100644
75637--- a/include/linux/thread_info.h
75638+++ b/include/linux/thread_info.h
75639@@ -148,6 +148,15 @@ static inline bool test_and_clear_restore_sigmask(void)
75640 #error "no set_restore_sigmask() provided and default one won't work"
75641 #endif
75642
75643+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
75644+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
75645+{
75646+#ifndef CONFIG_PAX_USERCOPY_DEBUG
75647+ if (!__builtin_constant_p(n))
75648+#endif
75649+ __check_object_size(ptr, n, to_user);
75650+}
75651+
75652 #endif /* __KERNEL__ */
75653
75654 #endif /* _LINUX_THREAD_INFO_H */
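
check_object_size(), added above, is the entry point for PAX_USERCOPY: it forwards to __check_object_size() only when the copy length is not a compile-time constant, since constant-size copies are vetted statically, and the gate is bypassed under CONFIG_PAX_USERCOPY_DEBUG so that everything is checked. A userspace sketch of the __builtin_constant_p gate with the runtime check stubbed out; the macro form (instead of the kernel's inline) is just so the demo works without optimization:

#include <stdio.h>
#include <string.h>

static void __check_object_size(const void *ptr, unsigned long n)
{
	printf("runtime check: %p, %lu bytes\n", (void *)ptr, n);  /* stub */
}

#define check_object_size(ptr, n) do {		\
	if (!__builtin_constant_p(n))		\
		__check_object_size(ptr, n);	\
} while (0)

int main(void)
{
	char buf[64];
	unsigned long len = strlen("dynamic");

	check_object_size(buf, 16UL);	/* constant size: compiled away */
	check_object_size(buf, len);	/* variable size: checked */
	return 0;
}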
75655diff --git a/include/linux/tty.h b/include/linux/tty.h
75656index 8780bd2..d1ae08b 100644
75657--- a/include/linux/tty.h
75658+++ b/include/linux/tty.h
75659@@ -194,7 +194,7 @@ struct tty_port {
75660 const struct tty_port_operations *ops; /* Port operations */
75661 spinlock_t lock; /* Lock protecting tty field */
75662 int blocked_open; /* Waiting to open */
75663- int count; /* Usage count */
75664+ atomic_t count; /* Usage count */
75665 wait_queue_head_t open_wait; /* Open waiters */
75666 wait_queue_head_t close_wait; /* Close waiters */
75667 wait_queue_head_t delta_msr_wait; /* Modem status change */
75668@@ -550,7 +550,7 @@ extern int tty_port_open(struct tty_port *port,
75669 struct tty_struct *tty, struct file *filp);
75670 static inline int tty_port_users(struct tty_port *port)
75671 {
75672- return port->count + port->blocked_open;
75673+ return atomic_read(&port->count) + port->blocked_open;
75674 }
75675
75676 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
75677diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
75678index 756a609..b302dd6 100644
75679--- a/include/linux/tty_driver.h
75680+++ b/include/linux/tty_driver.h
75681@@ -285,7 +285,7 @@ struct tty_operations {
75682 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
75683 #endif
75684 const struct file_operations *proc_fops;
75685-};
75686+} __do_const;
75687
75688 struct tty_driver {
75689 int magic; /* magic number for this structure */
75690diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
75691index 58390c7..95e214c 100644
75692--- a/include/linux/tty_ldisc.h
75693+++ b/include/linux/tty_ldisc.h
75694@@ -146,7 +146,7 @@ struct tty_ldisc_ops {
75695
75696 struct module *owner;
75697
75698- int refcount;
75699+ atomic_t refcount;
75700 };
75701
75702 struct tty_ldisc {
75703diff --git a/include/linux/types.h b/include/linux/types.h
75704index 4d118ba..c3ee9bf 100644
75705--- a/include/linux/types.h
75706+++ b/include/linux/types.h
75707@@ -176,10 +176,26 @@ typedef struct {
75708 int counter;
75709 } atomic_t;
75710
75711+#ifdef CONFIG_PAX_REFCOUNT
75712+typedef struct {
75713+ int counter;
75714+} atomic_unchecked_t;
75715+#else
75716+typedef atomic_t atomic_unchecked_t;
75717+#endif
75718+
75719 #ifdef CONFIG_64BIT
75720 typedef struct {
75721 long counter;
75722 } atomic64_t;
75723+
75724+#ifdef CONFIG_PAX_REFCOUNT
75725+typedef struct {
75726+ long counter;
75727+} atomic64_unchecked_t;
75728+#else
75729+typedef atomic64_t atomic64_unchecked_t;
75730+#endif
75731 #endif
75732
75733 struct list_head {
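
atomic_unchecked_t, introduced above and used for the many counter conversions in this patch, has the same layout as atomic_t but is exempt from PAX_REFCOUNT's overflow detection; with PAX_REFCOUNT off it collapses to a plain typedef. A C11 sketch of the checked/unchecked split; the saturate-and-abort policy is an assumption standing in for the kernel's trap handler:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static void atomic_inc_checked(atomic_int *v)
{
	if (atomic_fetch_add(v, 1) == INT_MAX) {
		fprintf(stderr, "refcount overflow\n");	/* trap stand-in */
		abort();
	}
}

static void atomic_inc_unchecked(atomic_int *v)
{
	atomic_fetch_add(v, 1);		/* may wrap; harmless for stats */
}

int main(void)
{
	atomic_int refs = 1;
	atomic_int stats = INT_MAX;

	atomic_inc_checked(&refs);	/* fine: 1 -> 2 */
	atomic_inc_unchecked(&stats);	/* wraps to INT_MIN, by design */
	printf("refs=%d stats=%d\n", atomic_load(&refs), atomic_load(&stats));
	return 0;
}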
75734diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
75735index 5ca0951..ab496a5 100644
75736--- a/include/linux/uaccess.h
75737+++ b/include/linux/uaccess.h
75738@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
75739 long ret; \
75740 mm_segment_t old_fs = get_fs(); \
75741 \
75742- set_fs(KERNEL_DS); \
75743 pagefault_disable(); \
75744- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
75745- pagefault_enable(); \
75746+ set_fs(KERNEL_DS); \
75747+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
75748 set_fs(old_fs); \
75749+ pagefault_enable(); \
75750 ret; \
75751 })
75752
75753diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
75754index 8e522cbc..aa8572d 100644
75755--- a/include/linux/uidgid.h
75756+++ b/include/linux/uidgid.h
75757@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
75758
75759 #endif /* CONFIG_USER_NS */
75760
75761+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
75762+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
75763+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
75764+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
75765+
75766 #endif /* _LINUX_UIDGID_H */
75767diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
75768index 99c1b4d..562e6f3 100644
75769--- a/include/linux/unaligned/access_ok.h
75770+++ b/include/linux/unaligned/access_ok.h
75771@@ -4,34 +4,34 @@
75772 #include <linux/kernel.h>
75773 #include <asm/byteorder.h>
75774
75775-static inline u16 get_unaligned_le16(const void *p)
75776+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
75777 {
75778- return le16_to_cpup((__le16 *)p);
75779+ return le16_to_cpup((const __le16 *)p);
75780 }
75781
75782-static inline u32 get_unaligned_le32(const void *p)
75783+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
75784 {
75785- return le32_to_cpup((__le32 *)p);
75786+ return le32_to_cpup((const __le32 *)p);
75787 }
75788
75789-static inline u64 get_unaligned_le64(const void *p)
75790+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
75791 {
75792- return le64_to_cpup((__le64 *)p);
75793+ return le64_to_cpup((const __le64 *)p);
75794 }
75795
75796-static inline u16 get_unaligned_be16(const void *p)
75797+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
75798 {
75799- return be16_to_cpup((__be16 *)p);
75800+ return be16_to_cpup((const __be16 *)p);
75801 }
75802
75803-static inline u32 get_unaligned_be32(const void *p)
75804+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
75805 {
75806- return be32_to_cpup((__be32 *)p);
75807+ return be32_to_cpup((const __be32 *)p);
75808 }
75809
75810-static inline u64 get_unaligned_be64(const void *p)
75811+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
75812 {
75813- return be64_to_cpup((__be64 *)p);
75814+ return be64_to_cpup((const __be64 *)p);
75815 }
75816
75817 static inline void put_unaligned_le16(u16 val, void *p)
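
The __intentional_overflow(-1) annotations on the get_unaligned_* helpers above appear to exempt them from the size_overflow plugin, since byte-assembly and byte-swap arithmetic may wrap by design; the added const casts are cleanups. For reference, a portable userspace equivalent of one helper (memcpy-based, little-endian host assumed, name hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* memcpy-based unaligned load; compilers fold this to a single move on
 * ISAs that permit unaligned access. */
static inline uint16_t load_unaligned_le16(const void *p)
{
	uint16_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

int main(void)
{
	unsigned char buf[3] = { 0x00, 0x34, 0x12 };

	printf("0x%04x\n", load_unaligned_le16(buf + 1));	/* 0x1234 */
	return 0;
}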
75818diff --git a/include/linux/usb.h b/include/linux/usb.h
75819index a0bee5a..5533a52 100644
75820--- a/include/linux/usb.h
75821+++ b/include/linux/usb.h
75822@@ -552,7 +552,7 @@ struct usb_device {
75823 int maxchild;
75824
75825 u32 quirks;
75826- atomic_t urbnum;
75827+ atomic_unchecked_t urbnum;
75828
75829 unsigned long active_duration;
75830
75831@@ -1607,7 +1607,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
75832
75833 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
75834 __u8 request, __u8 requesttype, __u16 value, __u16 index,
75835- void *data, __u16 size, int timeout);
75836+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
75837 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
75838 void *data, int len, int *actual_length, int timeout);
75839 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
75840diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
75841index e452ba6..78f8e80 100644
75842--- a/include/linux/usb/renesas_usbhs.h
75843+++ b/include/linux/usb/renesas_usbhs.h
75844@@ -39,7 +39,7 @@ enum {
75845 */
75846 struct renesas_usbhs_driver_callback {
75847 int (*notify_hotplug)(struct platform_device *pdev);
75848-};
75849+} __no_const;
75850
75851 /*
75852 * callback functions for platform
75853diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
75854index 6f8fbcf..8259001 100644
75855--- a/include/linux/vermagic.h
75856+++ b/include/linux/vermagic.h
75857@@ -25,9 +25,35 @@
75858 #define MODULE_ARCH_VERMAGIC ""
75859 #endif
75860
75861+#ifdef CONFIG_PAX_REFCOUNT
75862+#define MODULE_PAX_REFCOUNT "REFCOUNT "
75863+#else
75864+#define MODULE_PAX_REFCOUNT ""
75865+#endif
75866+
75867+#ifdef CONSTIFY_PLUGIN
75868+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
75869+#else
75870+#define MODULE_CONSTIFY_PLUGIN ""
75871+#endif
75872+
75873+#ifdef STACKLEAK_PLUGIN
75874+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
75875+#else
75876+#define MODULE_STACKLEAK_PLUGIN ""
75877+#endif
75878+
75879+#ifdef CONFIG_GRKERNSEC
75880+#define MODULE_GRSEC "GRSEC "
75881+#else
75882+#define MODULE_GRSEC ""
75883+#endif
75884+
75885 #define VERMAGIC_STRING \
75886 UTS_RELEASE " " \
75887 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
75888 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
75889- MODULE_ARCH_VERMAGIC
75890+ MODULE_ARCH_VERMAGIC \
75891+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
75892+ MODULE_GRSEC
75893
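
The extra VERMAGIC_STRING tokens above encode the hardening configuration into the version magic, so a module built without PAX_REFCOUNT, the constify or stackleak plugins, or grsecurity itself is refused by a kernel built with them. A toy model of the literal pasting and the loader-side comparison; the release string and reduced token set are assumptions for the demo:

#include <stdio.h>
#include <string.h>

#define UTS_RELEASE "3.10.7-grsec"	/* assumed release string */

#ifdef CONFIG_PAX_REFCOUNT
#define MODULE_PAX_REFCOUNT "REFCOUNT "
#else
#define MODULE_PAX_REFCOUNT ""
#endif

/* Adjacent string literals paste at compile time, as in the hunk above. */
#define VERMAGIC_STRING UTS_RELEASE " SMP mod_unload " MODULE_PAX_REFCOUNT

int main(void)
{
	/* vermagic recorded in a module built without REFCOUNT: */
	const char *module_vermagic = "3.10.7-grsec SMP mod_unload ";

	if (strcmp(module_vermagic, VERMAGIC_STRING) != 0)
		puts("module rejected: vermagic mismatch");
	else
		puts("module accepted");
	return 0;
}

Built as-is the strings match; rebuilding with -DCONFIG_PAX_REFCOUNT makes the kernel side grow a "REFCOUNT " token and the stale module is rejected.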
75894diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
75895index 7d5773a..541c01c 100644
75896--- a/include/linux/vmalloc.h
75897+++ b/include/linux/vmalloc.h
75898@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
75899 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
75900 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
75901 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
75902+
75903+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
75904+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
75905+#endif
75906+
75907 /* bits [20..32] reserved for arch specific ioremap internals */
75908
75909 /*
75910@@ -75,7 +80,7 @@ extern void *vmalloc_32_user(unsigned long size);
75911 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
75912 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
75913 unsigned long start, unsigned long end, gfp_t gfp_mask,
75914- pgprot_t prot, int node, const void *caller);
75915+ pgprot_t prot, int node, const void *caller) __size_overflow(1);
75916 extern void vfree(const void *addr);
75917
75918 extern void *vmap(struct page **pages, unsigned int count,
75919@@ -137,8 +142,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
75920 extern void free_vm_area(struct vm_struct *area);
75921
75922 /* for /dev/kmem */
75923-extern long vread(char *buf, char *addr, unsigned long count);
75924-extern long vwrite(char *buf, char *addr, unsigned long count);
75925+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
75926+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
75927
75928 /*
75929 * Internals. Dont't use..
75930diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
75931index c586679..f06b389 100644
75932--- a/include/linux/vmstat.h
75933+++ b/include/linux/vmstat.h
75934@@ -90,18 +90,18 @@ static inline void vm_events_fold_cpu(int cpu)
75935 /*
75936 * Zone based page accounting with per cpu differentials.
75937 */
75938-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
75939+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
75940
75941 static inline void zone_page_state_add(long x, struct zone *zone,
75942 enum zone_stat_item item)
75943 {
75944- atomic_long_add(x, &zone->vm_stat[item]);
75945- atomic_long_add(x, &vm_stat[item]);
75946+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
75947+ atomic_long_add_unchecked(x, &vm_stat[item]);
75948 }
75949
75950 static inline unsigned long global_page_state(enum zone_stat_item item)
75951 {
75952- long x = atomic_long_read(&vm_stat[item]);
75953+ long x = atomic_long_read_unchecked(&vm_stat[item]);
75954 #ifdef CONFIG_SMP
75955 if (x < 0)
75956 x = 0;
75957@@ -112,7 +112,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
75958 static inline unsigned long zone_page_state(struct zone *zone,
75959 enum zone_stat_item item)
75960 {
75961- long x = atomic_long_read(&zone->vm_stat[item]);
75962+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
75963 #ifdef CONFIG_SMP
75964 if (x < 0)
75965 x = 0;
75966@@ -129,7 +129,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
75967 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
75968 enum zone_stat_item item)
75969 {
75970- long x = atomic_long_read(&zone->vm_stat[item]);
75971+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
75972
75973 #ifdef CONFIG_SMP
75974 int cpu;
75975@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
75976
75977 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
75978 {
75979- atomic_long_inc(&zone->vm_stat[item]);
75980- atomic_long_inc(&vm_stat[item]);
75981+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
75982+ atomic_long_inc_unchecked(&vm_stat[item]);
75983 }
75984
75985 static inline void __inc_zone_page_state(struct page *page,
75986@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
75987
75988 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
75989 {
75990- atomic_long_dec(&zone->vm_stat[item]);
75991- atomic_long_dec(&vm_stat[item]);
75992+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
75993+ atomic_long_dec_unchecked(&vm_stat[item]);
75994 }
75995
75996 static inline void __dec_zone_page_state(struct page *page,
75997diff --git a/include/linux/xattr.h b/include/linux/xattr.h
75998index fdbafc6..49dfe4f 100644
75999--- a/include/linux/xattr.h
76000+++ b/include/linux/xattr.h
76001@@ -28,7 +28,7 @@ struct xattr_handler {
76002 size_t size, int handler_flags);
76003 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
76004 size_t size, int flags, int handler_flags);
76005-};
76006+} __do_const;
76007
76008 struct xattr {
76009 char *name;
76010@@ -37,6 +37,9 @@ struct xattr {
76011 };
76012
76013 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
76014+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
76015+ssize_t pax_getxattr(struct dentry *, void *, size_t);
76016+#endif
76017 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
76018 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
76019 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
76020diff --git a/include/linux/zlib.h b/include/linux/zlib.h
76021index 9c5a6b4..09c9438 100644
76022--- a/include/linux/zlib.h
76023+++ b/include/linux/zlib.h
76024@@ -31,6 +31,7 @@
76025 #define _ZLIB_H
76026
76027 #include <linux/zconf.h>
76028+#include <linux/compiler.h>
76029
76030 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
76031 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
76032@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
76033
76034 /* basic functions */
76035
76036-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
76037+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
76038 /*
76039 Returns the number of bytes that needs to be allocated for a per-
76040 stream workspace with the specified parameters. A pointer to this
76041diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
76042index 95d1c91..6798cca 100644
76043--- a/include/media/v4l2-dev.h
76044+++ b/include/media/v4l2-dev.h
76045@@ -76,7 +76,7 @@ struct v4l2_file_operations {
76046 int (*mmap) (struct file *, struct vm_area_struct *);
76047 int (*open) (struct file *);
76048 int (*release) (struct file *);
76049-};
76050+} __do_const;
76051
76052 /*
76053 * Newer version of video_device, handled by videodev2.c
76054diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
76055index adcbb20..62c2559 100644
76056--- a/include/net/9p/transport.h
76057+++ b/include/net/9p/transport.h
76058@@ -57,7 +57,7 @@ struct p9_trans_module {
76059 int (*cancel) (struct p9_client *, struct p9_req_t *req);
76060 int (*zc_request)(struct p9_client *, struct p9_req_t *,
76061 char *, char *, int , int, int, int);
76062-};
76063+} __do_const;
76064
76065 void v9fs_register_trans(struct p9_trans_module *m);
76066 void v9fs_unregister_trans(struct p9_trans_module *m);
76067diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
76068index fb94cf1..7c0c987 100644
76069--- a/include/net/bluetooth/l2cap.h
76070+++ b/include/net/bluetooth/l2cap.h
76071@@ -551,7 +551,7 @@ struct l2cap_ops {
76072 void (*defer) (struct l2cap_chan *chan);
76073 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
76074 unsigned long len, int nb);
76075-};
76076+} __do_const;
76077
76078 struct l2cap_conn {
76079 struct hci_conn *hcon;
76080diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
76081index f2ae33d..c457cf0 100644
76082--- a/include/net/caif/cfctrl.h
76083+++ b/include/net/caif/cfctrl.h
76084@@ -52,7 +52,7 @@ struct cfctrl_rsp {
76085 void (*radioset_rsp)(void);
76086 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
76087 struct cflayer *client_layer);
76088-};
76089+} __no_const;
76090
76091 /* Link Setup Parameters for CAIF-Links. */
76092 struct cfctrl_link_param {
76093@@ -101,8 +101,8 @@ struct cfctrl_request_info {
76094 struct cfctrl {
76095 struct cfsrvl serv;
76096 struct cfctrl_rsp res;
76097- atomic_t req_seq_no;
76098- atomic_t rsp_seq_no;
76099+ atomic_unchecked_t req_seq_no;
76100+ atomic_unchecked_t rsp_seq_no;
76101 struct list_head list;
76102 /* Protects from simultaneous access to first_req list */
76103 spinlock_t info_list_lock;
76104diff --git a/include/net/flow.h b/include/net/flow.h
76105index 628e11b..4c475df 100644
76106--- a/include/net/flow.h
76107+++ b/include/net/flow.h
76108@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
76109
76110 extern void flow_cache_flush(void);
76111 extern void flow_cache_flush_deferred(void);
76112-extern atomic_t flow_cache_genid;
76113+extern atomic_unchecked_t flow_cache_genid;
76114
76115 #endif
76116diff --git a/include/net/genetlink.h b/include/net/genetlink.h
76117index 93024a4..eeb6b6e 100644
76118--- a/include/net/genetlink.h
76119+++ b/include/net/genetlink.h
76120@@ -119,7 +119,7 @@ struct genl_ops {
76121 struct netlink_callback *cb);
76122 int (*done)(struct netlink_callback *cb);
76123 struct list_head ops_list;
76124-};
76125+} __do_const;
76126
76127 extern int genl_register_family(struct genl_family *family);
76128 extern int genl_register_family_with_ops(struct genl_family *family,
76129diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
76130index 734d9b5..48a9a4b 100644
76131--- a/include/net/gro_cells.h
76132+++ b/include/net/gro_cells.h
76133@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
76134 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
76135
76136 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
76137- atomic_long_inc(&dev->rx_dropped);
76138+ atomic_long_inc_unchecked(&dev->rx_dropped);
76139 kfree_skb(skb);
76140 return;
76141 }
76142diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
76143index de2c785..0588a6b 100644
76144--- a/include/net/inet_connection_sock.h
76145+++ b/include/net/inet_connection_sock.h
76146@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
76147 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
76148 int (*bind_conflict)(const struct sock *sk,
76149 const struct inet_bind_bucket *tb, bool relax);
76150-};
76151+} __do_const;
76152
76153 /** inet_connection_sock - INET connection oriented sock
76154 *
76155diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
76156index 53f464d..ba76aaa 100644
76157--- a/include/net/inetpeer.h
76158+++ b/include/net/inetpeer.h
76159@@ -47,8 +47,8 @@ struct inet_peer {
76160 */
76161 union {
76162 struct {
76163- atomic_t rid; /* Frag reception counter */
76164- atomic_t ip_id_count; /* IP ID for the next packet */
76165+ atomic_unchecked_t rid; /* Frag reception counter */
76166+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
76167 };
76168 struct rcu_head rcu;
76169 struct inet_peer *gc_next;
76170@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
76171 more++;
76172 inet_peer_refcheck(p);
76173 do {
76174- old = atomic_read(&p->ip_id_count);
76175+ old = atomic_read_unchecked(&p->ip_id_count);
76176 new = old + more;
76177 if (!new)
76178 new = 1;
76179- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
76180+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
76181 return new;
76182 }
76183
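
The inet_getid() hunk above keeps the lock-free shape while switching to unchecked atomics: the loop reserves more+1 IDs with a compare-and-swap and skips 0, and since an IP ID generator is expected to wrap, the checked cmpxchg would trap spuriously. A C11 userspace model of the same loop:

#include <stdatomic.h>
#include <stdio.h>

/* Claim more+1 IDs and never return 0. */
static int inet_getid_model(atomic_int *ip_id_count, int more)
{
	int old, new;

	more++;
	do {
		old = atomic_load(ip_id_count);
		new = old + more;
		if (!new)
			new = 1;
	} while (!atomic_compare_exchange_weak(ip_id_count, &old, new));
	return new;
}

int main(void)
{
	atomic_int id = -1;

	printf("%d\n", inet_getid_model(&id, 0));	/* 0 is skipped: 1 */
	printf("%d\n", inet_getid_model(&id, 0));	/* 2 */
	return 0;
}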
76184diff --git a/include/net/ip.h b/include/net/ip.h
76185index a68f838..74518ab 100644
76186--- a/include/net/ip.h
76187+++ b/include/net/ip.h
76188@@ -202,7 +202,7 @@ extern struct local_ports {
76189 } sysctl_local_ports;
76190 extern void inet_get_local_port_range(int *low, int *high);
76191
76192-extern unsigned long *sysctl_local_reserved_ports;
76193+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
76194 static inline int inet_is_reserved_local_port(int port)
76195 {
76196 return test_bit(port, sysctl_local_reserved_ports);
76197diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
76198index e49db91..76a81de 100644
76199--- a/include/net/ip_fib.h
76200+++ b/include/net/ip_fib.h
76201@@ -167,7 +167,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
76202
76203 #define FIB_RES_SADDR(net, res) \
76204 ((FIB_RES_NH(res).nh_saddr_genid == \
76205- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
76206+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
76207 FIB_RES_NH(res).nh_saddr : \
76208 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
76209 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
76210diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
76211index 4c062cc..3562c31 100644
76212--- a/include/net/ip_vs.h
76213+++ b/include/net/ip_vs.h
76214@@ -612,7 +612,7 @@ struct ip_vs_conn {
76215 struct ip_vs_conn *control; /* Master control connection */
76216 atomic_t n_control; /* Number of controlled ones */
76217 struct ip_vs_dest *dest; /* real server */
76218- atomic_t in_pkts; /* incoming packet counter */
76219+ atomic_unchecked_t in_pkts; /* incoming packet counter */
76220
76221 /* packet transmitter for different forwarding methods. If it
76222 mangles the packet, it must return NF_DROP or better NF_STOLEN,
76223@@ -761,7 +761,7 @@ struct ip_vs_dest {
76224 __be16 port; /* port number of the server */
76225 union nf_inet_addr addr; /* IP address of the server */
76226 volatile unsigned int flags; /* dest status flags */
76227- atomic_t conn_flags; /* flags to copy to conn */
76228+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
76229 atomic_t weight; /* server weight */
76230
76231 atomic_t refcnt; /* reference counter */
76232@@ -1013,11 +1013,11 @@ struct netns_ipvs {
76233 /* ip_vs_lblc */
76234 int sysctl_lblc_expiration;
76235 struct ctl_table_header *lblc_ctl_header;
76236- struct ctl_table *lblc_ctl_table;
76237+ ctl_table_no_const *lblc_ctl_table;
76238 /* ip_vs_lblcr */
76239 int sysctl_lblcr_expiration;
76240 struct ctl_table_header *lblcr_ctl_header;
76241- struct ctl_table *lblcr_ctl_table;
76242+ ctl_table_no_const *lblcr_ctl_table;
76243 /* ip_vs_est */
76244 struct list_head est_list; /* estimator list */
76245 spinlock_t est_lock;
76246diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
76247index 80ffde3..968b0f4 100644
76248--- a/include/net/irda/ircomm_tty.h
76249+++ b/include/net/irda/ircomm_tty.h
76250@@ -35,6 +35,7 @@
76251 #include <linux/termios.h>
76252 #include <linux/timer.h>
76253 #include <linux/tty.h> /* struct tty_struct */
76254+#include <asm/local.h>
76255
76256 #include <net/irda/irias_object.h>
76257 #include <net/irda/ircomm_core.h>
76258diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
76259index 714cc9a..ea05f3e 100644
76260--- a/include/net/iucv/af_iucv.h
76261+++ b/include/net/iucv/af_iucv.h
76262@@ -149,7 +149,7 @@ struct iucv_skb_cb {
76263 struct iucv_sock_list {
76264 struct hlist_head head;
76265 rwlock_t lock;
76266- atomic_t autobind_name;
76267+ atomic_unchecked_t autobind_name;
76268 };
76269
76270 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
76271diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
76272index df83f69..9b640b8 100644
76273--- a/include/net/llc_c_ac.h
76274+++ b/include/net/llc_c_ac.h
76275@@ -87,7 +87,7 @@
76276 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
76277 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
76278
76279-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
76280+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
76281
76282 extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
76283 extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
76284diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
76285index 6ca3113..f8026dd 100644
76286--- a/include/net/llc_c_ev.h
76287+++ b/include/net/llc_c_ev.h
76288@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
76289 return (struct llc_conn_state_ev *)skb->cb;
76290 }
76291
76292-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
76293-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
76294+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
76295+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
76296
76297 extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
76298 extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
76299diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
76300index 0e79cfb..f46db31 100644
76301--- a/include/net/llc_c_st.h
76302+++ b/include/net/llc_c_st.h
76303@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
76304 u8 next_state;
76305 llc_conn_ev_qfyr_t *ev_qualifiers;
76306 llc_conn_action_t *ev_actions;
76307-};
76308+} __do_const;
76309
76310 struct llc_conn_state {
76311 u8 current_state;
76312diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
76313index 37a3bbd..55a4241 100644
76314--- a/include/net/llc_s_ac.h
76315+++ b/include/net/llc_s_ac.h
76316@@ -23,7 +23,7 @@
76317 #define SAP_ACT_TEST_IND 9
76318
76319 /* All action functions must look like this */
76320-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
76321+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
76322
76323 extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
76324 struct sk_buff *skb);
76325diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
76326index 567c681..cd73ac0 100644
76327--- a/include/net/llc_s_st.h
76328+++ b/include/net/llc_s_st.h
76329@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
76330 llc_sap_ev_t ev;
76331 u8 next_state;
76332 llc_sap_action_t *ev_actions;
76333-};
76334+} __do_const;
76335
76336 struct llc_sap_state {
76337 u8 curr_state;
76338diff --git a/include/net/mac80211.h b/include/net/mac80211.h
76339index 885898a..cdace34 100644
76340--- a/include/net/mac80211.h
76341+++ b/include/net/mac80211.h
76342@@ -4205,7 +4205,7 @@ struct rate_control_ops {
76343 void (*add_sta_debugfs)(void *priv, void *priv_sta,
76344 struct dentry *dir);
76345 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
76346-};
76347+} __do_const;
76348
76349 static inline int rate_supported(struct ieee80211_sta *sta,
76350 enum ieee80211_band band,
76351diff --git a/include/net/neighbour.h b/include/net/neighbour.h
76352index 7e748ad..5c6229b 100644
76353--- a/include/net/neighbour.h
76354+++ b/include/net/neighbour.h
76355@@ -123,7 +123,7 @@ struct neigh_ops {
76356 void (*error_report)(struct neighbour *, struct sk_buff *);
76357 int (*output)(struct neighbour *, struct sk_buff *);
76358 int (*connected_output)(struct neighbour *, struct sk_buff *);
76359-};
76360+} __do_const;
76361
76362 struct pneigh_entry {
76363 struct pneigh_entry *next;
76364diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
76365index b176978..ea169f4 100644
76366--- a/include/net/net_namespace.h
76367+++ b/include/net/net_namespace.h
76368@@ -117,7 +117,7 @@ struct net {
76369 #endif
76370 struct netns_ipvs *ipvs;
76371 struct sock *diag_nlsk;
76372- atomic_t rt_genid;
76373+ atomic_unchecked_t rt_genid;
76374 };
76375
76376 /*
76377@@ -274,7 +274,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
76378 #define __net_init __init
76379 #define __net_exit __exit_refok
76380 #define __net_initdata __initdata
76381+#ifdef CONSTIFY_PLUGIN
76382 #define __net_initconst __initconst
76383+#else
76384+#define __net_initconst __initdata
76385+#endif
76386 #endif
76387
76388 struct pernet_operations {
76389@@ -284,7 +288,7 @@ struct pernet_operations {
76390 void (*exit_batch)(struct list_head *net_exit_list);
76391 int *id;
76392 size_t size;
76393-};
76394+} __do_const;
76395
76396 /*
76397 * Use these carefully. If you implement a network device and it
76398@@ -332,12 +336,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
76399
76400 static inline int rt_genid(struct net *net)
76401 {
76402- return atomic_read(&net->rt_genid);
76403+ return atomic_read_unchecked(&net->rt_genid);
76404 }
76405
76406 static inline void rt_genid_bump(struct net *net)
76407 {
76408- atomic_inc(&net->rt_genid);
76409+ atomic_inc_unchecked(&net->rt_genid);
76410 }
76411
76412 #endif /* __NET_NET_NAMESPACE_H */
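
rt_genid, converted above, is a generation counter: cached routes record the genid in force when they were created, and rt_genid_bump() invalidates them all at once. Wraparound risks at most one spurious invalidation, which is presumably why atomic_unchecked_t is safe here. A compact sketch of the pattern, with the route struct invented for illustration:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int rt_genid;	/* net->rt_genid stand-in */

struct cached_route {
	int genid;		/* generation the entry was created under */
	const char *dst;
};

static int route_is_stale(const struct cached_route *rt)
{
	return rt->genid != atomic_load(&rt_genid);
}

int main(void)
{
	struct cached_route rt = { atomic_load(&rt_genid), "192.0.2.1" };

	printf("stale: %d\n", route_is_stale(&rt));	/* 0 */
	atomic_fetch_add(&rt_genid, 1);			/* rt_genid_bump() */
	printf("stale: %d\n", route_is_stale(&rt));	/* 1 */
	return 0;
}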
76413diff --git a/include/net/netdma.h b/include/net/netdma.h
76414index 8ba8ce2..99b7fff 100644
76415--- a/include/net/netdma.h
76416+++ b/include/net/netdma.h
76417@@ -24,7 +24,7 @@
76418 #include <linux/dmaengine.h>
76419 #include <linux/skbuff.h>
76420
76421-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
76422+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
76423 struct sk_buff *skb, int offset, struct iovec *to,
76424 size_t len, struct dma_pinned_list *pinned_list);
76425
76426diff --git a/include/net/netlink.h b/include/net/netlink.h
76427index 9690b0f..87aded7 100644
76428--- a/include/net/netlink.h
76429+++ b/include/net/netlink.h
76430@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
76431 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
76432 {
76433 if (mark)
76434- skb_trim(skb, (unsigned char *) mark - skb->data);
76435+ skb_trim(skb, (const unsigned char *) mark - skb->data);
76436 }
76437
76438 /**
76439diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
76440index c9c0c53..53f24c3 100644
76441--- a/include/net/netns/conntrack.h
76442+++ b/include/net/netns/conntrack.h
76443@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
76444 struct nf_proto_net {
76445 #ifdef CONFIG_SYSCTL
76446 struct ctl_table_header *ctl_table_header;
76447- struct ctl_table *ctl_table;
76448+ ctl_table_no_const *ctl_table;
76449 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
76450 struct ctl_table_header *ctl_compat_header;
76451- struct ctl_table *ctl_compat_table;
76452+ ctl_table_no_const *ctl_compat_table;
76453 #endif
76454 #endif
76455 unsigned int users;
76456@@ -58,7 +58,7 @@ struct nf_ip_net {
76457 struct nf_icmp_net icmpv6;
76458 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
76459 struct ctl_table_header *ctl_table_header;
76460- struct ctl_table *ctl_table;
76461+ ctl_table_no_const *ctl_table;
76462 #endif
76463 };
76464
76465diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
76466index 2ba9de8..47bd6c7 100644
76467--- a/include/net/netns/ipv4.h
76468+++ b/include/net/netns/ipv4.h
76469@@ -67,7 +67,7 @@ struct netns_ipv4 {
76470 kgid_t sysctl_ping_group_range[2];
76471 long sysctl_tcp_mem[3];
76472
76473- atomic_t dev_addr_genid;
76474+ atomic_unchecked_t dev_addr_genid;
76475
76476 #ifdef CONFIG_IP_MROUTE
76477 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
76478diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
76479index 005e2c2..023d340 100644
76480--- a/include/net/netns/ipv6.h
76481+++ b/include/net/netns/ipv6.h
76482@@ -71,7 +71,7 @@ struct netns_ipv6 {
76483 struct fib_rules_ops *mr6_rules_ops;
76484 #endif
76485 #endif
76486- atomic_t dev_addr_genid;
76487+ atomic_unchecked_t dev_addr_genid;
76488 };
76489
76490 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
76491diff --git a/include/net/protocol.h b/include/net/protocol.h
76492index 047c047..b9dad15 100644
76493--- a/include/net/protocol.h
76494+++ b/include/net/protocol.h
76495@@ -44,7 +44,7 @@ struct net_protocol {
76496 void (*err_handler)(struct sk_buff *skb, u32 info);
76497 unsigned int no_policy:1,
76498 netns_ok:1;
76499-};
76500+} __do_const;
76501
76502 #if IS_ENABLED(CONFIG_IPV6)
76503 struct inet6_protocol {
76504@@ -57,7 +57,7 @@ struct inet6_protocol {
76505 u8 type, u8 code, int offset,
76506 __be32 info);
76507 unsigned int flags; /* INET6_PROTO_xxx */
76508-};
76509+} __do_const;
76510
76511 #define INET6_PROTO_NOPOLICY 0x1
76512 #define INET6_PROTO_FINAL 0x2
76513diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
76514index 7026648..584cc8c 100644
76515--- a/include/net/rtnetlink.h
76516+++ b/include/net/rtnetlink.h
76517@@ -81,7 +81,7 @@ struct rtnl_link_ops {
76518 const struct net_device *dev);
76519 unsigned int (*get_num_tx_queues)(void);
76520 unsigned int (*get_num_rx_queues)(void);
76521-};
76522+} __do_const;
76523
76524 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
76525 extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
76526diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
76527index cd89510..d67810f 100644
76528--- a/include/net/sctp/sctp.h
76529+++ b/include/net/sctp/sctp.h
76530@@ -330,9 +330,9 @@ do { \
76531
76532 #else /* SCTP_DEBUG */
76533
76534-#define SCTP_DEBUG_PRINTK(whatever...)
76535-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
76536-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
76537+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
76538+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
76539+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
76540 #define SCTP_ENABLE_DEBUG
76541 #define SCTP_DISABLE_DEBUG
76542 #define SCTP_ASSERT(expr, str, func)
76543diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
76544index 2a82d13..62a31c2 100644
76545--- a/include/net/sctp/sm.h
76546+++ b/include/net/sctp/sm.h
76547@@ -87,7 +87,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
76548 typedef struct {
76549 sctp_state_fn_t *fn;
76550 const char *name;
76551-} sctp_sm_table_entry_t;
76552+} __do_const sctp_sm_table_entry_t;
76553
76554 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
76555 * currently in use.
76556@@ -299,7 +299,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
76557 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
76558
76559 /* Extern declarations for major data structures. */
76560-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
76561+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
76562
76563
76564 /* Get the size of a DATA chunk payload. */
76565diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
76566index 1bd4c41..9250b5b 100644
76567--- a/include/net/sctp/structs.h
76568+++ b/include/net/sctp/structs.h
76569@@ -516,7 +516,7 @@ struct sctp_pf {
76570 struct sctp_association *asoc);
76571 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
76572 struct sctp_af *af;
76573-};
76574+} __do_const;
76575
76576
76577 /* Structure to track chunk fragments that have been acked, but peer
76578diff --git a/include/net/sock.h b/include/net/sock.h
76579index 66772cf..25bc45b 100644
76580--- a/include/net/sock.h
76581+++ b/include/net/sock.h
76582@@ -325,7 +325,7 @@ struct sock {
76583 #ifdef CONFIG_RPS
76584 __u32 sk_rxhash;
76585 #endif
76586- atomic_t sk_drops;
76587+ atomic_unchecked_t sk_drops;
76588 int sk_rcvbuf;
76589
76590 struct sk_filter __rcu *sk_filter;
76591@@ -1797,7 +1797,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
76592 }
76593
76594 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
76595- char __user *from, char *to,
76596+ char __user *from, unsigned char *to,
76597 int copy, int offset)
76598 {
76599 if (skb->ip_summed == CHECKSUM_NONE) {
76600@@ -2056,7 +2056,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
76601 }
76602 }
76603
76604-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
76605+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
76606
76607 /**
76608 * sk_page_frag - return an appropriate page_frag
76609diff --git a/include/net/tcp.h b/include/net/tcp.h
76610index 5bba80f..8520a82 100644
76611--- a/include/net/tcp.h
76612+++ b/include/net/tcp.h
76613@@ -524,7 +524,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
76614 extern void tcp_xmit_retransmit_queue(struct sock *);
76615 extern void tcp_simple_retransmit(struct sock *);
76616 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
76617-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
76618+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
76619
76620 extern void tcp_send_probe0(struct sock *);
76621 extern void tcp_send_partial(struct sock *);
76622@@ -697,8 +697,8 @@ struct tcp_skb_cb {
76623 struct inet6_skb_parm h6;
76624 #endif
76625 } header; /* For incoming frames */
76626- __u32 seq; /* Starting sequence number */
76627- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
76628+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
76629+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
76630 __u32 when; /* used to compute rtt's */
76631 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
76632
76633@@ -712,7 +712,7 @@ struct tcp_skb_cb {
76634
76635 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
76636 /* 1 byte hole */
76637- __u32 ack_seq; /* Sequence number ACK'd */
76638+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
76639 };
76640
76641 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
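
seq, end_seq, and ack_seq are tagged __intentional_overflow(0) because TCP sequence numbers live in modulo-2^32 arithmetic: they are expected to wrap, and ordering comparisons depend on it, so the size_overflow plugin must not flag those operations. A self-contained illustration of the wrapping comparison, mirroring the kernel's before() helper:

#include <stdint.h>
#include <stdio.h>

/* kernel-style modulo-2^32 ordering: true if a precedes b on the
 * 32-bit circle, even across the 0xffffffff -> 0 wrap */
static int seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	uint32_t seq = 0xfffffff0u;	/* near the top of the space */
	uint32_t end_seq = seq + 0x40;	/* deliberately wraps to 0x30 */

	printf("end_seq=0x%x, seq before end_seq: %d\n",
	       end_seq, seq_before(seq, end_seq));	/* prints 1 */
	return 0;
}

The same annotation on tcp_fragment() and sk_stream_alloc_skb() above exempts specific size parameters and return values from the plugin's instrumentation for the same reason.
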
76642diff --git a/include/net/xfrm.h b/include/net/xfrm.h
76643index 94ce082..62b278d 100644
76644--- a/include/net/xfrm.h
76645+++ b/include/net/xfrm.h
76646@@ -305,7 +305,7 @@ struct xfrm_policy_afinfo {
76647 struct net_device *dev,
76648 const struct flowi *fl);
76649 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
76650-};
76651+} __do_const;
76652
76653 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
76654 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
76655@@ -341,7 +341,7 @@ struct xfrm_state_afinfo {
76656 struct sk_buff *skb);
76657 int (*transport_finish)(struct sk_buff *skb,
76658 int async);
76659-};
76660+} __do_const;
76661
76662 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
76663 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
76664@@ -424,7 +424,7 @@ struct xfrm_mode {
76665 struct module *owner;
76666 unsigned int encap;
76667 int flags;
76668-};
76669+} __do_const;
76670
76671 /* Flags for xfrm_mode. */
76672 enum {
76673@@ -521,7 +521,7 @@ struct xfrm_policy {
76674 struct timer_list timer;
76675
76676 struct flow_cache_object flo;
76677- atomic_t genid;
76678+ atomic_unchecked_t genid;
76679 u32 priority;
76680 u32 index;
76681 struct xfrm_mark mark;
76682diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
76683index 1a046b1..ee0bef0 100644
76684--- a/include/rdma/iw_cm.h
76685+++ b/include/rdma/iw_cm.h
76686@@ -122,7 +122,7 @@ struct iw_cm_verbs {
76687 int backlog);
76688
76689 int (*destroy_listen)(struct iw_cm_id *cm_id);
76690-};
76691+} __no_const;
76692
76693 /**
76694 * iw_create_cm_id - Create an IW CM identifier.
76695diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
76696index e1379b4..67eafbe 100644
76697--- a/include/scsi/libfc.h
76698+++ b/include/scsi/libfc.h
76699@@ -762,6 +762,7 @@ struct libfc_function_template {
76700 */
76701 void (*disc_stop_final) (struct fc_lport *);
76702 };
76703+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
76704
76705 /**
76706 * struct fc_disc - Discovery context
76707@@ -866,7 +867,7 @@ struct fc_lport {
76708 struct fc_vport *vport;
76709
76710 /* Operational Information */
76711- struct libfc_function_template tt;
76712+ libfc_function_template_no_const tt;
76713 u8 link_up;
76714 u8 qfull;
76715 enum fc_lport_state state;
76716diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
76717index cc64587..608f523 100644
76718--- a/include/scsi/scsi_device.h
76719+++ b/include/scsi/scsi_device.h
76720@@ -171,9 +171,9 @@ struct scsi_device {
76721 unsigned int max_device_blocked; /* what device_blocked counts down from */
76722 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
76723
76724- atomic_t iorequest_cnt;
76725- atomic_t iodone_cnt;
76726- atomic_t ioerr_cnt;
76727+ atomic_unchecked_t iorequest_cnt;
76728+ atomic_unchecked_t iodone_cnt;
76729+ atomic_unchecked_t ioerr_cnt;
76730
76731 struct device sdev_gendev,
76732 sdev_dev;
76733diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
76734index b797e8f..8e2c3aa 100644
76735--- a/include/scsi/scsi_transport_fc.h
76736+++ b/include/scsi/scsi_transport_fc.h
76737@@ -751,7 +751,8 @@ struct fc_function_template {
76738 unsigned long show_host_system_hostname:1;
76739
76740 unsigned long disable_target_scan:1;
76741-};
76742+} __do_const;
76743+typedef struct fc_function_template __no_const fc_function_template_no_const;
76744
76745
76746 /**
76747diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
76748index 9031a26..750d592 100644
76749--- a/include/sound/compress_driver.h
76750+++ b/include/sound/compress_driver.h
76751@@ -128,7 +128,7 @@ struct snd_compr_ops {
76752 struct snd_compr_caps *caps);
76753 int (*get_codec_caps) (struct snd_compr_stream *stream,
76754 struct snd_compr_codec_caps *codec);
76755-};
76756+} __no_const;
76757
76758 /**
76759 * struct snd_compr: Compressed device
76760diff --git a/include/sound/soc.h b/include/sound/soc.h
76761index 85c1522..f44bad1 100644
76762--- a/include/sound/soc.h
76763+++ b/include/sound/soc.h
76764@@ -781,7 +781,7 @@ struct snd_soc_codec_driver {
76765 /* probe ordering - for components with runtime dependencies */
76766 int probe_order;
76767 int remove_order;
76768-};
76769+} __do_const;
76770
76771 /* SoC platform interface */
76772 struct snd_soc_platform_driver {
76773@@ -827,7 +827,7 @@ struct snd_soc_platform_driver {
76774 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
76775 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
76776 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
76777-};
76778+} __do_const;
76779
76780 struct snd_soc_platform {
76781 const char *name;
76782diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
76783index 4ea4f98..a63629b 100644
76784--- a/include/target/target_core_base.h
76785+++ b/include/target/target_core_base.h
76786@@ -653,7 +653,7 @@ struct se_device {
76787 spinlock_t stats_lock;
76788 /* Active commands on this virtual SE device */
76789 atomic_t simple_cmds;
76790- atomic_t dev_ordered_id;
76791+ atomic_unchecked_t dev_ordered_id;
76792 atomic_t dev_ordered_sync;
76793 atomic_t dev_qf_count;
76794 int export_count;
76795diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
76796new file mode 100644
76797index 0000000..fb634b7
76798--- /dev/null
76799+++ b/include/trace/events/fs.h
76800@@ -0,0 +1,53 @@
76801+#undef TRACE_SYSTEM
76802+#define TRACE_SYSTEM fs
76803+
76804+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
76805+#define _TRACE_FS_H
76806+
76807+#include <linux/fs.h>
76808+#include <linux/tracepoint.h>
76809+
76810+TRACE_EVENT(do_sys_open,
76811+
76812+ TP_PROTO(const char *filename, int flags, int mode),
76813+
76814+ TP_ARGS(filename, flags, mode),
76815+
76816+ TP_STRUCT__entry(
76817+ __string( filename, filename )
76818+ __field( int, flags )
76819+ __field( int, mode )
76820+ ),
76821+
76822+ TP_fast_assign(
76823+ __assign_str(filename, filename);
76824+ __entry->flags = flags;
76825+ __entry->mode = mode;
76826+ ),
76827+
76828+ TP_printk("\"%s\" %x %o",
76829+ __get_str(filename), __entry->flags, __entry->mode)
76830+);
76831+
76832+TRACE_EVENT(open_exec,
76833+
76834+ TP_PROTO(const char *filename),
76835+
76836+ TP_ARGS(filename),
76837+
76838+ TP_STRUCT__entry(
76839+ __string( filename, filename )
76840+ ),
76841+
76842+ TP_fast_assign(
76843+ __assign_str(filename, filename);
76844+ ),
76845+
76846+ TP_printk("\"%s\"",
76847+ __get_str(filename))
76848+);
76849+
76850+#endif /* _TRACE_FS_H */
76851+
76852+/* This part must be outside protection */
76853+#include <trace/define_trace.h>
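
The new trace/events/fs.h header defines two tracepoints for file-open and exec events. TRACE_EVENT() only declares them; exactly one .c file has to instantiate them with CREATE_TRACE_POINTS, and call sites fire the generated trace_*() functions. A sketch of the usual wiring (kernel code, not standalone; the function name below is a placeholder, and the natural call sites would be do_sys_open() and open_exec(), assumed here rather than quoted from this patch):

/* in exactly one compilation unit, e.g. fs/open.c: emit the bodies */
#define CREATE_TRACE_POINTS
#include <trace/events/fs.h>

static long open_path_sketch(const char *filename, int flags, int mode)
{
	/* fires the do_sys_open tracepoint defined above; compiles to a
	 * near no-op unless a tracer has enabled it */
	trace_do_sys_open(filename, flags, mode);
	/* ... actual open work ... */
	return 0;
}

Every other user simply includes the header without CREATE_TRACE_POINTS to get the declarations.
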
76854diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
76855index 1c09820..7f5ec79 100644
76856--- a/include/trace/events/irq.h
76857+++ b/include/trace/events/irq.h
76858@@ -36,7 +36,7 @@ struct softirq_action;
76859 */
76860 TRACE_EVENT(irq_handler_entry,
76861
76862- TP_PROTO(int irq, struct irqaction *action),
76863+ TP_PROTO(int irq, const struct irqaction *action),
76864
76865 TP_ARGS(irq, action),
76866
76867@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
76868 */
76869 TRACE_EVENT(irq_handler_exit,
76870
76871- TP_PROTO(int irq, struct irqaction *action, int ret),
76872+ TP_PROTO(int irq, const struct irqaction *action, int ret),
76873
76874 TP_ARGS(irq, action, ret),
76875
76876diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
76877index 7caf44c..23c6f27 100644
76878--- a/include/uapi/linux/a.out.h
76879+++ b/include/uapi/linux/a.out.h
76880@@ -39,6 +39,14 @@ enum machine_type {
76881 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
76882 };
76883
76884+/* Constants for the N_FLAGS field */
76885+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
76886+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
76887+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
76888+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
76889+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
76890+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
76891+
76892 #if !defined (N_MAGIC)
76893 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
76894 #endif
76895diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
76896index d876736..ccce5c0 100644
76897--- a/include/uapi/linux/byteorder/little_endian.h
76898+++ b/include/uapi/linux/byteorder/little_endian.h
76899@@ -42,51 +42,51 @@
76900
76901 static inline __le64 __cpu_to_le64p(const __u64 *p)
76902 {
76903- return (__force __le64)*p;
76904+ return (__force const __le64)*p;
76905 }
76906-static inline __u64 __le64_to_cpup(const __le64 *p)
76907+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
76908 {
76909- return (__force __u64)*p;
76910+ return (__force const __u64)*p;
76911 }
76912 static inline __le32 __cpu_to_le32p(const __u32 *p)
76913 {
76914- return (__force __le32)*p;
76915+ return (__force const __le32)*p;
76916 }
76917 static inline __u32 __le32_to_cpup(const __le32 *p)
76918 {
76919- return (__force __u32)*p;
76920+ return (__force const __u32)*p;
76921 }
76922 static inline __le16 __cpu_to_le16p(const __u16 *p)
76923 {
76924- return (__force __le16)*p;
76925+ return (__force const __le16)*p;
76926 }
76927 static inline __u16 __le16_to_cpup(const __le16 *p)
76928 {
76929- return (__force __u16)*p;
76930+ return (__force const __u16)*p;
76931 }
76932 static inline __be64 __cpu_to_be64p(const __u64 *p)
76933 {
76934- return (__force __be64)__swab64p(p);
76935+ return (__force const __be64)__swab64p(p);
76936 }
76937 static inline __u64 __be64_to_cpup(const __be64 *p)
76938 {
76939- return __swab64p((__u64 *)p);
76940+ return __swab64p((const __u64 *)p);
76941 }
76942 static inline __be32 __cpu_to_be32p(const __u32 *p)
76943 {
76944- return (__force __be32)__swab32p(p);
76945+ return (__force const __be32)__swab32p(p);
76946 }
76947-static inline __u32 __be32_to_cpup(const __be32 *p)
76948+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
76949 {
76950- return __swab32p((__u32 *)p);
76951+ return __swab32p((const __u32 *)p);
76952 }
76953 static inline __be16 __cpu_to_be16p(const __u16 *p)
76954 {
76955- return (__force __be16)__swab16p(p);
76956+ return (__force const __be16)__swab16p(p);
76957 }
76958 static inline __u16 __be16_to_cpup(const __be16 *p)
76959 {
76960- return __swab16p((__u16 *)p);
76961+ return __swab16p((const __u16 *)p);
76962 }
76963 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
76964 #define __le64_to_cpus(x) do { (void)(x); } while (0)
76965diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
76966index ef6103b..d4e65dd 100644
76967--- a/include/uapi/linux/elf.h
76968+++ b/include/uapi/linux/elf.h
76969@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
76970 #define PT_GNU_EH_FRAME 0x6474e550
76971
76972 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
76973+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
76974+
76975+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
76976+
76977+/* Constants for the e_flags field */
76978+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
76979+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
76980+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
76981+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
76982+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
76983+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
76984
76985 /*
76986 * Extended Numbering
76987@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
76988 #define DT_DEBUG 21
76989 #define DT_TEXTREL 22
76990 #define DT_JMPREL 23
76991+#define DT_FLAGS 30
76992+ #define DF_TEXTREL 0x00000004
76993 #define DT_ENCODING 32
76994 #define OLD_DT_LOOS 0x60000000
76995 #define DT_LOOS 0x6000000d
76996@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
76997 #define PF_W 0x2
76998 #define PF_X 0x1
76999
77000+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
77001+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
77002+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
77003+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
77004+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
77005+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
77006+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
77007+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
77008+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
77009+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
77010+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
77011+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
77012+
77013 typedef struct elf32_phdr{
77014 Elf32_Word p_type;
77015 Elf32_Off p_offset;
77016@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
77017 #define EI_OSABI 7
77018 #define EI_PAD 8
77019
77020+#define EI_PAX 14
77021+
77022 #define ELFMAG0 0x7f /* EI_MAG */
77023 #define ELFMAG1 'E'
77024 #define ELFMAG2 'L'
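
The EF_PAX_*, PT_PAX_FLAGS, PF_*, and EI_PAX additions above are the on-disk places PaX policy can be stamped onto a binary: legacy markings in the ELF header, or a dedicated PT_PAX_FLAGS program header whose p_flags carries paired enable/disable bits per feature. A minimal reader for the program-header form (userspace, 64-bit ELF only, written against the constants above as an illustration):

#include <elf.h>
#include <stdio.h>

#define PT_PAX_FLAGS	0x65041580U	/* PT_LOOS + 0x5041580, as above */
#define PF_MPROTECT	(1U << 8)
#define PF_NOMPROTECT	(1U << 9)
#define PF_RANDMMAP	(1U << 14)
#define PF_NORANDMMAP	(1U << 15)

int main(int argc, char **argv)
{
	Elf64_Ehdr eh;
	Elf64_Phdr ph;
	FILE *f;
	int i;

	if (argc < 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(&eh, sizeof(eh), 1, f) != 1 ||
	    eh.e_ident[EI_CLASS] != ELFCLASS64)
		return 1;
	for (i = 0; i < eh.e_phnum; i++) {
		fseek(f, (long)(eh.e_phoff + (long)i * eh.e_phentsize),
		      SEEK_SET);
		if (fread(&ph, sizeof(ph), 1, f) != 1)
			break;
		if (ph.p_type != PT_PAX_FLAGS)
			continue;
		printf("mprotect: %s, randmmap: %s\n",
		       (ph.p_flags & PF_NOMPROTECT) ? "off" :
		       (ph.p_flags & PF_MPROTECT)   ? "on"  : "default",
		       (ph.p_flags & PF_NORANDMMAP) ? "off" :
		       (ph.p_flags & PF_RANDMMAP)   ? "on"  : "default");
	}
	fclose(f);
	return 0;
}
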
77025diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
77026index aa169c4..6a2771d 100644
77027--- a/include/uapi/linux/personality.h
77028+++ b/include/uapi/linux/personality.h
77029@@ -30,6 +30,7 @@ enum {
77030 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
77031 ADDR_NO_RANDOMIZE | \
77032 ADDR_COMPAT_LAYOUT | \
77033+ ADDR_LIMIT_3GB | \
77034 MMAP_PAGE_ZERO)
77035
77036 /*
77037diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
77038index 7530e74..e714828 100644
77039--- a/include/uapi/linux/screen_info.h
77040+++ b/include/uapi/linux/screen_info.h
77041@@ -43,7 +43,8 @@ struct screen_info {
77042 __u16 pages; /* 0x32 */
77043 __u16 vesa_attributes; /* 0x34 */
77044 __u32 capabilities; /* 0x36 */
77045- __u8 _reserved[6]; /* 0x3a */
77046+ __u16 vesapm_size; /* 0x3a */
77047+ __u8 _reserved[4]; /* 0x3c */
77048 } __attribute__((packed));
77049
77050 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
77051diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
77052index 0e011eb..82681b1 100644
77053--- a/include/uapi/linux/swab.h
77054+++ b/include/uapi/linux/swab.h
77055@@ -43,7 +43,7 @@
77056 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
77057 */
77058
77059-static inline __attribute_const__ __u16 __fswab16(__u16 val)
77060+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
77061 {
77062 #ifdef __HAVE_BUILTIN_BSWAP16__
77063 return __builtin_bswap16(val);
77064@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
77065 #endif
77066 }
77067
77068-static inline __attribute_const__ __u32 __fswab32(__u32 val)
77069+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
77070 {
77071 #ifdef __HAVE_BUILTIN_BSWAP32__
77072 return __builtin_bswap32(val);
77073@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
77074 #endif
77075 }
77076
77077-static inline __attribute_const__ __u64 __fswab64(__u64 val)
77078+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
77079 {
77080 #ifdef __HAVE_BUILTIN_BSWAP64__
77081 return __builtin_bswap64(val);
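
__fswab16/32/64 are pure bit permutations, so the size_overflow plugin has nothing meaningful to check there; __intentional_overflow(-1) (also applied to __le64_to_cpup and __be32_to_cpup earlier) exempts them from instrumentation. A userspace equivalent of the 32-bit case, including the fallback used when the builtin is unavailable:

#include <stdint.h>
#include <stdio.h>

static inline uint32_t fswab32(uint32_t val)
{
#ifdef __GNUC__
	return __builtin_bswap32(val);	/* single bswap instruction */
#else
	return ((val & 0x000000ffu) << 24) |
	       ((val & 0x0000ff00u) <<  8) |
	       ((val & 0x00ff0000u) >>  8) |
	       ((val & 0xff000000u) >> 24);
#endif
}

int main(void)
{
	printf("%08x -> %08x\n", 0x12345678u, fswab32(0x12345678u));
	return 0;
}
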
77082diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
77083index 6d67213..8dab561 100644
77084--- a/include/uapi/linux/sysctl.h
77085+++ b/include/uapi/linux/sysctl.h
77086@@ -155,7 +155,11 @@ enum
77087 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
77088 };
77089
77090-
77091+#ifdef CONFIG_PAX_SOFTMODE
77092+enum {
77093+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
77094+};
77095+#endif
77096
77097 /* CTL_VM names: */
77098 enum
77099diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
77100index e4629b9..6958086 100644
77101--- a/include/uapi/linux/xattr.h
77102+++ b/include/uapi/linux/xattr.h
77103@@ -63,5 +63,9 @@
77104 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
77105 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
77106
77107+/* User namespace */
77108+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
77109+#define XATTR_PAX_FLAGS_SUFFIX "flags"
77110+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
77111
77112 #endif /* _UAPI_LINUX_XATTR_H */
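
XATTR_NAME_PAX_FLAGS expands to "user.pax.flags", the filesystem-portable place PaX can look for per-binary policy alongside the ELF markings above. Userspace can stamp it with an ordinary setxattr() call; the flag letters below follow the convention used by tools like setfattr and paxctl-ng (lowercase disables a feature) and are an illustrative choice, not something mandated by this header:

#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	const char *flags = "em";	/* example: disable EMUTRAMP+MPROTECT */

	if (argc < 2) {
		fprintf(stderr, "usage: %s <binary>\n", argv[0]);
		return 1;
	}
	/* XATTR_NAME_PAX_FLAGS == "user.pax.flags" per the header above */
	if (setxattr(argv[1], "user.pax.flags", flags, strlen(flags), 0)) {
		perror("setxattr");
		return 1;
	}
	return 0;
}
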
77113diff --git a/include/video/udlfb.h b/include/video/udlfb.h
77114index f9466fa..f4e2b81 100644
77115--- a/include/video/udlfb.h
77116+++ b/include/video/udlfb.h
77117@@ -53,10 +53,10 @@ struct dlfb_data {
77118 u32 pseudo_palette[256];
77119 int blank_mode; /*one of FB_BLANK_ */
77120 /* blit-only rendering path metrics, exposed through sysfs */
77121- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
77122- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
77123- atomic_t bytes_sent; /* to usb, after compression including overhead */
77124- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
77125+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
77126+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
77127+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
77128+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
77129 };
77130
77131 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
77132diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
77133index 1a91850..28573f8 100644
77134--- a/include/video/uvesafb.h
77135+++ b/include/video/uvesafb.h
77136@@ -122,6 +122,7 @@ struct uvesafb_par {
77137 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
77138 u8 pmi_setpal; /* PMI for palette changes */
77139 u16 *pmi_base; /* protected mode interface location */
77140+ u8 *pmi_code; /* protected mode code location */
77141 void *pmi_start;
77142 void *pmi_pal;
77143 u8 *vbe_state_orig; /*
77144diff --git a/init/Kconfig b/init/Kconfig
77145index 2d9b831..ae4c8ac 100644
77146--- a/init/Kconfig
77147+++ b/init/Kconfig
77148@@ -1029,6 +1029,7 @@ endif # CGROUPS
77149
77150 config CHECKPOINT_RESTORE
77151 bool "Checkpoint/restore support" if EXPERT
77152+ depends on !GRKERNSEC
77153 default n
77154 help
77155 Enables additional kernel features in a sake of checkpoint/restore.
77156@@ -1516,7 +1517,7 @@ config SLUB_DEBUG
77157
77158 config COMPAT_BRK
77159 bool "Disable heap randomization"
77160- default y
77161+ default n
77162 help
77163 Randomizing heap placement makes heap exploits harder, but it
77164 also breaks ancient binaries (including anything libc5 based).
77165@@ -1779,7 +1780,7 @@ config INIT_ALL_POSSIBLE
77166 config STOP_MACHINE
77167 bool
77168 default y
77169- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
77170+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
77171 help
77172 Need stop_machine() primitive.
77173
77174diff --git a/init/Makefile b/init/Makefile
77175index 7bc47ee..6da2dc7 100644
77176--- a/init/Makefile
77177+++ b/init/Makefile
77178@@ -2,6 +2,9 @@
77179 # Makefile for the linux kernel.
77180 #
77181
77182+ccflags-y := $(GCC_PLUGINS_CFLAGS)
77183+asflags-y := $(GCC_PLUGINS_AFLAGS)
77184+
77185 obj-y := main.o version.o mounts.o
77186 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
77187 obj-y += noinitramfs.o
77188diff --git a/init/do_mounts.c b/init/do_mounts.c
77189index a2b49f2..03a0e17c 100644
77190--- a/init/do_mounts.c
77191+++ b/init/do_mounts.c
77192@@ -355,11 +355,11 @@ static void __init get_fs_names(char *page)
77193 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
77194 {
77195 struct super_block *s;
77196- int err = sys_mount(name, "/root", fs, flags, data);
77197+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
77198 if (err)
77199 return err;
77200
77201- sys_chdir("/root");
77202+ sys_chdir((const char __force_user *)"/root");
77203 s = current->fs->pwd.dentry->d_sb;
77204 ROOT_DEV = s->s_dev;
77205 printk(KERN_INFO
77206@@ -480,18 +480,18 @@ void __init change_floppy(char *fmt, ...)
77207 va_start(args, fmt);
77208 vsprintf(buf, fmt, args);
77209 va_end(args);
77210- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
77211+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
77212 if (fd >= 0) {
77213 sys_ioctl(fd, FDEJECT, 0);
77214 sys_close(fd);
77215 }
77216 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
77217- fd = sys_open("/dev/console", O_RDWR, 0);
77218+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
77219 if (fd >= 0) {
77220 sys_ioctl(fd, TCGETS, (long)&termios);
77221 termios.c_lflag &= ~ICANON;
77222 sys_ioctl(fd, TCSETSF, (long)&termios);
77223- sys_read(fd, &c, 1);
77224+ sys_read(fd, (char __user *)&c, 1);
77225 termios.c_lflag |= ICANON;
77226 sys_ioctl(fd, TCSETSF, (long)&termios);
77227 sys_close(fd);
77228@@ -585,6 +585,6 @@ void __init prepare_namespace(void)
77229 mount_root();
77230 out:
77231 devtmpfs_mount("dev");
77232- sys_mount(".", "/", NULL, MS_MOVE, NULL);
77233- sys_chroot(".");
77234+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
77235+ sys_chroot((const char __force_user *)".");
77236 }
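
The __force_user casts scattered through init/ exist because these early-boot helpers call syscall bodies directly with kernel-space strings, while the syscalls are declared to take __user pointers; __force_user is a sparse annotation that acknowledges the deliberate address-space crossing rather than silencing the checker globally. A compilable demonstration of the underlying sparse mechanics, with stand-in macro definitions so it also builds without sparse:

/* check with:  sparse demo.c   (gcc compiles it silently) */
#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif

static int fake_sys_chdir(const char __user *path)
{
	(void)path;	/* real code would copy_from_user() here */
	return 0;
}

int main(void)
{
	/* without the cast, sparse warns about passing a plain pointer
	 * where a __user one is expected (different address spaces);
	 * __force marks the crossing as intentional */
	return fake_sys_chdir((const char __force __user *)"/root");
}
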
77237diff --git a/init/do_mounts.h b/init/do_mounts.h
77238index f5b978a..69dbfe8 100644
77239--- a/init/do_mounts.h
77240+++ b/init/do_mounts.h
77241@@ -15,15 +15,15 @@ extern int root_mountflags;
77242
77243 static inline int create_dev(char *name, dev_t dev)
77244 {
77245- sys_unlink(name);
77246- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
77247+ sys_unlink((char __force_user *)name);
77248+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
77249 }
77250
77251 #if BITS_PER_LONG == 32
77252 static inline u32 bstat(char *name)
77253 {
77254 struct stat64 stat;
77255- if (sys_stat64(name, &stat) != 0)
77256+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
77257 return 0;
77258 if (!S_ISBLK(stat.st_mode))
77259 return 0;
77260@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
77261 static inline u32 bstat(char *name)
77262 {
77263 struct stat stat;
77264- if (sys_newstat(name, &stat) != 0)
77265+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
77266 return 0;
77267 if (!S_ISBLK(stat.st_mode))
77268 return 0;
77269diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
77270index 3e0878e..8a9d7a0 100644
77271--- a/init/do_mounts_initrd.c
77272+++ b/init/do_mounts_initrd.c
77273@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
77274 {
77275 sys_unshare(CLONE_FS | CLONE_FILES);
77276 /* stdin/stdout/stderr for /linuxrc */
77277- sys_open("/dev/console", O_RDWR, 0);
77278+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
77279 sys_dup(0);
77280 sys_dup(0);
77281 /* move initrd over / and chdir/chroot in initrd root */
77282- sys_chdir("/root");
77283- sys_mount(".", "/", NULL, MS_MOVE, NULL);
77284- sys_chroot(".");
77285+ sys_chdir((const char __force_user *)"/root");
77286+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
77287+ sys_chroot((const char __force_user *)".");
77288 sys_setsid();
77289 return 0;
77290 }
77291@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
77292 create_dev("/dev/root.old", Root_RAM0);
77293 /* mount initrd on rootfs' /root */
77294 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
77295- sys_mkdir("/old", 0700);
77296- sys_chdir("/old");
77297+ sys_mkdir((const char __force_user *)"/old", 0700);
77298+ sys_chdir((const char __force_user *)"/old");
77299
77300 /* try loading default modules from initrd */
77301 load_default_modules();
77302@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
77303 current->flags &= ~PF_FREEZER_SKIP;
77304
77305 /* move initrd to rootfs' /old */
77306- sys_mount("..", ".", NULL, MS_MOVE, NULL);
77307+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
77308 /* switch root and cwd back to / of rootfs */
77309- sys_chroot("..");
77310+ sys_chroot((const char __force_user *)"..");
77311
77312 if (new_decode_dev(real_root_dev) == Root_RAM0) {
77313- sys_chdir("/old");
77314+ sys_chdir((const char __force_user *)"/old");
77315 return;
77316 }
77317
77318- sys_chdir("/");
77319+ sys_chdir((const char __force_user *)"/");
77320 ROOT_DEV = new_decode_dev(real_root_dev);
77321 mount_root();
77322
77323 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
77324- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
77325+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
77326 if (!error)
77327 printk("okay\n");
77328 else {
77329- int fd = sys_open("/dev/root.old", O_RDWR, 0);
77330+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
77331 if (error == -ENOENT)
77332 printk("/initrd does not exist. Ignored.\n");
77333 else
77334 printk("failed\n");
77335 printk(KERN_NOTICE "Unmounting old root\n");
77336- sys_umount("/old", MNT_DETACH);
77337+ sys_umount((char __force_user *)"/old", MNT_DETACH);
77338 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
77339 if (fd < 0) {
77340 error = fd;
77341@@ -127,11 +127,11 @@ int __init initrd_load(void)
77342 * mounted in the normal path.
77343 */
77344 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
77345- sys_unlink("/initrd.image");
77346+ sys_unlink((const char __force_user *)"/initrd.image");
77347 handle_initrd();
77348 return 1;
77349 }
77350 }
77351- sys_unlink("/initrd.image");
77352+ sys_unlink((const char __force_user *)"/initrd.image");
77353 return 0;
77354 }
77355diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
77356index 8cb6db5..d729f50 100644
77357--- a/init/do_mounts_md.c
77358+++ b/init/do_mounts_md.c
77359@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
77360 partitioned ? "_d" : "", minor,
77361 md_setup_args[ent].device_names);
77362
77363- fd = sys_open(name, 0, 0);
77364+ fd = sys_open((char __force_user *)name, 0, 0);
77365 if (fd < 0) {
77366 printk(KERN_ERR "md: open failed - cannot start "
77367 "array %s\n", name);
77368@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
77369 * array without it
77370 */
77371 sys_close(fd);
77372- fd = sys_open(name, 0, 0);
77373+ fd = sys_open((char __force_user *)name, 0, 0);
77374 sys_ioctl(fd, BLKRRPART, 0);
77375 }
77376 sys_close(fd);
77377@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
77378
77379 wait_for_device_probe();
77380
77381- fd = sys_open("/dev/md0", 0, 0);
77382+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
77383 if (fd >= 0) {
77384 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
77385 sys_close(fd);
77386diff --git a/init/init_task.c b/init/init_task.c
77387index ba0a7f36..2bcf1d5 100644
77388--- a/init/init_task.c
77389+++ b/init/init_task.c
77390@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
77391 * Initial thread structure. Alignment of this is handled by a special
77392 * linker map entry.
77393 */
77394+#ifdef CONFIG_X86
77395+union thread_union init_thread_union __init_task_data;
77396+#else
77397 union thread_union init_thread_union __init_task_data =
77398 { INIT_THREAD_INFO(init_task) };
77399+#endif
77400diff --git a/init/initramfs.c b/init/initramfs.c
77401index a67ef9d..2d17ed9 100644
77402--- a/init/initramfs.c
77403+++ b/init/initramfs.c
77404@@ -84,7 +84,7 @@ static void __init free_hash(void)
77405 }
77406 }
77407
77408-static long __init do_utime(char *filename, time_t mtime)
77409+static long __init do_utime(char __force_user *filename, time_t mtime)
77410 {
77411 struct timespec t[2];
77412
77413@@ -119,7 +119,7 @@ static void __init dir_utime(void)
77414 struct dir_entry *de, *tmp;
77415 list_for_each_entry_safe(de, tmp, &dir_list, list) {
77416 list_del(&de->list);
77417- do_utime(de->name, de->mtime);
77418+ do_utime((char __force_user *)de->name, de->mtime);
77419 kfree(de->name);
77420 kfree(de);
77421 }
77422@@ -281,7 +281,7 @@ static int __init maybe_link(void)
77423 if (nlink >= 2) {
77424 char *old = find_link(major, minor, ino, mode, collected);
77425 if (old)
77426- return (sys_link(old, collected) < 0) ? -1 : 1;
77427+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
77428 }
77429 return 0;
77430 }
77431@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
77432 {
77433 struct stat st;
77434
77435- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
77436+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
77437 if (S_ISDIR(st.st_mode))
77438- sys_rmdir(path);
77439+ sys_rmdir((char __force_user *)path);
77440 else
77441- sys_unlink(path);
77442+ sys_unlink((char __force_user *)path);
77443 }
77444 }
77445
77446@@ -315,7 +315,7 @@ static int __init do_name(void)
77447 int openflags = O_WRONLY|O_CREAT;
77448 if (ml != 1)
77449 openflags |= O_TRUNC;
77450- wfd = sys_open(collected, openflags, mode);
77451+ wfd = sys_open((char __force_user *)collected, openflags, mode);
77452
77453 if (wfd >= 0) {
77454 sys_fchown(wfd, uid, gid);
77455@@ -327,17 +327,17 @@ static int __init do_name(void)
77456 }
77457 }
77458 } else if (S_ISDIR(mode)) {
77459- sys_mkdir(collected, mode);
77460- sys_chown(collected, uid, gid);
77461- sys_chmod(collected, mode);
77462+ sys_mkdir((char __force_user *)collected, mode);
77463+ sys_chown((char __force_user *)collected, uid, gid);
77464+ sys_chmod((char __force_user *)collected, mode);
77465 dir_add(collected, mtime);
77466 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
77467 S_ISFIFO(mode) || S_ISSOCK(mode)) {
77468 if (maybe_link() == 0) {
77469- sys_mknod(collected, mode, rdev);
77470- sys_chown(collected, uid, gid);
77471- sys_chmod(collected, mode);
77472- do_utime(collected, mtime);
77473+ sys_mknod((char __force_user *)collected, mode, rdev);
77474+ sys_chown((char __force_user *)collected, uid, gid);
77475+ sys_chmod((char __force_user *)collected, mode);
77476+ do_utime((char __force_user *)collected, mtime);
77477 }
77478 }
77479 return 0;
77480@@ -346,15 +346,15 @@ static int __init do_name(void)
77481 static int __init do_copy(void)
77482 {
77483 if (count >= body_len) {
77484- sys_write(wfd, victim, body_len);
77485+ sys_write(wfd, (char __force_user *)victim, body_len);
77486 sys_close(wfd);
77487- do_utime(vcollected, mtime);
77488+ do_utime((char __force_user *)vcollected, mtime);
77489 kfree(vcollected);
77490 eat(body_len);
77491 state = SkipIt;
77492 return 0;
77493 } else {
77494- sys_write(wfd, victim, count);
77495+ sys_write(wfd, (char __force_user *)victim, count);
77496 body_len -= count;
77497 eat(count);
77498 return 1;
77499@@ -365,9 +365,9 @@ static int __init do_symlink(void)
77500 {
77501 collected[N_ALIGN(name_len) + body_len] = '\0';
77502 clean_path(collected, 0);
77503- sys_symlink(collected + N_ALIGN(name_len), collected);
77504- sys_lchown(collected, uid, gid);
77505- do_utime(collected, mtime);
77506+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
77507+ sys_lchown((char __force_user *)collected, uid, gid);
77508+ do_utime((char __force_user *)collected, mtime);
77509 state = SkipIt;
77510 next_state = Reset;
77511 return 0;
77512@@ -583,7 +583,7 @@ static int __init populate_rootfs(void)
77513 {
77514 char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
77515 if (err)
77516- panic(err); /* Failed to decompress INTERNAL initramfs */
77517+ panic("%s", err); /* Failed to decompress INTERNAL initramfs */
77518 if (initrd_start) {
77519 #ifdef CONFIG_BLK_DEV_RAM
77520 int fd;
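
The panic(err) to panic("%s", err) change above is a classic format-string fix: err is the decompressor's error text, and passing variable text as the format argument means any '%' in it gets interpreted as a conversion specifier. The same rule in miniature:

#include <stdio.h>

static void report(const char *err)
{
	/* printf(err);          unsafe: '%' in err is parsed as a format */
	printf("%s\n", err);	/* safe: err is treated as plain data */
}

int main(void)
{
	report("unpack failed at 100% of image");
	return 0;
}
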
77521diff --git a/init/main.c b/init/main.c
77522index 9484f4b..0eac7c3 100644
77523--- a/init/main.c
77524+++ b/init/main.c
77525@@ -100,6 +100,8 @@ static inline void mark_rodata_ro(void) { }
77526 extern void tc_init(void);
77527 #endif
77528
77529+extern void grsecurity_init(void);
77530+
77531 /*
77532 * Debug helper: via this flag we know that we are in 'early bootup code'
77533 * where only the boot processor is running with IRQ disabled. This means
77534@@ -153,6 +155,74 @@ static int __init set_reset_devices(char *str)
77535
77536 __setup("reset_devices", set_reset_devices);
77537
77538+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
77539+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
77540+static int __init setup_grsec_proc_gid(char *str)
77541+{
77542+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
77543+ return 1;
77544+}
77545+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
77546+#endif
77547+
77548+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
77549+unsigned long pax_user_shadow_base __read_only;
77550+EXPORT_SYMBOL(pax_user_shadow_base);
77551+extern char pax_enter_kernel_user[];
77552+extern char pax_exit_kernel_user[];
77553+#endif
77554+
77555+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
77556+static int __init setup_pax_nouderef(char *str)
77557+{
77558+#ifdef CONFIG_X86_32
77559+ unsigned int cpu;
77560+ struct desc_struct *gdt;
77561+
77562+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
77563+ gdt = get_cpu_gdt_table(cpu);
77564+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
77565+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
77566+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
77567+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
77568+ }
77569+ loadsegment(ds, __KERNEL_DS);
77570+ loadsegment(es, __KERNEL_DS);
77571+ loadsegment(ss, __KERNEL_DS);
77572+#else
77573+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
77574+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
77575+ clone_pgd_mask = ~(pgdval_t)0UL;
77576+ pax_user_shadow_base = 0UL;
77577+ setup_clear_cpu_cap(X86_FEATURE_PCID);
77578+#endif
77579+
77580+ return 0;
77581+}
77582+early_param("pax_nouderef", setup_pax_nouderef);
77583+
77584+#ifdef CONFIG_X86_64
77585+static int __init setup_pax_weakuderef(char *str)
77586+{
77587+ if (clone_pgd_mask != ~(pgdval_t)0UL)
77588+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
77589+ return 1;
77590+}
77591+__setup("pax_weakuderef", setup_pax_weakuderef);
77592+#endif
77593+#endif
77594+
77595+#ifdef CONFIG_PAX_SOFTMODE
77596+int pax_softmode;
77597+
77598+static int __init setup_pax_softmode(char *str)
77599+{
77600+ get_option(&str, &pax_softmode);
77601+ return 1;
77602+}
77603+__setup("pax_softmode=", setup_pax_softmode);
77604+#endif
77605+
77606 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
77607 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
77608 static const char *panic_later, *panic_param;
77609@@ -655,8 +725,6 @@ static void __init do_ctors(void)
77610 bool initcall_debug;
77611 core_param(initcall_debug, initcall_debug, bool, 0644);
77612
77613-static char msgbuf[64];
77614-
77615 static int __init_or_module do_one_initcall_debug(initcall_t fn)
77616 {
77617 ktime_t calltime, delta, rettime;
77618@@ -679,23 +747,22 @@ int __init_or_module do_one_initcall(initcall_t fn)
77619 {
77620 int count = preempt_count();
77621 int ret;
77622+ const char *msg1 = "", *msg2 = "";
77623
77624 if (initcall_debug)
77625 ret = do_one_initcall_debug(fn);
77626 else
77627 ret = fn();
77628
77629- msgbuf[0] = 0;
77630-
77631 if (preempt_count() != count) {
77632- sprintf(msgbuf, "preemption imbalance ");
77633+ msg1 = " preemption imbalance";
77634 preempt_count() = count;
77635 }
77636 if (irqs_disabled()) {
77637- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
77638+ msg2 = " disabled interrupts";
77639 local_irq_enable();
77640 }
77641- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
77642+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
77643
77644 return ret;
77645 }
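
Dropping the file-scope msgbuf[64] in favour of two const char * pointers removes a shared writable static buffer: the replacement strings are literals in .rodata, there is no scratch state to race on, and the final WARN() stitches the pieces together. The shape of the pattern, reduced to standalone C:

#include <stdio.h>

/* same pattern as do_one_initcall() above: no shared scratch buffer,
 * just pointers into .rodata selected per call */
static void check_initcall(int preempt_imbalance, int irqs_disabled)
{
	const char *msg1 = preempt_imbalance ? " preemption imbalance" : "";
	const char *msg2 = irqs_disabled ? " disabled interrupts" : "";

	if (*msg1 || *msg2)
		printf("initcall returned with%s%s\n", msg1, msg2);
}

int main(void)
{
	check_initcall(1, 0);
	check_initcall(0, 1);
	check_initcall(0, 0);	/* prints nothing */
	return 0;
}
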
77646@@ -748,8 +815,14 @@ static void __init do_initcall_level(int level)
77647 level, level,
77648 &repair_env_string);
77649
77650- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
77651+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
77652 do_one_initcall(*fn);
77653+
77654+#ifdef LATENT_ENTROPY_PLUGIN
77655+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
77656+#endif
77657+
77658+ }
77659 }
77660
77661 static void __init do_initcalls(void)
77662@@ -783,8 +856,14 @@ static void __init do_pre_smp_initcalls(void)
77663 {
77664 initcall_t *fn;
77665
77666- for (fn = __initcall_start; fn < __initcall0_start; fn++)
77667+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
77668 do_one_initcall(*fn);
77669+
77670+#ifdef LATENT_ENTROPY_PLUGIN
77671+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
77672+#endif
77673+
77674+ }
77675 }
77676
77677 /*
77678@@ -802,8 +881,8 @@ static int run_init_process(const char *init_filename)
77679 {
77680 argv_init[0] = init_filename;
77681 return do_execve(init_filename,
77682- (const char __user *const __user *)argv_init,
77683- (const char __user *const __user *)envp_init);
77684+ (const char __user *const __force_user *)argv_init,
77685+ (const char __user *const __force_user *)envp_init);
77686 }
77687
77688 static noinline void __init kernel_init_freeable(void);
77689@@ -880,7 +959,7 @@ static noinline void __init kernel_init_freeable(void)
77690 do_basic_setup();
77691
77692 /* Open the /dev/console on the rootfs, this should never fail */
77693- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
77694+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
77695 pr_err("Warning: unable to open an initial console.\n");
77696
77697 (void) sys_dup(0);
77698@@ -893,11 +972,13 @@ static noinline void __init kernel_init_freeable(void)
77699 if (!ramdisk_execute_command)
77700 ramdisk_execute_command = "/init";
77701
77702- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
77703+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
77704 ramdisk_execute_command = NULL;
77705 prepare_namespace();
77706 }
77707
77708+ grsecurity_init();
77709+
77710 /*
77711 * Ok, we have completed the initial bootup, and
77712 * we're essentially up and running. Get rid of the
77713diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
77714index 130dfec..cc88451 100644
77715--- a/ipc/ipc_sysctl.c
77716+++ b/ipc/ipc_sysctl.c
77717@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
77718 static int proc_ipc_dointvec(ctl_table *table, int write,
77719 void __user *buffer, size_t *lenp, loff_t *ppos)
77720 {
77721- struct ctl_table ipc_table;
77722+ ctl_table_no_const ipc_table;
77723
77724 memcpy(&ipc_table, table, sizeof(ipc_table));
77725 ipc_table.data = get_ipc(table);
77726@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
77727 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
77728 void __user *buffer, size_t *lenp, loff_t *ppos)
77729 {
77730- struct ctl_table ipc_table;
77731+ ctl_table_no_const ipc_table;
77732
77733 memcpy(&ipc_table, table, sizeof(ipc_table));
77734 ipc_table.data = get_ipc(table);
77735@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
77736 static int proc_ipc_callback_dointvec(ctl_table *table, int write,
77737 void __user *buffer, size_t *lenp, loff_t *ppos)
77738 {
77739- struct ctl_table ipc_table;
77740+ ctl_table_no_const ipc_table;
77741 size_t lenp_bef = *lenp;
77742 int rc;
77743
77744@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
77745 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
77746 void __user *buffer, size_t *lenp, loff_t *ppos)
77747 {
77748- struct ctl_table ipc_table;
77749+ ctl_table_no_const ipc_table;
77750 memcpy(&ipc_table, table, sizeof(ipc_table));
77751 ipc_table.data = get_ipc(table);
77752
77753@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
77754 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
77755 void __user *buffer, size_t *lenp, loff_t *ppos)
77756 {
77757- struct ctl_table ipc_table;
77758+ ctl_table_no_const ipc_table;
77759 size_t lenp_bef = *lenp;
77760 int oldval;
77761 int rc;
77762diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
77763index 383d638..943fdbb 100644
77764--- a/ipc/mq_sysctl.c
77765+++ b/ipc/mq_sysctl.c
77766@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
77767 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
77768 void __user *buffer, size_t *lenp, loff_t *ppos)
77769 {
77770- struct ctl_table mq_table;
77771+ ctl_table_no_const mq_table;
77772 memcpy(&mq_table, table, sizeof(mq_table));
77773 mq_table.data = get_mq(table);
77774
77775diff --git a/ipc/mqueue.c b/ipc/mqueue.c
77776index e4e47f6..a85e0ad 100644
77777--- a/ipc/mqueue.c
77778+++ b/ipc/mqueue.c
77779@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
77780 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
77781 info->attr.mq_msgsize);
77782
77783+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
77784 spin_lock(&mq_lock);
77785 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
77786 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
77787diff --git a/ipc/msg.c b/ipc/msg.c
77788index d0c6d96..69a893c 100644
77789--- a/ipc/msg.c
77790+++ b/ipc/msg.c
77791@@ -296,18 +296,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
77792 return security_msg_queue_associate(msq, msgflg);
77793 }
77794
77795+static struct ipc_ops msg_ops = {
77796+ .getnew = newque,
77797+ .associate = msg_security,
77798+ .more_checks = NULL
77799+};
77800+
77801 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
77802 {
77803 struct ipc_namespace *ns;
77804- struct ipc_ops msg_ops;
77805 struct ipc_params msg_params;
77806
77807 ns = current->nsproxy->ipc_ns;
77808
77809- msg_ops.getnew = newque;
77810- msg_ops.associate = msg_security;
77811- msg_ops.more_checks = NULL;
77812-
77813 msg_params.key = key;
77814 msg_params.flg = msgflg;
77815
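
msgget(), semget(), and shmget() all get the same treatment in this patch: the ipc_ops structure that used to be assembled on the stack of every call becomes a single file-scope instance. A per-call stack copy is writable by construction and invisible to the constify plugin, while one static instance can be placed in protected memory. Reduced to portable C with a stand-in ops type:

#include <stdio.h>

struct ipc_ops {
	int (*getnew)(int key);
};

static int newque(int key)
{
	return key + 1;		/* stand-in for real queue creation */
}

/* after the change: one instance, declared once, eligible for .rodata */
static const struct ipc_ops msg_ops = {
	.getnew = newque,
};

int main(void)
{
	/* before the change the equivalent was, inside every syscall:
	 *	struct ipc_ops msg_ops;
	 *	msg_ops.getnew = newque;
	 * a fresh writable copy that nothing can write-protect */
	return msg_ops.getnew(41) == 42 ? 0 : 1;
}
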
77816diff --git a/ipc/sem.c b/ipc/sem.c
77817index 70480a3..f4e8262 100644
77818--- a/ipc/sem.c
77819+++ b/ipc/sem.c
77820@@ -460,10 +460,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
77821 return 0;
77822 }
77823
77824+static struct ipc_ops sem_ops = {
77825+ .getnew = newary,
77826+ .associate = sem_security,
77827+ .more_checks = sem_more_checks
77828+};
77829+
77830 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
77831 {
77832 struct ipc_namespace *ns;
77833- struct ipc_ops sem_ops;
77834 struct ipc_params sem_params;
77835
77836 ns = current->nsproxy->ipc_ns;
77837@@ -471,10 +476,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
77838 if (nsems < 0 || nsems > ns->sc_semmsl)
77839 return -EINVAL;
77840
77841- sem_ops.getnew = newary;
77842- sem_ops.associate = sem_security;
77843- sem_ops.more_checks = sem_more_checks;
77844-
77845 sem_params.key = key;
77846 sem_params.flg = semflg;
77847 sem_params.u.nsems = nsems;
77848diff --git a/ipc/shm.c b/ipc/shm.c
77849index 7e199fa..180a1ca 100644
77850--- a/ipc/shm.c
77851+++ b/ipc/shm.c
77852@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
77853 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
77854 #endif
77855
77856+#ifdef CONFIG_GRKERNSEC
77857+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77858+ const time_t shm_createtime, const kuid_t cuid,
77859+ const int shmid);
77860+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77861+ const time_t shm_createtime);
77862+#endif
77863+
77864 void shm_init_ns(struct ipc_namespace *ns)
77865 {
77866 ns->shm_ctlmax = SHMMAX;
77867@@ -531,6 +539,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
77868 shp->shm_lprid = 0;
77869 shp->shm_atim = shp->shm_dtim = 0;
77870 shp->shm_ctim = get_seconds();
77871+#ifdef CONFIG_GRKERNSEC
77872+ {
77873+ struct timespec timeval;
77874+ do_posix_clock_monotonic_gettime(&timeval);
77875+
77876+ shp->shm_createtime = timeval.tv_sec;
77877+ }
77878+#endif
77879 shp->shm_segsz = size;
77880 shp->shm_nattch = 0;
77881 shp->shm_file = file;
77882@@ -582,18 +598,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
77883 return 0;
77884 }
77885
77886+static struct ipc_ops shm_ops = {
77887+ .getnew = newseg,
77888+ .associate = shm_security,
77889+ .more_checks = shm_more_checks
77890+};
77891+
77892 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
77893 {
77894 struct ipc_namespace *ns;
77895- struct ipc_ops shm_ops;
77896 struct ipc_params shm_params;
77897
77898 ns = current->nsproxy->ipc_ns;
77899
77900- shm_ops.getnew = newseg;
77901- shm_ops.associate = shm_security;
77902- shm_ops.more_checks = shm_more_checks;
77903-
77904 shm_params.key = key;
77905 shm_params.flg = shmflg;
77906 shm_params.u.size = size;
77907@@ -1014,6 +1031,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
77908 f_mode = FMODE_READ | FMODE_WRITE;
77909 }
77910 if (shmflg & SHM_EXEC) {
77911+
77912+#ifdef CONFIG_PAX_MPROTECT
77913+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
77914+ goto out;
77915+#endif
77916+
77917 prot |= PROT_EXEC;
77918 acc_mode |= S_IXUGO;
77919 }
77920@@ -1037,9 +1060,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
77921 if (err)
77922 goto out_unlock;
77923
77924+#ifdef CONFIG_GRKERNSEC
77925+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
77926+ shp->shm_perm.cuid, shmid) ||
77927+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
77928+ err = -EACCES;
77929+ goto out_unlock;
77930+ }
77931+#endif
77932+
77933 path = shp->shm_file->f_path;
77934 path_get(&path);
77935 shp->shm_nattch++;
77936+#ifdef CONFIG_GRKERNSEC
77937+ shp->shm_lapid = current->pid;
77938+#endif
77939 size = i_size_read(path.dentry->d_inode);
77940 shm_unlock(shp);
77941
77942diff --git a/kernel/acct.c b/kernel/acct.c
77943index 8d6e145..33e0b1e 100644
77944--- a/kernel/acct.c
77945+++ b/kernel/acct.c
77946@@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
77947 */
77948 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
77949 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
77950- file->f_op->write(file, (char *)&ac,
77951+ file->f_op->write(file, (char __force_user *)&ac,
77952 sizeof(acct_t), &file->f_pos);
77953 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
77954 set_fs(fs);
77955diff --git a/kernel/audit.c b/kernel/audit.c
77956index 91e53d0..d9e3ec4 100644
77957--- a/kernel/audit.c
77958+++ b/kernel/audit.c
77959@@ -118,7 +118,7 @@ u32 audit_sig_sid = 0;
77960 3) suppressed due to audit_rate_limit
77961 4) suppressed due to audit_backlog_limit
77962 */
77963-static atomic_t audit_lost = ATOMIC_INIT(0);
77964+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
77965
77966 /* The netlink socket. */
77967 static struct sock *audit_sock;
77968@@ -240,7 +240,7 @@ void audit_log_lost(const char *message)
77969 unsigned long now;
77970 int print;
77971
77972- atomic_inc(&audit_lost);
77973+ atomic_inc_unchecked(&audit_lost);
77974
77975 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
77976
77977@@ -259,7 +259,7 @@ void audit_log_lost(const char *message)
77978 printk(KERN_WARNING
77979 "audit: audit_lost=%d audit_rate_limit=%d "
77980 "audit_backlog_limit=%d\n",
77981- atomic_read(&audit_lost),
77982+ atomic_read_unchecked(&audit_lost),
77983 audit_rate_limit,
77984 audit_backlog_limit);
77985 audit_panic(message);
77986@@ -664,7 +664,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
77987 status_set.pid = audit_pid;
77988 status_set.rate_limit = audit_rate_limit;
77989 status_set.backlog_limit = audit_backlog_limit;
77990- status_set.lost = atomic_read(&audit_lost);
77991+ status_set.lost = atomic_read_unchecked(&audit_lost);
77992 status_set.backlog = skb_queue_len(&audit_skb_queue);
77993 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
77994 &status_set, sizeof(status_set));
77995diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
77996index 6bd4a90..0ee9eff 100644
77997--- a/kernel/auditfilter.c
77998+++ b/kernel/auditfilter.c
77999@@ -423,7 +423,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
78000 f->lsm_rule = NULL;
78001
78002 /* Support legacy tests for a valid loginuid */
78003- if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295)) {
78004+ if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295U)) {
78005 f->type = AUDIT_LOGINUID_SET;
78006 f->val = 0;
78007 }
78008diff --git a/kernel/auditsc.c b/kernel/auditsc.c
78009index 3c8a601..3a416f6 100644
78010--- a/kernel/auditsc.c
78011+++ b/kernel/auditsc.c
78012@@ -1956,7 +1956,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
78013 }
78014
78015 /* global counter which is incremented every time something logs in */
78016-static atomic_t session_id = ATOMIC_INIT(0);
78017+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
78018
78019 /**
78020 * audit_set_loginuid - set current task's audit_context loginuid
78021@@ -1980,7 +1980,7 @@ int audit_set_loginuid(kuid_t loginuid)
78022 return -EPERM;
78023 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
78024
78025- sessionid = atomic_inc_return(&session_id);
78026+ sessionid = atomic_inc_return_unchecked(&session_id);
78027 if (context && context->in_syscall) {
78028 struct audit_buffer *ab;
78029
78030diff --git a/kernel/capability.c b/kernel/capability.c
78031index f6c2ce5..982c0f9 100644
78032--- a/kernel/capability.c
78033+++ b/kernel/capability.c
78034@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
78035 * before modification is attempted and the application
78036 * fails.
78037 */
78038+ if (tocopy > ARRAY_SIZE(kdata))
78039+ return -EFAULT;
78040+
78041 if (copy_to_user(dataptr, kdata, tocopy
78042 * sizeof(struct __user_cap_data_struct))) {
78043 return -EFAULT;
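
The added tocopy check in capget() closes a read past the fixed-size kdata[] array: tocopy is derived from the userspace-supplied header version, so it must be validated against the array bound before driving copy_to_user(). The invariant in isolation:

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

/* copy `tocopy` elements out of a fixed kernel-side buffer, refusing
 * counts beyond the buffer: the check capget() gains above */
static int copy_caps(unsigned int *dst, size_t tocopy)
{
	static const unsigned int kdata[2] = { 0x1, 0x2 };

	if (tocopy > ARRAY_SIZE(kdata))
		return -1;	/* would have read past kdata */
	memcpy(dst, kdata, tocopy * sizeof(kdata[0]));
	return 0;
}

int main(void)
{
	unsigned int out[4] = { 0 };

	printf("tocopy=2 -> %d\n", copy_caps(out, 2));	/* ok */
	printf("tocopy=4 -> %d\n", copy_caps(out, 4));	/* blocked */
	return 0;
}
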
78044@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
78045 int ret;
78046
78047 rcu_read_lock();
78048- ret = security_capable(__task_cred(t), ns, cap);
78049+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
78050+ gr_task_is_capable(t, __task_cred(t), cap);
78051 rcu_read_unlock();
78052
78053- return (ret == 0);
78054+ return ret;
78055 }
78056
78057 /**
78058@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
78059 int ret;
78060
78061 rcu_read_lock();
78062- ret = security_capable_noaudit(__task_cred(t), ns, cap);
78063+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
78064 rcu_read_unlock();
78065
78066- return (ret == 0);
78067+ return ret;
78068 }
78069
78070 /**
78071@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
78072 BUG();
78073 }
78074
78075- if (security_capable(current_cred(), ns, cap) == 0) {
78076+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
78077 current->flags |= PF_SUPERPRIV;
78078 return true;
78079 }
78080@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
78081 }
78082 EXPORT_SYMBOL(ns_capable);
78083
78084+bool ns_capable_nolog(struct user_namespace *ns, int cap)
78085+{
78086+ if (unlikely(!cap_valid(cap))) {
78087+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
78088+ BUG();
78089+ }
78090+
78091+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
78092+ current->flags |= PF_SUPERPRIV;
78093+ return true;
78094+ }
78095+ return false;
78096+}
78097+EXPORT_SYMBOL(ns_capable_nolog);
78098+
78099 /**
78100 * file_ns_capable - Determine if the file's opener had a capability in effect
78101 * @file: The file we want to check
78102@@ -432,6 +451,12 @@ bool capable(int cap)
78103 }
78104 EXPORT_SYMBOL(capable);
78105
78106+bool capable_nolog(int cap)
78107+{
78108+ return ns_capable_nolog(&init_user_ns, cap);
78109+}
78110+EXPORT_SYMBOL(capable_nolog);
78111+
78112 /**
78113 * nsown_capable - Check superior capability to one's own user_ns
78114 * @cap: The capability in question
78115@@ -464,3 +489,10 @@ bool inode_capable(const struct inode *inode, int cap)
78116
78117 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
78118 }
78119+
78120+bool inode_capable_nolog(const struct inode *inode, int cap)
78121+{
78122+ struct user_namespace *ns = current_user_ns();
78123+
78124+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
78125+}
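
The capget() change inserts a defensive bounds check: tocopy is derived from the user-supplied header version and copy_to_user() multiplies it by the element size, so clamping it to ARRAY_SIZE(kdata) guarantees the copy can never walk past the on-stack array even if a new version constant slips through. A self-contained sketch of the pattern; the names mirror the hunk, and the surrounding syscall machinery is omitted:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct cap_data { unsigned effective, permitted, inheritable; };

/* Stand-in for copy_to_user(): a plain memcpy in userspace. */
static int copy_out(void *dst, const void *src, size_t n)
{
    memcpy(dst, src, n);
    return 0;
}

static int capget_like(struct cap_data *dataptr, unsigned tocopy)
{
    struct cap_data kdata[2] = { { 1, 2, 3 }, { 4, 5, 6 } };

    /* The added check: never copy more elements than kdata holds,
     * no matter what the caller-derived tocopy says. */
    if (tocopy > ARRAY_SIZE(kdata))
        return -EFAULT;

    return copy_out(dataptr, kdata, tocopy * sizeof(kdata[0]));
}

int main(void)
{
    struct cap_data out[2];

    printf("tocopy=2 -> %d\n", capget_like(out, 2));  /* ok: 0 */
    printf("tocopy=9 -> %d\n", capget_like(out, 9));  /* rejected: -EFAULT */
    return 0;
}
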
78126diff --git a/kernel/cgroup.c b/kernel/cgroup.c
78127index 2e9b387..61817b1 100644
78128--- a/kernel/cgroup.c
78129+++ b/kernel/cgroup.c
78130@@ -5398,7 +5398,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
78131 struct css_set *cg = link->cg;
78132 struct task_struct *task;
78133 int count = 0;
78134- seq_printf(seq, "css_set %p\n", cg);
78135+ seq_printf(seq, "css_set %pK\n", cg);
78136 list_for_each_entry(task, &cg->tasks, cg_list) {
78137 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
78138 seq_puts(seq, " ...\n");
78139diff --git a/kernel/compat.c b/kernel/compat.c
78140index 0a09e48..f44f3f0 100644
78141--- a/kernel/compat.c
78142+++ b/kernel/compat.c
78143@@ -13,6 +13,7 @@
78144
78145 #include <linux/linkage.h>
78146 #include <linux/compat.h>
78147+#include <linux/module.h>
78148 #include <linux/errno.h>
78149 #include <linux/time.h>
78150 #include <linux/signal.h>
78151@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
78152 mm_segment_t oldfs;
78153 long ret;
78154
78155- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
78156+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
78157 oldfs = get_fs();
78158 set_fs(KERNEL_DS);
78159 ret = hrtimer_nanosleep_restart(restart);
78160@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
78161 oldfs = get_fs();
78162 set_fs(KERNEL_DS);
78163 ret = hrtimer_nanosleep(&tu,
78164- rmtp ? (struct timespec __user *)&rmt : NULL,
78165+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
78166 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
78167 set_fs(oldfs);
78168
78169@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
78170 mm_segment_t old_fs = get_fs();
78171
78172 set_fs(KERNEL_DS);
78173- ret = sys_sigpending((old_sigset_t __user *) &s);
78174+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
78175 set_fs(old_fs);
78176 if (ret == 0)
78177 ret = put_user(s, set);
78178@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
78179 mm_segment_t old_fs = get_fs();
78180
78181 set_fs(KERNEL_DS);
78182- ret = sys_old_getrlimit(resource, &r);
78183+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
78184 set_fs(old_fs);
78185
78186 if (!ret) {
78187@@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
78188 set_fs (KERNEL_DS);
78189 ret = sys_wait4(pid,
78190 (stat_addr ?
78191- (unsigned int __user *) &status : NULL),
78192- options, (struct rusage __user *) &r);
78193+ (unsigned int __force_user *) &status : NULL),
78194+ options, (struct rusage __force_user *) &r);
78195 set_fs (old_fs);
78196
78197 if (ret > 0) {
78198@@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
78199 memset(&info, 0, sizeof(info));
78200
78201 set_fs(KERNEL_DS);
78202- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
78203- uru ? (struct rusage __user *)&ru : NULL);
78204+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
78205+ uru ? (struct rusage __force_user *)&ru : NULL);
78206 set_fs(old_fs);
78207
78208 if ((ret < 0) || (info.si_signo == 0))
78209@@ -695,8 +696,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
78210 oldfs = get_fs();
78211 set_fs(KERNEL_DS);
78212 err = sys_timer_settime(timer_id, flags,
78213- (struct itimerspec __user *) &newts,
78214- (struct itimerspec __user *) &oldts);
78215+ (struct itimerspec __force_user *) &newts,
78216+ (struct itimerspec __force_user *) &oldts);
78217 set_fs(oldfs);
78218 if (!err && old && put_compat_itimerspec(old, &oldts))
78219 return -EFAULT;
78220@@ -713,7 +714,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
78221 oldfs = get_fs();
78222 set_fs(KERNEL_DS);
78223 err = sys_timer_gettime(timer_id,
78224- (struct itimerspec __user *) &ts);
78225+ (struct itimerspec __force_user *) &ts);
78226 set_fs(oldfs);
78227 if (!err && put_compat_itimerspec(setting, &ts))
78228 return -EFAULT;
78229@@ -732,7 +733,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
78230 oldfs = get_fs();
78231 set_fs(KERNEL_DS);
78232 err = sys_clock_settime(which_clock,
78233- (struct timespec __user *) &ts);
78234+ (struct timespec __force_user *) &ts);
78235 set_fs(oldfs);
78236 return err;
78237 }
78238@@ -747,7 +748,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
78239 oldfs = get_fs();
78240 set_fs(KERNEL_DS);
78241 err = sys_clock_gettime(which_clock,
78242- (struct timespec __user *) &ts);
78243+ (struct timespec __force_user *) &ts);
78244 set_fs(oldfs);
78245 if (!err && put_compat_timespec(&ts, tp))
78246 return -EFAULT;
78247@@ -767,7 +768,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
78248
78249 oldfs = get_fs();
78250 set_fs(KERNEL_DS);
78251- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
78252+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
78253 set_fs(oldfs);
78254
78255 err = compat_put_timex(utp, &txc);
78256@@ -787,7 +788,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
78257 oldfs = get_fs();
78258 set_fs(KERNEL_DS);
78259 err = sys_clock_getres(which_clock,
78260- (struct timespec __user *) &ts);
78261+ (struct timespec __force_user *) &ts);
78262 set_fs(oldfs);
78263 if (!err && tp && put_compat_timespec(&ts, tp))
78264 return -EFAULT;
78265@@ -799,9 +800,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
78266 long err;
78267 mm_segment_t oldfs;
78268 struct timespec tu;
78269- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
78270+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
78271
78272- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
78273+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
78274 oldfs = get_fs();
78275 set_fs(KERNEL_DS);
78276 err = clock_nanosleep_restart(restart);
78277@@ -833,8 +834,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
78278 oldfs = get_fs();
78279 set_fs(KERNEL_DS);
78280 err = sys_clock_nanosleep(which_clock, flags,
78281- (struct timespec __user *) &in,
78282- (struct timespec __user *) &out);
78283+ (struct timespec __force_user *) &in,
78284+ (struct timespec __force_user *) &out);
78285 set_fs(oldfs);
78286
78287 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
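
All of the compat.c casts follow one pattern: the code points a __user-annotated field at a kernel stack variable while get_fs()/set_fs(KERNEL_DS) temporarily widens the address limit, and __force_user (a grsecurity shorthand for __force __user) tells the sparse static checker that this address-space crossing is deliberate. A minimal sketch of how such annotations work outside the kernel; compile with sparse to see the diagnostic, a plain compiler ignores the attributes entirely:

/* Address-space annotations in the style of the kernel's compiler.h;
 * they only mean something to sparse (which defines __CHECKER__). */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

struct timespec_like { long sec, nsec; };

static struct timespec_like __user *rmtp_field;

int main(void)
{
    struct timespec_like rmt;   /* kernel-space object */

    /* Without __force, sparse warns: incorrect type in assignment
     * (different address spaces) -- the bug class the annotations
     * exist to catch:
     *     rmtp_field = &rmt;
     */

    /* The patch's idiom: state that crossing address spaces here is
     * intentional, because set_fs(KERNEL_DS) makes kernel pointers
     * acceptable to the user-copy routines for the duration. */
    rmtp_field = (struct timespec_like __force __user *)&rmt;

    return 0;
}
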
78288diff --git a/kernel/configs.c b/kernel/configs.c
78289index c18b1f1..b9a0132 100644
78290--- a/kernel/configs.c
78291+++ b/kernel/configs.c
78292@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
78293 struct proc_dir_entry *entry;
78294
78295 /* create the current config file */
78296+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
78297+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
78298+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
78299+ &ikconfig_file_ops);
78300+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78301+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
78302+ &ikconfig_file_ops);
78303+#endif
78304+#else
78305 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
78306 &ikconfig_file_ops);
78307+#endif
78308+
78309 if (!entry)
78310 return -ENOMEM;
78311
78312diff --git a/kernel/cred.c b/kernel/cred.c
78313index e0573a4..3874e41 100644
78314--- a/kernel/cred.c
78315+++ b/kernel/cred.c
78316@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
78317 validate_creds(cred);
78318 alter_cred_subscribers(cred, -1);
78319 put_cred(cred);
78320+
78321+#ifdef CONFIG_GRKERNSEC_SETXID
78322+ cred = (struct cred *) tsk->delayed_cred;
78323+ if (cred != NULL) {
78324+ tsk->delayed_cred = NULL;
78325+ validate_creds(cred);
78326+ alter_cred_subscribers(cred, -1);
78327+ put_cred(cred);
78328+ }
78329+#endif
78330 }
78331
78332 /**
78333@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
78334 * Always returns 0 thus allowing this function to be tail-called at the end
78335 * of, say, sys_setgid().
78336 */
78337-int commit_creds(struct cred *new)
78338+static int __commit_creds(struct cred *new)
78339 {
78340 struct task_struct *task = current;
78341 const struct cred *old = task->real_cred;
78342@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
78343
78344 get_cred(new); /* we will require a ref for the subj creds too */
78345
78346+ gr_set_role_label(task, new->uid, new->gid);
78347+
78348 /* dumpability changes */
78349 if (!uid_eq(old->euid, new->euid) ||
78350 !gid_eq(old->egid, new->egid) ||
78351@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
78352 put_cred(old);
78353 return 0;
78354 }
78355+#ifdef CONFIG_GRKERNSEC_SETXID
78356+extern int set_user(struct cred *new);
78357+
78358+void gr_delayed_cred_worker(void)
78359+{
78360+ const struct cred *new = current->delayed_cred;
78361+ struct cred *ncred;
78362+
78363+ current->delayed_cred = NULL;
78364+
78365+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
78366+ // from doing get_cred on it when queueing this
78367+ put_cred(new);
78368+ return;
78369+ } else if (new == NULL)
78370+ return;
78371+
78372+ ncred = prepare_creds();
78373+ if (!ncred)
78374+ goto die;
78375+ // uids
78376+ ncred->uid = new->uid;
78377+ ncred->euid = new->euid;
78378+ ncred->suid = new->suid;
78379+ ncred->fsuid = new->fsuid;
78380+ // gids
78381+ ncred->gid = new->gid;
78382+ ncred->egid = new->egid;
78383+ ncred->sgid = new->sgid;
78384+ ncred->fsgid = new->fsgid;
78385+ // groups
78386+ if (set_groups(ncred, new->group_info) < 0) {
78387+ abort_creds(ncred);
78388+ goto die;
78389+ }
78390+ // caps
78391+ ncred->securebits = new->securebits;
78392+ ncred->cap_inheritable = new->cap_inheritable;
78393+ ncred->cap_permitted = new->cap_permitted;
78394+ ncred->cap_effective = new->cap_effective;
78395+ ncred->cap_bset = new->cap_bset;
78396+
78397+ if (set_user(ncred)) {
78398+ abort_creds(ncred);
78399+ goto die;
78400+ }
78401+
78402+ // from doing get_cred on it when queueing this
78403+ put_cred(new);
78404+
78405+ __commit_creds(ncred);
78406+ return;
78407+die:
78408+ // from doing get_cred on it when queueing this
78409+ put_cred(new);
78410+ do_group_exit(SIGKILL);
78411+}
78412+#endif
78413+
78414+int commit_creds(struct cred *new)
78415+{
78416+#ifdef CONFIG_GRKERNSEC_SETXID
78417+ int ret;
78418+ int schedule_it = 0;
78419+ struct task_struct *t;
78420+
78421+ /* we won't get called with tasklist_lock held for writing
78422+ and interrupts disabled as the cred struct in that case is
78423+ init_cred
78424+ */
78425+ if (grsec_enable_setxid && !current_is_single_threaded() &&
78426+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
78427+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
78428+ schedule_it = 1;
78429+ }
78430+ ret = __commit_creds(new);
78431+ if (schedule_it) {
78432+ rcu_read_lock();
78433+ read_lock(&tasklist_lock);
78434+ for (t = next_thread(current); t != current;
78435+ t = next_thread(t)) {
78436+ if (t->delayed_cred == NULL) {
78437+ t->delayed_cred = get_cred(new);
78438+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
78439+ set_tsk_need_resched(t);
78440+ }
78441+ }
78442+ read_unlock(&tasklist_lock);
78443+ rcu_read_unlock();
78444+ }
78445+ return ret;
78446+#else
78447+ return __commit_creds(new);
78448+#endif
78449+}
78450+
78451 EXPORT_SYMBOL(commit_creds);
78452
78453 /**
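
The GRKERNSEC_SETXID block addresses a kernel-level quirk: setuid() only changes the calling thread's credentials, so when a root thread drops privileges, the patched commit_creds() queues the new cred on every sibling thread via delayed_cred plus TIF_GRSEC_SETXID, and each thread applies it for itself at its next reschedule point (gr_delayed_cred_worker). A rough userspace analogue of that queue-and-apply-at-a-safe-point shape, with pthreads standing in for kernel threads; all names here are invented for the sketch:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NTHREADS 3

struct thread_state {
    _Atomic(int) pending_uid;   /* analogue of delayed_cred; <0 = none */
    int uid;
};

static struct thread_state threads[NTHREADS];

static void *worker(void *arg)
{
    struct thread_state *ts = arg;

    for (int tick = 0; tick < 5; tick++) {
        /* The "delayed cred worker": apply a queued change only at a
         * point this thread chooses, never from the outside. */
        int new_uid = atomic_exchange(&ts->pending_uid, -1);
        if (new_uid >= 0)
            ts->uid = new_uid;
        usleep(1000);            /* pretend to do real work */
    }
    printf("thread %ld ends with uid %d\n",
           (long)(ts - threads), ts->uid);
    return NULL;
}

static void commit_creds_like(int new_uid)
{
    /* Queue the change for every thread; they pick it up themselves,
     * like setting TIF_GRSEC_SETXID plus set_tsk_need_resched(). */
    for (int i = 0; i < NTHREADS; i++)
        atomic_store(&threads[i].pending_uid, new_uid);
}

int main(void)
{
    pthread_t tids[NTHREADS];

    for (int i = 0; i < NTHREADS; i++) {
        threads[i].pending_uid = -1;
        threads[i].uid = 0;
        pthread_create(&tids[i], NULL, worker, &threads[i]);
    }
    commit_creds_like(1000);     /* root -> uid 1000, all threads */
    for (int i = 0; i < NTHREADS; i++)
        pthread_join(tids[i], NULL);
    return 0;
}
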
78454diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
78455index 0506d44..2c20034 100644
78456--- a/kernel/debug/debug_core.c
78457+++ b/kernel/debug/debug_core.c
78458@@ -123,7 +123,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
78459 */
78460 static atomic_t masters_in_kgdb;
78461 static atomic_t slaves_in_kgdb;
78462-static atomic_t kgdb_break_tasklet_var;
78463+static atomic_unchecked_t kgdb_break_tasklet_var;
78464 atomic_t kgdb_setting_breakpoint;
78465
78466 struct task_struct *kgdb_usethread;
78467@@ -133,7 +133,7 @@ int kgdb_single_step;
78468 static pid_t kgdb_sstep_pid;
78469
78470 /* to keep track of the CPU which is doing the single stepping*/
78471-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
78472+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
78473
78474 /*
78475 * If you are debugging a problem where roundup (the collection of
78476@@ -541,7 +541,7 @@ return_normal:
78477 * kernel will only try for the value of sstep_tries before
78478 * giving up and continuing on.
78479 */
78480- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
78481+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
78482 (kgdb_info[cpu].task &&
78483 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
78484 atomic_set(&kgdb_active, -1);
78485@@ -635,8 +635,8 @@ cpu_master_loop:
78486 }
78487
78488 kgdb_restore:
78489- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
78490- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
78491+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
78492+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
78493 if (kgdb_info[sstep_cpu].task)
78494 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
78495 else
78496@@ -888,18 +888,18 @@ static void kgdb_unregister_callbacks(void)
78497 static void kgdb_tasklet_bpt(unsigned long ing)
78498 {
78499 kgdb_breakpoint();
78500- atomic_set(&kgdb_break_tasklet_var, 0);
78501+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
78502 }
78503
78504 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
78505
78506 void kgdb_schedule_breakpoint(void)
78507 {
78508- if (atomic_read(&kgdb_break_tasklet_var) ||
78509+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
78510 atomic_read(&kgdb_active) != -1 ||
78511 atomic_read(&kgdb_setting_breakpoint))
78512 return;
78513- atomic_inc(&kgdb_break_tasklet_var);
78514+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
78515 tasklet_schedule(&kgdb_tasklet_breakpoint);
78516 }
78517 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
78518diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
78519index 00eb8f7..d7e3244 100644
78520--- a/kernel/debug/kdb/kdb_main.c
78521+++ b/kernel/debug/kdb/kdb_main.c
78522@@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
78523 continue;
78524
78525 kdb_printf("%-20s%8u 0x%p ", mod->name,
78526- mod->core_size, (void *)mod);
78527+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
78528 #ifdef CONFIG_MODULE_UNLOAD
78529 kdb_printf("%4ld ", module_refcount(mod));
78530 #endif
78531@@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
78532 kdb_printf(" (Loading)");
78533 else
78534 kdb_printf(" (Live)");
78535- kdb_printf(" 0x%p", mod->module_core);
78536+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
78537
78538 #ifdef CONFIG_MODULE_UNLOAD
78539 {
78540diff --git a/kernel/events/core.c b/kernel/events/core.c
78541index e76e495..cbfe63a 100644
78542--- a/kernel/events/core.c
78543+++ b/kernel/events/core.c
78544@@ -156,8 +156,15 @@ static struct srcu_struct pmus_srcu;
78545 * 0 - disallow raw tracepoint access for unpriv
78546 * 1 - disallow cpu events for unpriv
78547 * 2 - disallow kernel profiling for unpriv
78548+ * 3 - disallow all unpriv perf event use
78549 */
78550-int sysctl_perf_event_paranoid __read_mostly = 1;
78551+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
78552+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
78553+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
78554+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
78555+#else
78556+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
78557+#endif
78558
78559 /* Minimum for 512 kiB + 1 user control page */
78560 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
78561@@ -184,7 +191,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
78562 return 0;
78563 }
78564
78565-static atomic64_t perf_event_id;
78566+static atomic64_unchecked_t perf_event_id;
78567
78568 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
78569 enum event_type_t event_type);
78570@@ -2747,7 +2754,7 @@ static void __perf_event_read(void *info)
78571
78572 static inline u64 perf_event_count(struct perf_event *event)
78573 {
78574- return local64_read(&event->count) + atomic64_read(&event->child_count);
78575+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
78576 }
78577
78578 static u64 perf_event_read(struct perf_event *event)
78579@@ -3093,9 +3100,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
78580 mutex_lock(&event->child_mutex);
78581 total += perf_event_read(event);
78582 *enabled += event->total_time_enabled +
78583- atomic64_read(&event->child_total_time_enabled);
78584+ atomic64_read_unchecked(&event->child_total_time_enabled);
78585 *running += event->total_time_running +
78586- atomic64_read(&event->child_total_time_running);
78587+ atomic64_read_unchecked(&event->child_total_time_running);
78588
78589 list_for_each_entry(child, &event->child_list, child_list) {
78590 total += perf_event_read(child);
78591@@ -3481,10 +3488,10 @@ void perf_event_update_userpage(struct perf_event *event)
78592 userpg->offset -= local64_read(&event->hw.prev_count);
78593
78594 userpg->time_enabled = enabled +
78595- atomic64_read(&event->child_total_time_enabled);
78596+ atomic64_read_unchecked(&event->child_total_time_enabled);
78597
78598 userpg->time_running = running +
78599- atomic64_read(&event->child_total_time_running);
78600+ atomic64_read_unchecked(&event->child_total_time_running);
78601
78602 arch_perf_update_userpage(userpg, now);
78603
78604@@ -4034,7 +4041,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
78605
78606 /* Data. */
78607 sp = perf_user_stack_pointer(regs);
78608- rem = __output_copy_user(handle, (void *) sp, dump_size);
78609+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
78610 dyn_size = dump_size - rem;
78611
78612 perf_output_skip(handle, rem);
78613@@ -4122,11 +4129,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
78614 values[n++] = perf_event_count(event);
78615 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
78616 values[n++] = enabled +
78617- atomic64_read(&event->child_total_time_enabled);
78618+ atomic64_read_unchecked(&event->child_total_time_enabled);
78619 }
78620 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
78621 values[n++] = running +
78622- atomic64_read(&event->child_total_time_running);
78623+ atomic64_read_unchecked(&event->child_total_time_running);
78624 }
78625 if (read_format & PERF_FORMAT_ID)
78626 values[n++] = primary_event_id(event);
78627@@ -4835,12 +4842,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
78628 * need to add enough zero bytes after the string to handle
78629 * the 64bit alignment we do later.
78630 */
78631- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
78632+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
78633 if (!buf) {
78634 name = strncpy(tmp, "//enomem", sizeof(tmp));
78635 goto got_name;
78636 }
78637- name = d_path(&file->f_path, buf, PATH_MAX);
78638+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
78639 if (IS_ERR(name)) {
78640 name = strncpy(tmp, "//toolong", sizeof(tmp));
78641 goto got_name;
78642@@ -6262,7 +6269,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
78643 event->parent = parent_event;
78644
78645 event->ns = get_pid_ns(task_active_pid_ns(current));
78646- event->id = atomic64_inc_return(&perf_event_id);
78647+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
78648
78649 event->state = PERF_EVENT_STATE_INACTIVE;
78650
78651@@ -6572,6 +6579,11 @@ SYSCALL_DEFINE5(perf_event_open,
78652 if (flags & ~PERF_FLAG_ALL)
78653 return -EINVAL;
78654
78655+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
78656+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
78657+ return -EACCES;
78658+#endif
78659+
78660 err = perf_copy_attr(attr_uptr, &attr);
78661 if (err)
78662 return err;
78663@@ -6904,10 +6916,10 @@ static void sync_child_event(struct perf_event *child_event,
78664 /*
78665 * Add back the child's count to the parent's count:
78666 */
78667- atomic64_add(child_val, &parent_event->child_count);
78668- atomic64_add(child_event->total_time_enabled,
78669+ atomic64_add_unchecked(child_val, &parent_event->child_count);
78670+ atomic64_add_unchecked(child_event->total_time_enabled,
78671 &parent_event->child_total_time_enabled);
78672- atomic64_add(child_event->total_time_running,
78673+ atomic64_add_unchecked(child_event->total_time_running,
78674 &parent_event->child_total_time_running);
78675
78676 /*
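
The perf_event_mmap_event() change is a subtle buffer rework: the record path later pads the returned pathname out to a u64 boundary, so instead of allocating PATH_MAX + sizeof(u64) and letting d_path() fill the whole span, the patch allocates exactly PATH_MAX and caps d_path() at PATH_MAX - sizeof(u64), keeping the invariant that string plus alignment padding always fits inside the allocation. The general shape, sketched with snprintf standing in for d_path() and a toy PATH_MAX:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define PATH_MAX_LIKE 64   /* tiny stand-in for PATH_MAX */

/* Round a length up to the next multiple of sizeof(uint64_t), the way
 * perf aligns record payloads. */
static size_t pad8(size_t len)
{
    return (len + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1);
}

int main(void)
{
    /* Patched scheme: allocate PATH_MAX, but let the path itself use
     * at most PATH_MAX - sizeof(u64), so the zero padding added for
     * 64-bit alignment can never run off the end of the buffer. */
    char *buf = calloc(1, PATH_MAX_LIKE);
    if (!buf)
        return 1;

    snprintf(buf, PATH_MAX_LIKE - sizeof(uint64_t), "%s",
             "/usr/lib/some/deeply/nested/library.so");

    size_t len   = strlen(buf) + 1;   /* include the NUL */
    size_t total = pad8(len);         /* aligned record size */

    printf("len=%zu padded=%zu limit=%d\n", len, total, PATH_MAX_LIKE);
    /* Invariant the patch maintains: total <= allocation size. */
    return total <= PATH_MAX_LIKE ? 0 : 1;
}
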
78677diff --git a/kernel/events/internal.h b/kernel/events/internal.h
78678index ca65997..cc8cee4 100644
78679--- a/kernel/events/internal.h
78680+++ b/kernel/events/internal.h
78681@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
78682 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
78683 }
78684
78685-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
78686+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
78687 static inline unsigned int \
78688 func_name(struct perf_output_handle *handle, \
78689- const void *buf, unsigned int len) \
78690+ const void user *buf, unsigned int len) \
78691 { \
78692 unsigned long size, written; \
78693 \
78694@@ -116,17 +116,17 @@ static inline int memcpy_common(void *dst, const void *src, size_t n)
78695 return n;
78696 }
78697
78698-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
78699+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
78700
78701 #define MEMCPY_SKIP(dst, src, n) (n)
78702
78703-DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)
78704+DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP, )
78705
78706 #ifndef arch_perf_out_copy_user
78707 #define arch_perf_out_copy_user __copy_from_user_inatomic
78708 #endif
78709
78710-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
78711+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
78712
78713 /* Callchain handling */
78714 extern struct perf_callchain_entry *
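
The internal.h change threads a third macro parameter through DEFINE_OUTPUT_COPY so each generated copy routine carries the right sparse address-space qualifier on its source argument: empty for the kernel-to-kernel copiers, __user for __output_copy_user. Parameterizing a qualifier through a macro looks odd but is plain token substitution; a compilable sketch of the trick, with the qualifiers defined away for a normal compiler:

#include <stdio.h>
#include <string.h>

/* Only sparse gives this meaning; a normal compiler sees nothing. */
#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

/* Generate a family of copy functions that differ only in the
 * address-space qualifier of their source pointer -- the change the
 * hunk applies to DEFINE_OUTPUT_COPY. */
#define DEFINE_COPY(func_name, qual)                               \
    static size_t func_name(void *dst, const void qual *src,      \
                            size_t len)                            \
    {                                                              \
        memcpy(dst, (const void *)src, len);                       \
        return 0; /* bytes NOT copied, like the kernel helpers */  \
    }

DEFINE_COPY(copy_kernel, )        /* empty qualifier slot */
DEFINE_COPY(copy_user, __user)    /* user-space source */

int main(void)
{
    char dst[8];
    const char *ksrc = "kernel";
    const char __user *usrc = (const char __user *)"user";

    copy_kernel(dst, ksrc, 7);
    printf("%s\n", dst);
    copy_user(dst, usrc, 5);
    printf("%s\n", dst);
    return 0;
}
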
78715diff --git a/kernel/exit.c b/kernel/exit.c
78716index 7bb73f9..d7978ed 100644
78717--- a/kernel/exit.c
78718+++ b/kernel/exit.c
78719@@ -172,6 +172,10 @@ void release_task(struct task_struct * p)
78720 struct task_struct *leader;
78721 int zap_leader;
78722 repeat:
78723+#ifdef CONFIG_NET
78724+ gr_del_task_from_ip_table(p);
78725+#endif
78726+
78727 /* don't need to get the RCU readlock here - the process is dead and
78728 * can't be modifying its own credentials. But shut RCU-lockdep up */
78729 rcu_read_lock();
78730@@ -340,7 +344,7 @@ int allow_signal(int sig)
78731 * know it'll be handled, so that they don't get converted to
78732 * SIGKILL or just silently dropped.
78733 */
78734- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
78735+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
78736 recalc_sigpending();
78737 spin_unlock_irq(&current->sighand->siglock);
78738 return 0;
78739@@ -709,6 +713,8 @@ void do_exit(long code)
78740 struct task_struct *tsk = current;
78741 int group_dead;
78742
78743+ set_fs(USER_DS);
78744+
78745 profile_task_exit(tsk);
78746
78747 WARN_ON(blk_needs_flush_plug(tsk));
78748@@ -725,7 +731,6 @@ void do_exit(long code)
78749 * mm_release()->clear_child_tid() from writing to a user-controlled
78750 * kernel address.
78751 */
78752- set_fs(USER_DS);
78753
78754 ptrace_event(PTRACE_EVENT_EXIT, code);
78755
78756@@ -784,6 +789,9 @@ void do_exit(long code)
78757 tsk->exit_code = code;
78758 taskstats_exit(tsk, group_dead);
78759
78760+ gr_acl_handle_psacct(tsk, code);
78761+ gr_acl_handle_exit();
78762+
78763 exit_mm(tsk);
78764
78765 if (group_dead)
78766@@ -905,7 +913,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
78767 * Take down every thread in the group. This is called by fatal signals
78768 * as well as by sys_exit_group (below).
78769 */
78770-void
78771+__noreturn void
78772 do_group_exit(int exit_code)
78773 {
78774 struct signal_struct *sig = current->signal;
78775diff --git a/kernel/fork.c b/kernel/fork.c
78776index 987b28a..11ee8a5 100644
78777--- a/kernel/fork.c
78778+++ b/kernel/fork.c
78779@@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
78780 *stackend = STACK_END_MAGIC; /* for overflow detection */
78781
78782 #ifdef CONFIG_CC_STACKPROTECTOR
78783- tsk->stack_canary = get_random_int();
78784+ tsk->stack_canary = pax_get_random_long();
78785 #endif
78786
78787 /*
78788@@ -345,13 +345,81 @@ free_tsk:
78789 }
78790
78791 #ifdef CONFIG_MMU
78792+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
78793+{
78794+ struct vm_area_struct *tmp;
78795+ unsigned long charge;
78796+ struct mempolicy *pol;
78797+ struct file *file;
78798+
78799+ charge = 0;
78800+ if (mpnt->vm_flags & VM_ACCOUNT) {
78801+ unsigned long len = vma_pages(mpnt);
78802+
78803+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
78804+ goto fail_nomem;
78805+ charge = len;
78806+ }
78807+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
78808+ if (!tmp)
78809+ goto fail_nomem;
78810+ *tmp = *mpnt;
78811+ tmp->vm_mm = mm;
78812+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
78813+ pol = mpol_dup(vma_policy(mpnt));
78814+ if (IS_ERR(pol))
78815+ goto fail_nomem_policy;
78816+ vma_set_policy(tmp, pol);
78817+ if (anon_vma_fork(tmp, mpnt))
78818+ goto fail_nomem_anon_vma_fork;
78819+ tmp->vm_flags &= ~VM_LOCKED;
78820+ tmp->vm_next = tmp->vm_prev = NULL;
78821+ tmp->vm_mirror = NULL;
78822+ file = tmp->vm_file;
78823+ if (file) {
78824+ struct inode *inode = file_inode(file);
78825+ struct address_space *mapping = file->f_mapping;
78826+
78827+ get_file(file);
78828+ if (tmp->vm_flags & VM_DENYWRITE)
78829+ atomic_dec(&inode->i_writecount);
78830+ mutex_lock(&mapping->i_mmap_mutex);
78831+ if (tmp->vm_flags & VM_SHARED)
78832+ mapping->i_mmap_writable++;
78833+ flush_dcache_mmap_lock(mapping);
78834+ /* insert tmp into the share list, just after mpnt */
78835+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
78836+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
78837+ else
78838+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
78839+ flush_dcache_mmap_unlock(mapping);
78840+ mutex_unlock(&mapping->i_mmap_mutex);
78841+ }
78842+
78843+ /*
78844+ * Clear hugetlb-related page reserves for children. This only
78845+ * affects MAP_PRIVATE mappings. Faults generated by the child
78846+ * are not guaranteed to succeed, even if read-only
78847+ */
78848+ if (is_vm_hugetlb_page(tmp))
78849+ reset_vma_resv_huge_pages(tmp);
78850+
78851+ return tmp;
78852+
78853+fail_nomem_anon_vma_fork:
78854+ mpol_put(pol);
78855+fail_nomem_policy:
78856+ kmem_cache_free(vm_area_cachep, tmp);
78857+fail_nomem:
78858+ vm_unacct_memory(charge);
78859+ return NULL;
78860+}
78861+
78862 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
78863 {
78864 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
78865 struct rb_node **rb_link, *rb_parent;
78866 int retval;
78867- unsigned long charge;
78868- struct mempolicy *pol;
78869
78870 uprobe_start_dup_mmap();
78871 down_write(&oldmm->mmap_sem);
78872@@ -365,8 +433,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
78873 mm->locked_vm = 0;
78874 mm->mmap = NULL;
78875 mm->mmap_cache = NULL;
78876- mm->free_area_cache = oldmm->mmap_base;
78877- mm->cached_hole_size = ~0UL;
78878+ mm->free_area_cache = oldmm->free_area_cache;
78879+ mm->cached_hole_size = oldmm->cached_hole_size;
78880 mm->map_count = 0;
78881 cpumask_clear(mm_cpumask(mm));
78882 mm->mm_rb = RB_ROOT;
78883@@ -382,57 +450,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
78884
78885 prev = NULL;
78886 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
78887- struct file *file;
78888-
78889 if (mpnt->vm_flags & VM_DONTCOPY) {
78890 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
78891 -vma_pages(mpnt));
78892 continue;
78893 }
78894- charge = 0;
78895- if (mpnt->vm_flags & VM_ACCOUNT) {
78896- unsigned long len = vma_pages(mpnt);
78897-
78898- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
78899- goto fail_nomem;
78900- charge = len;
78901- }
78902- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
78903- if (!tmp)
78904- goto fail_nomem;
78905- *tmp = *mpnt;
78906- INIT_LIST_HEAD(&tmp->anon_vma_chain);
78907- pol = mpol_dup(vma_policy(mpnt));
78908- retval = PTR_ERR(pol);
78909- if (IS_ERR(pol))
78910- goto fail_nomem_policy;
78911- vma_set_policy(tmp, pol);
78912- tmp->vm_mm = mm;
78913- if (anon_vma_fork(tmp, mpnt))
78914- goto fail_nomem_anon_vma_fork;
78915- tmp->vm_flags &= ~VM_LOCKED;
78916- tmp->vm_next = tmp->vm_prev = NULL;
78917- file = tmp->vm_file;
78918- if (file) {
78919- struct inode *inode = file_inode(file);
78920- struct address_space *mapping = file->f_mapping;
78921-
78922- get_file(file);
78923- if (tmp->vm_flags & VM_DENYWRITE)
78924- atomic_dec(&inode->i_writecount);
78925- mutex_lock(&mapping->i_mmap_mutex);
78926- if (tmp->vm_flags & VM_SHARED)
78927- mapping->i_mmap_writable++;
78928- flush_dcache_mmap_lock(mapping);
78929- /* insert tmp into the share list, just after mpnt */
78930- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
78931- vma_nonlinear_insert(tmp,
78932- &mapping->i_mmap_nonlinear);
78933- else
78934- vma_interval_tree_insert_after(tmp, mpnt,
78935- &mapping->i_mmap);
78936- flush_dcache_mmap_unlock(mapping);
78937- mutex_unlock(&mapping->i_mmap_mutex);
78938+ tmp = dup_vma(mm, oldmm, mpnt);
78939+ if (!tmp) {
78940+ retval = -ENOMEM;
78941+ goto out;
78942 }
78943
78944 /*
78945@@ -464,6 +490,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
78946 if (retval)
78947 goto out;
78948 }
78949+
78950+#ifdef CONFIG_PAX_SEGMEXEC
78951+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
78952+ struct vm_area_struct *mpnt_m;
78953+
78954+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
78955+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
78956+
78957+ if (!mpnt->vm_mirror)
78958+ continue;
78959+
78960+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
78961+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
78962+ mpnt->vm_mirror = mpnt_m;
78963+ } else {
78964+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
78965+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
78966+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
78967+ mpnt->vm_mirror->vm_mirror = mpnt;
78968+ }
78969+ }
78970+ BUG_ON(mpnt_m);
78971+ }
78972+#endif
78973+
78974 /* a new mm has just been created */
78975 arch_dup_mmap(oldmm, mm);
78976 retval = 0;
78977@@ -473,14 +524,6 @@ out:
78978 up_write(&oldmm->mmap_sem);
78979 uprobe_end_dup_mmap();
78980 return retval;
78981-fail_nomem_anon_vma_fork:
78982- mpol_put(pol);
78983-fail_nomem_policy:
78984- kmem_cache_free(vm_area_cachep, tmp);
78985-fail_nomem:
78986- retval = -ENOMEM;
78987- vm_unacct_memory(charge);
78988- goto out;
78989 }
78990
78991 static inline int mm_alloc_pgd(struct mm_struct *mm)
78992@@ -695,8 +738,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
78993 return ERR_PTR(err);
78994
78995 mm = get_task_mm(task);
78996- if (mm && mm != current->mm &&
78997- !ptrace_may_access(task, mode)) {
78998+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
78999+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
79000 mmput(mm);
79001 mm = ERR_PTR(-EACCES);
79002 }
79003@@ -918,13 +961,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
79004 spin_unlock(&fs->lock);
79005 return -EAGAIN;
79006 }
79007- fs->users++;
79008+ atomic_inc(&fs->users);
79009 spin_unlock(&fs->lock);
79010 return 0;
79011 }
79012 tsk->fs = copy_fs_struct(fs);
79013 if (!tsk->fs)
79014 return -ENOMEM;
79015+ /* Carry through gr_chroot_dentry and is_chrooted instead
79016+ of recomputing it here. Already copied when the task struct
79017+ is duplicated. This allows pivot_root to not be treated as
79018+ a chroot
79019+ */
79020+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
79021+
79022 return 0;
79023 }
79024
79025@@ -1197,10 +1247,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
79026 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
79027 #endif
79028 retval = -EAGAIN;
79029+
79030+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
79031+
79032 if (atomic_read(&p->real_cred->user->processes) >=
79033 task_rlimit(p, RLIMIT_NPROC)) {
79034- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
79035- p->real_cred->user != INIT_USER)
79036+ if (p->real_cred->user != INIT_USER &&
79037+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
79038 goto bad_fork_free;
79039 }
79040 current->flags &= ~PF_NPROC_EXCEEDED;
79041@@ -1446,6 +1499,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
79042 goto bad_fork_free_pid;
79043 }
79044
79045+ /* synchronizes with gr_set_acls()
79046+ we need to call this past the point of no return for fork()
79047+ */
79048+ gr_copy_label(p);
79049+
79050 if (clone_flags & CLONE_THREAD) {
79051 current->signal->nr_threads++;
79052 atomic_inc(&current->signal->live);
79053@@ -1529,6 +1587,8 @@ bad_fork_cleanup_count:
79054 bad_fork_free:
79055 free_task(p);
79056 fork_out:
79057+ gr_log_forkfail(retval);
79058+
79059 return ERR_PTR(retval);
79060 }
79061
79062@@ -1613,6 +1673,8 @@ long do_fork(unsigned long clone_flags,
79063 if (clone_flags & CLONE_PARENT_SETTID)
79064 put_user(nr, parent_tidptr);
79065
79066+ gr_handle_brute_check();
79067+
79068 if (clone_flags & CLONE_VFORK) {
79069 p->vfork_done = &vfork;
79070 init_completion(&vfork);
79071@@ -1723,7 +1785,7 @@ void __init proc_caches_init(void)
79072 mm_cachep = kmem_cache_create("mm_struct",
79073 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
79074 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
79075- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
79076+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
79077 mmap_init();
79078 nsproxy_cache_init();
79079 }
79080@@ -1763,7 +1825,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
79081 return 0;
79082
79083 /* don't need lock here; in the worst case we'll do useless copy */
79084- if (fs->users == 1)
79085+ if (atomic_read(&fs->users) == 1)
79086 return 0;
79087
79088 *new_fsp = copy_fs_struct(fs);
79089@@ -1875,7 +1937,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
79090 fs = current->fs;
79091 spin_lock(&fs->lock);
79092 current->fs = new_fs;
79093- if (--fs->users)
79094+ gr_set_chroot_entries(current, &current->fs->root);
79095+ if (atomic_dec_return(&fs->users))
79096 new_fs = NULL;
79097 else
79098 new_fs = fs;
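
Most of the fork.c hunk is a mechanical refactor: the body of dup_mmap()'s per-VMA loop moves into a new dup_vma() helper that returns the copy or NULL, taking the charge/policy/file cleanup labels with it, which is what lets the SEGMEXEC mirror pass be appended afterwards without a 150-line loop body in the way. The extract-loop-body-into-helper shape, reduced to its essentials on a toy list type:

#include <stdio.h>
#include <stdlib.h>

struct vma_like { long start, end; struct vma_like *next; };

/* Helper in the style of the new dup_vma(): copy one element, undo
 * everything on failure, and report failure as NULL. */
static struct vma_like *dup_one(const struct vma_like *src)
{
    struct vma_like *tmp = malloc(sizeof(*tmp));
    if (!tmp)
        return NULL;               /* caller maps this to -ENOMEM */
    *tmp = *src;
    tmp->next = NULL;
    return tmp;
}

/* The loop that used to carry all the failure labels now reads as a
 * simple walk, like the reworked dup_mmap(). */
static struct vma_like *dup_list(const struct vma_like *head)
{
    struct vma_like *copy = NULL, **tail = &copy;

    for (const struct vma_like *v = head; v; v = v->next) {
        struct vma_like *tmp = dup_one(v);
        if (!tmp) {
            while (copy) {          /* unwind on error */
                struct vma_like *n = copy->next;
                free(copy);
                copy = n;
            }
            return NULL;
        }
        *tail = tmp;
        tail = &tmp->next;
    }
    return copy;
}

int main(void)
{
    struct vma_like b = { 200, 300, NULL }, a = { 0, 100, &b };
    struct vma_like *copy = dup_list(&a);

    for (struct vma_like *v = copy; v; v = v->next)
        printf("[%ld, %ld)\n", v->start, v->end);

    while (copy) {                  /* tidy up the demo list */
        struct vma_like *n = copy->next;
        free(copy);
        copy = n;
    }
    return 0;
}
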
79099diff --git a/kernel/futex.c b/kernel/futex.c
79100index 49dacfb..5c6b450 100644
79101--- a/kernel/futex.c
79102+++ b/kernel/futex.c
79103@@ -54,6 +54,7 @@
79104 #include <linux/mount.h>
79105 #include <linux/pagemap.h>
79106 #include <linux/syscalls.h>
79107+#include <linux/ptrace.h>
79108 #include <linux/signal.h>
79109 #include <linux/export.h>
79110 #include <linux/magic.h>
79111@@ -242,6 +243,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
79112 struct page *page, *page_head;
79113 int err, ro = 0;
79114
79115+#ifdef CONFIG_PAX_SEGMEXEC
79116+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
79117+ return -EFAULT;
79118+#endif
79119+
79120 /*
79121 * The futex address must be "naturally" aligned.
79122 */
79123@@ -2733,6 +2739,7 @@ static int __init futex_init(void)
79124 {
79125 u32 curval;
79126 int i;
79127+ mm_segment_t oldfs;
79128
79129 /*
79130 * This will fail and we want it. Some arch implementations do
79131@@ -2744,8 +2751,11 @@ static int __init futex_init(void)
79132 * implementation, the non-functional ones will return
79133 * -ENOSYS.
79134 */
79135+ oldfs = get_fs();
79136+ set_fs(USER_DS);
79137 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
79138 futex_cmpxchg_enabled = 1;
79139+ set_fs(oldfs);
79140
79141 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
79142 plist_head_init(&futex_queues[i].chain);
79143diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
79144index f9f44fd..29885e4 100644
79145--- a/kernel/futex_compat.c
79146+++ b/kernel/futex_compat.c
79147@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
79148 return 0;
79149 }
79150
79151-static void __user *futex_uaddr(struct robust_list __user *entry,
79152+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
79153 compat_long_t futex_offset)
79154 {
79155 compat_uptr_t base = ptr_to_compat(entry);
79156diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
79157index 9b22d03..6295b62 100644
79158--- a/kernel/gcov/base.c
79159+++ b/kernel/gcov/base.c
79160@@ -102,11 +102,6 @@ void gcov_enable_events(void)
79161 }
79162
79163 #ifdef CONFIG_MODULES
79164-static inline int within(void *addr, void *start, unsigned long size)
79165-{
79166- return ((addr >= start) && (addr < start + size));
79167-}
79168-
79169 /* Update list and generate events when modules are unloaded. */
79170 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
79171 void *data)
79172@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
79173 prev = NULL;
79174 /* Remove entries located in module from linked list. */
79175 for (info = gcov_info_head; info; info = info->next) {
79176- if (within(info, mod->module_core, mod->core_size)) {
79177+ if (within_module_core_rw((unsigned long)info, mod)) {
79178 if (prev)
79179 prev->next = info->next;
79180 else
79181diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
79182index 2288fbd..0f3941f 100644
79183--- a/kernel/hrtimer.c
79184+++ b/kernel/hrtimer.c
79185@@ -1435,7 +1435,7 @@ void hrtimer_peek_ahead_timers(void)
79186 local_irq_restore(flags);
79187 }
79188
79189-static void run_hrtimer_softirq(struct softirq_action *h)
79190+static void run_hrtimer_softirq(void)
79191 {
79192 hrtimer_peek_ahead_timers();
79193 }
79194@@ -1770,7 +1770,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
79195 return NOTIFY_OK;
79196 }
79197
79198-static struct notifier_block __cpuinitdata hrtimers_nb = {
79199+static struct notifier_block hrtimers_nb = {
79200 .notifier_call = hrtimer_cpu_notify,
79201 };
79202
79203diff --git a/kernel/irq_work.c b/kernel/irq_work.c
79204index 55fcce6..0e4cf34 100644
79205--- a/kernel/irq_work.c
79206+++ b/kernel/irq_work.c
79207@@ -189,12 +189,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
79208 return NOTIFY_OK;
79209 }
79210
79211-static struct notifier_block cpu_notify;
79212+static struct notifier_block cpu_notify = {
79213+ .notifier_call = irq_work_cpu_notify,
79214+ .priority = 0,
79215+};
79216
79217 static __init int irq_work_init_cpu_notifier(void)
79218 {
79219- cpu_notify.notifier_call = irq_work_cpu_notify;
79220- cpu_notify.priority = 0;
79221 register_cpu_notifier(&cpu_notify);
79222 return 0;
79223 }
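
The irq_work.c change is tiny but characteristic: filling in the notifier_block with a designated initializer at definition time, instead of assigning its fields inside an __init function, means the object never needs to be written at runtime, which is what allows grsecurity to place such structures in read-only memory elsewhere in the patch. The two styles side by side in a small sketch:

#include <stdio.h>

struct notifier_like {
    int (*call)(int event);
    int priority;
};

static int my_notify(int event)
{
    printf("event %d\n", event);
    return 0;
}

/* After: fully initialized at compile time; the object can be const
 * (or live in a read-only section) since nothing ever stores to it. */
static const struct notifier_like cpu_notify = {
    .call     = my_notify,
    .priority = 0,
};

int main(void)
{
    /* Before (for contrast), runtime assignment would force the
     * struct to stay writable:
     *     cpu_notify.call = my_notify;
     *     cpu_notify.priority = 0;
     */
    return cpu_notify.call(42);
}
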
79224diff --git a/kernel/jump_label.c b/kernel/jump_label.c
79225index 60f48fa..7f3a770 100644
79226--- a/kernel/jump_label.c
79227+++ b/kernel/jump_label.c
79228@@ -13,6 +13,7 @@
79229 #include <linux/sort.h>
79230 #include <linux/err.h>
79231 #include <linux/static_key.h>
79232+#include <linux/mm.h>
79233
79234 #ifdef HAVE_JUMP_LABEL
79235
79236@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
79237
79238 size = (((unsigned long)stop - (unsigned long)start)
79239 / sizeof(struct jump_entry));
79240+ pax_open_kernel();
79241 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
79242+ pax_close_kernel();
79243 }
79244
79245 static void jump_label_update(struct static_key *key, int enable);
79246@@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
79247 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
79248 struct jump_entry *iter;
79249
79250+ pax_open_kernel();
79251 for (iter = iter_start; iter < iter_stop; iter++) {
79252 if (within_module_init(iter->code, mod))
79253 iter->code = 0;
79254 }
79255+ pax_close_kernel();
79256 }
79257
79258 static int
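
pax_open_kernel()/pax_close_kernel() bracket those writes because under PAX_KERNEXEC the jump-entry tables live in read-only memory; the pair briefly lifts write protection (on x86, by toggling CR0.WP or switching to a writable mapping) just for the duration of the stores. A userspace analogue of the same discipline, using mprotect() on a page kept read-only between updates:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);

    /* A "kernel table" kept read-only, like jump entries under
     * KERNEXEC. */
    char *table = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (table == MAP_FAILED)
        return 1;
    strcpy(table, "original");
    mprotect(table, pagesz, PROT_READ);     /* now sealed */

    /* pax_open_kernel() analogue: briefly writable... */
    mprotect(table, pagesz, PROT_READ | PROT_WRITE);
    strcpy(table, "patched");               /* the sort()/iter->code writes */
    /* ...pax_close_kernel() analogue: sealed again. */
    mprotect(table, pagesz, PROT_READ);

    printf("%s\n", table);                  /* "patched" */
    return 0;
}
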
79259diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
79260index 3127ad5..159d880 100644
79261--- a/kernel/kallsyms.c
79262+++ b/kernel/kallsyms.c
79263@@ -11,6 +11,9 @@
79264 * Changed the compression method from stem compression to "table lookup"
79265 * compression (see scripts/kallsyms.c for a more complete description)
79266 */
79267+#ifdef CONFIG_GRKERNSEC_HIDESYM
79268+#define __INCLUDED_BY_HIDESYM 1
79269+#endif
79270 #include <linux/kallsyms.h>
79271 #include <linux/module.h>
79272 #include <linux/init.h>
79273@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
79274
79275 static inline int is_kernel_inittext(unsigned long addr)
79276 {
79277+ if (system_state != SYSTEM_BOOTING)
79278+ return 0;
79279+
79280 if (addr >= (unsigned long)_sinittext
79281 && addr <= (unsigned long)_einittext)
79282 return 1;
79283 return 0;
79284 }
79285
79286+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
79287+#ifdef CONFIG_MODULES
79288+static inline int is_module_text(unsigned long addr)
79289+{
79290+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
79291+ return 1;
79292+
79293+ addr = ktla_ktva(addr);
79294+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
79295+}
79296+#else
79297+static inline int is_module_text(unsigned long addr)
79298+{
79299+ return 0;
79300+}
79301+#endif
79302+#endif
79303+
79304 static inline int is_kernel_text(unsigned long addr)
79305 {
79306 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
79307@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
79308
79309 static inline int is_kernel(unsigned long addr)
79310 {
79311+
79312+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
79313+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
79314+ return 1;
79315+
79316+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
79317+#else
79318 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
79319+#endif
79320+
79321 return 1;
79322 return in_gate_area_no_mm(addr);
79323 }
79324
79325 static int is_ksym_addr(unsigned long addr)
79326 {
79327+
79328+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
79329+ if (is_module_text(addr))
79330+ return 0;
79331+#endif
79332+
79333 if (all_var)
79334 return is_kernel(addr);
79335
79336@@ -480,7 +519,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
79337
79338 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
79339 {
79340- iter->name[0] = '\0';
79341 iter->nameoff = get_symbol_offset(new_pos);
79342 iter->pos = new_pos;
79343 }
79344@@ -528,6 +566,11 @@ static int s_show(struct seq_file *m, void *p)
79345 {
79346 struct kallsym_iter *iter = m->private;
79347
79348+#ifdef CONFIG_GRKERNSEC_HIDESYM
79349+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
79350+ return 0;
79351+#endif
79352+
79353 /* Some debugging symbols have no name. Ignore them. */
79354 if (!iter->name[0])
79355 return 0;
79356@@ -541,6 +584,7 @@ static int s_show(struct seq_file *m, void *p)
79357 */
79358 type = iter->exported ? toupper(iter->type) :
79359 tolower(iter->type);
79360+
79361 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
79362 type, iter->name, iter->module_name);
79363 } else
79364@@ -566,7 +610,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
79365 struct kallsym_iter *iter;
79366 int ret;
79367
79368- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
79369+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
79370 if (!iter)
79371 return -ENOMEM;
79372 reset_iter(iter, 0);
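
Two of the kallsyms changes pair up: reset_iter() no longer clears iter->name, so the allocation in kallsyms_open() switches from kmalloc() to kzalloc() to keep the buffer from ever holding stale heap bytes; under HIDESYM this matters because uninitialized kernel heap reaching a seq_file is an information leak. The userspace shape of the same fix, with calloc() playing kzalloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct iter_like {
    char name[16];
    long pos;
};

int main(void)
{
    /* Salt the heap so recycled memory visibly carries old data. */
    char *junk = malloc(sizeof(struct iter_like));
    memset(junk, 'X', sizeof(struct iter_like));
    free(junk);

    /* kmalloc()-style: may observe whatever the allocator hands back
     * (indeterminate in C; shown here purely for illustration). */
    struct iter_like *leaky = malloc(sizeof(*leaky));
    printf("malloc'd name[0] = %d (possibly stale)\n",
           (unsigned char)leaky->name[0]);
    free(leaky);

    /* kzalloc()-style: guaranteed zeroed, nothing stale to disclose. */
    struct iter_like *clean = calloc(1, sizeof(*clean));
    printf("calloc'd name[0] = %d (always 0)\n", clean->name[0]);
    free(clean);
    return 0;
}
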
79373diff --git a/kernel/kcmp.c b/kernel/kcmp.c
79374index e30ac0f..3528cac 100644
79375--- a/kernel/kcmp.c
79376+++ b/kernel/kcmp.c
79377@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
79378 struct task_struct *task1, *task2;
79379 int ret;
79380
79381+#ifdef CONFIG_GRKERNSEC
79382+ return -ENOSYS;
79383+#endif
79384+
79385 rcu_read_lock();
79386
79387 /*
79388diff --git a/kernel/kexec.c b/kernel/kexec.c
79389index 59f7b55..4022f65 100644
79390--- a/kernel/kexec.c
79391+++ b/kernel/kexec.c
79392@@ -1041,7 +1041,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
79393 unsigned long flags)
79394 {
79395 struct compat_kexec_segment in;
79396- struct kexec_segment out, __user *ksegments;
79397+ struct kexec_segment out;
79398+ struct kexec_segment __user *ksegments;
79399 unsigned long i, result;
79400
79401 /* Don't allow clients that don't understand the native
79402diff --git a/kernel/kmod.c b/kernel/kmod.c
79403index 8241906..d625f2c 100644
79404--- a/kernel/kmod.c
79405+++ b/kernel/kmod.c
79406@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
79407 kfree(info->argv);
79408 }
79409
79410-static int call_modprobe(char *module_name, int wait)
79411+static int call_modprobe(char *module_name, char *module_param, int wait)
79412 {
79413 struct subprocess_info *info;
79414 static char *envp[] = {
79415@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
79416 NULL
79417 };
79418
79419- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
79420+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
79421 if (!argv)
79422 goto out;
79423
79424@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
79425 argv[1] = "-q";
79426 argv[2] = "--";
79427 argv[3] = module_name; /* check free_modprobe_argv() */
79428- argv[4] = NULL;
79429+ argv[4] = module_param;
79430+ argv[5] = NULL;
79431
79432 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
79433 NULL, free_modprobe_argv, NULL);
79434@@ -129,9 +130,8 @@ out:
79435 * If module auto-loading support is disabled then this function
79436 * becomes a no-operation.
79437 */
79438-int __request_module(bool wait, const char *fmt, ...)
79439+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
79440 {
79441- va_list args;
79442 char module_name[MODULE_NAME_LEN];
79443 unsigned int max_modprobes;
79444 int ret;
79445@@ -147,9 +147,7 @@ int __request_module(bool wait, const char *fmt, ...)
79446 */
79447 WARN_ON_ONCE(wait && current_is_async());
79448
79449- va_start(args, fmt);
79450- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
79451- va_end(args);
79452+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
79453 if (ret >= MODULE_NAME_LEN)
79454 return -ENAMETOOLONG;
79455
79456@@ -157,6 +155,20 @@ int __request_module(bool wait, const char *fmt, ...)
79457 if (ret)
79458 return ret;
79459
79460+#ifdef CONFIG_GRKERNSEC_MODHARDEN
79461+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
79462+ /* hack to workaround consolekit/udisks stupidity */
79463+ read_lock(&tasklist_lock);
79464+ if (!strcmp(current->comm, "mount") &&
79465+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
79466+ read_unlock(&tasklist_lock);
79467+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
79468+ return -EPERM;
79469+ }
79470+ read_unlock(&tasklist_lock);
79471+ }
79472+#endif
79473+
79474 /* If modprobe needs a service that is in a module, we get a recursive
79475 * loop. Limit the number of running kmod threads to max_threads/2 or
79476 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
79477@@ -185,11 +197,52 @@ int __request_module(bool wait, const char *fmt, ...)
79478
79479 trace_module_request(module_name, wait, _RET_IP_);
79480
79481- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
79482+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
79483
79484 atomic_dec(&kmod_concurrent);
79485 return ret;
79486 }
79487+
79488+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
79489+{
79490+ va_list args;
79491+ int ret;
79492+
79493+ va_start(args, fmt);
79494+ ret = ____request_module(wait, module_param, fmt, args);
79495+ va_end(args);
79496+
79497+ return ret;
79498+}
79499+
79500+int __request_module(bool wait, const char *fmt, ...)
79501+{
79502+ va_list args;
79503+ int ret;
79504+
79505+#ifdef CONFIG_GRKERNSEC_MODHARDEN
79506+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
79507+ char module_param[MODULE_NAME_LEN];
79508+
79509+ memset(module_param, 0, sizeof(module_param));
79510+
79511+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
79512+
79513+ va_start(args, fmt);
79514+ ret = ____request_module(wait, module_param, fmt, args);
79515+ va_end(args);
79516+
79517+ return ret;
79518+ }
79519+#endif
79520+
79521+ va_start(args, fmt);
79522+ ret = ____request_module(wait, NULL, fmt, args);
79523+ va_end(args);
79524+
79525+ return ret;
79526+}
79527+
79528 EXPORT_SYMBOL(__request_module);
79529 #endif /* CONFIG_MODULES */
79530
79531@@ -300,7 +353,7 @@ static int wait_for_helper(void *data)
79532 *
79533 * Thus the __user pointer cast is valid here.
79534 */
79535- sys_wait4(pid, (int __user *)&ret, 0, NULL);
79536+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
79537
79538 /*
79539 * If ret is 0, either ____call_usermodehelper failed and the
79540@@ -651,7 +704,7 @@ EXPORT_SYMBOL(call_usermodehelper);
79541 static int proc_cap_handler(struct ctl_table *table, int write,
79542 void __user *buffer, size_t *lenp, loff_t *ppos)
79543 {
79544- struct ctl_table t;
79545+ ctl_table_no_const t;
79546 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
79547 kernel_cap_t new_cap;
79548 int err, i;
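
The kmod.c rework splits __request_module() so a caller-invisible extra argument can ride along to modprobe: under MODHARDEN, non-root auto-load requests get a marker parameter of the form "grsec_modharden_normal<uid>_" appended to the argv, which the grsecurity-patched module loader can then recognize and police. Growing the argv from 5 to 6 slots is the whole mechanical change; a sketch of the argv construction, where the marker format copies the hunk and everything else is simplified:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build the modprobe argv the way the patched call_modprobe() does:
 * argv[4] is either NULL (unchanged behavior) or an extra parameter
 * smuggled in by the caller. */
static char **build_argv(const char *module_name, const char *module_param)
{
    char **argv = calloc(6, sizeof(char *));
    if (!argv)
        return NULL;

    argv[0] = "/sbin/modprobe";
    argv[1] = "-q";
    argv[2] = "--";
    argv[3] = (char *)module_name;
    argv[4] = (char *)module_param;   /* may be NULL */
    argv[5] = NULL;
    return argv;
}

int main(void)
{
    char marker[64];
    unsigned uid = 1000;              /* pretend non-root requester */

    snprintf(marker, sizeof(marker), "grsec_modharden_normal%u_", uid);

    char **argv = build_argv("fat", marker);
    if (!argv)
        return 1;
    for (int i = 0; argv[i]; i++)
        printf("argv[%d] = %s\n", i, argv[i]);
    free(argv);
    return 0;
}
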
79549diff --git a/kernel/kprobes.c b/kernel/kprobes.c
79550index bddf3b2..233bf40 100644
79551--- a/kernel/kprobes.c
79552+++ b/kernel/kprobes.c
79553@@ -31,6 +31,9 @@
79554 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
79555 * <prasanna@in.ibm.com> added function-return probes.
79556 */
79557+#ifdef CONFIG_GRKERNSEC_HIDESYM
79558+#define __INCLUDED_BY_HIDESYM 1
79559+#endif
79560 #include <linux/kprobes.h>
79561 #include <linux/hash.h>
79562 #include <linux/init.h>
79563@@ -185,7 +188,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
79564 * kernel image and loaded module images reside. This is required
79565 * so x86_64 can correctly handle the %rip-relative fixups.
79566 */
79567- kip->insns = module_alloc(PAGE_SIZE);
79568+ kip->insns = module_alloc_exec(PAGE_SIZE);
79569 if (!kip->insns) {
79570 kfree(kip);
79571 return NULL;
79572@@ -225,7 +228,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
79573 */
79574 if (!list_is_singular(&kip->list)) {
79575 list_del(&kip->list);
79576- module_free(NULL, kip->insns);
79577+ module_free_exec(NULL, kip->insns);
79578 kfree(kip);
79579 }
79580 return 1;
79581@@ -2083,7 +2086,7 @@ static int __init init_kprobes(void)
79582 {
79583 int i, err = 0;
79584 unsigned long offset = 0, size = 0;
79585- char *modname, namebuf[128];
79586+ char *modname, namebuf[KSYM_NAME_LEN];
79587 const char *symbol_name;
79588 void *addr;
79589 struct kprobe_blackpoint *kb;
79590@@ -2168,11 +2171,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
79591 kprobe_type = "k";
79592
79593 if (sym)
79594- seq_printf(pi, "%p %s %s+0x%x %s ",
79595+ seq_printf(pi, "%pK %s %s+0x%x %s ",
79596 p->addr, kprobe_type, sym, offset,
79597 (modname ? modname : " "));
79598 else
79599- seq_printf(pi, "%p %s %p ",
79600+ seq_printf(pi, "%pK %s %pK ",
79601 p->addr, kprobe_type, p->addr);
79602
79603 if (!pp)
79604@@ -2209,7 +2212,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
79605 const char *sym = NULL;
79606 unsigned int i = *(loff_t *) v;
79607 unsigned long offset = 0;
79608- char *modname, namebuf[128];
79609+ char *modname, namebuf[KSYM_NAME_LEN];
79610
79611 head = &kprobe_table[i];
79612 preempt_disable();
79613diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
79614index 6ada93c..dce7d5d 100644
79615--- a/kernel/ksysfs.c
79616+++ b/kernel/ksysfs.c
79617@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
79618 {
79619 if (count+1 > UEVENT_HELPER_PATH_LEN)
79620 return -ENOENT;
79621+ if (!capable(CAP_SYS_ADMIN))
79622+ return -EPERM;
79623 memcpy(uevent_helper, buf, count);
79624 uevent_helper[count] = '\0';
79625 if (count && uevent_helper[count-1] == '\n')
79626@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
79627 return count;
79628 }
79629
79630-static struct bin_attribute notes_attr = {
79631+static bin_attribute_no_const notes_attr __read_only = {
79632 .attr = {
79633 .name = "notes",
79634 .mode = S_IRUGO,
79635diff --git a/kernel/lockdep.c b/kernel/lockdep.c
79636index 1f3186b..bb7dbc6 100644
79637--- a/kernel/lockdep.c
79638+++ b/kernel/lockdep.c
79639@@ -596,6 +596,10 @@ static int static_obj(void *obj)
79640 end = (unsigned long) &_end,
79641 addr = (unsigned long) obj;
79642
79643+#ifdef CONFIG_PAX_KERNEXEC
79644+ start = ktla_ktva(start);
79645+#endif
79646+
79647 /*
79648 * static variable?
79649 */
79650@@ -736,6 +740,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
79651 if (!static_obj(lock->key)) {
79652 debug_locks_off();
79653 printk("INFO: trying to register non-static key.\n");
79654+ printk("lock:%pS key:%pS.\n", lock, lock->key);
79655 printk("the code is fine but needs lockdep annotation.\n");
79656 printk("turning off the locking correctness validator.\n");
79657 dump_stack();
79658@@ -3080,7 +3085,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
79659 if (!class)
79660 return 0;
79661 }
79662- atomic_inc((atomic_t *)&class->ops);
79663+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
79664 if (very_verbose(class)) {
79665 printk("\nacquire class [%p] %s", class->key, class->name);
79666 if (class->name_version > 1)
79667diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
79668index b2c71c5..7b88d63 100644
79669--- a/kernel/lockdep_proc.c
79670+++ b/kernel/lockdep_proc.c
79671@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
79672 return 0;
79673 }
79674
79675- seq_printf(m, "%p", class->key);
79676+ seq_printf(m, "%pK", class->key);
79677 #ifdef CONFIG_DEBUG_LOCKDEP
79678 seq_printf(m, " OPS:%8ld", class->ops);
79679 #endif
79680@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
79681
79682 list_for_each_entry(entry, &class->locks_after, entry) {
79683 if (entry->distance == 1) {
79684- seq_printf(m, " -> [%p] ", entry->class->key);
79685+ seq_printf(m, " -> [%pK] ", entry->class->key);
79686 print_name(m, entry->class);
79687 seq_puts(m, "\n");
79688 }
79689@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
79690 if (!class->key)
79691 continue;
79692
79693- seq_printf(m, "[%p] ", class->key);
79694+ seq_printf(m, "[%pK] ", class->key);
79695 print_name(m, class);
79696 seq_puts(m, "\n");
79697 }
79698@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
79699 if (!i)
79700 seq_line(m, '-', 40-namelen, namelen);
79701
79702- snprintf(ip, sizeof(ip), "[<%p>]",
79703+ snprintf(ip, sizeof(ip), "[<%pK>]",
79704 (void *)class->contention_point[i]);
79705 seq_printf(m, "%40s %14lu %29s %pS\n",
79706 name, stats->contention_point[i],
79707@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
79708 if (!i)
79709 seq_line(m, '-', 40-namelen, namelen);
79710
79711- snprintf(ip, sizeof(ip), "[<%p>]",
79712+ snprintf(ip, sizeof(ip), "[<%pK>]",
79713 (void *)class->contending_point[i]);
79714 seq_printf(m, "%40s %14lu %29s %pS\n",
79715 name, stats->contending_point[i],
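
The lockdep_proc.c hunks swap %p for %pK, so the lock-class keys and contention addresses shown in /proc/lockdep* are redacted for readers without the required capability (subject to kptr_restrict) instead of leaking kernel addresses. A userspace sketch of the redaction policy; print_ptr_k() and reader_may_see_pointers() are made-up names, the real logic lives in the kernel's vsprintf %pK handler:

#include <stdbool.h>
#include <stdio.h>

/* stand-in for the kptr_restrict/CAP_SYSLOG policy check */
static bool reader_may_see_pointers(void) { return false; }

static void print_ptr_k(FILE *out, const void *p)
{
	if (reader_may_see_pointers())
		fprintf(out, "%p", p);
	else
		fprintf(out, "%p", (void *)0);	/* unprivileged readers see nil */
}

int main(void)
{
	int x;

	print_ptr_k(stdout, &x);
	putchar('\n');
	return 0;
}
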
79716diff --git a/kernel/module.c b/kernel/module.c
79717index fa53db8..6f17200 100644
79718--- a/kernel/module.c
79719+++ b/kernel/module.c
79720@@ -61,6 +61,7 @@
79721 #include <linux/pfn.h>
79722 #include <linux/bsearch.h>
79723 #include <linux/fips.h>
79724+#include <linux/grsecurity.h>
79725 #include <uapi/linux/module.h>
79726 #include "module-internal.h"
79727
79728@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
79729
79730 /* Bounds of module allocation, for speeding __module_address.
79731 * Protected by module_mutex. */
79732-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
79733+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
79734+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
79735
79736 int register_module_notifier(struct notifier_block * nb)
79737 {
79738@@ -323,7 +325,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
79739 return true;
79740
79741 list_for_each_entry_rcu(mod, &modules, list) {
79742- struct symsearch arr[] = {
79743+ struct symsearch modarr[] = {
79744 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
79745 NOT_GPL_ONLY, false },
79746 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
79747@@ -348,7 +350,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
79748 if (mod->state == MODULE_STATE_UNFORMED)
79749 continue;
79750
79751- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
79752+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
79753 return true;
79754 }
79755 return false;
79756@@ -485,7 +487,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
79757 static int percpu_modalloc(struct module *mod,
79758 unsigned long size, unsigned long align)
79759 {
79760- if (align > PAGE_SIZE) {
79761+ if (align-1 >= PAGE_SIZE) {
79762 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
79763 mod->name, align, PAGE_SIZE);
79764 align = PAGE_SIZE;
79765@@ -1089,7 +1091,7 @@ struct module_attribute module_uevent =
79766 static ssize_t show_coresize(struct module_attribute *mattr,
79767 struct module_kobject *mk, char *buffer)
79768 {
79769- return sprintf(buffer, "%u\n", mk->mod->core_size);
79770+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
79771 }
79772
79773 static struct module_attribute modinfo_coresize =
79774@@ -1098,7 +1100,7 @@ static struct module_attribute modinfo_coresize =
79775 static ssize_t show_initsize(struct module_attribute *mattr,
79776 struct module_kobject *mk, char *buffer)
79777 {
79778- return sprintf(buffer, "%u\n", mk->mod->init_size);
79779+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
79780 }
79781
79782 static struct module_attribute modinfo_initsize =
79783@@ -1313,7 +1315,7 @@ resolve_symbol_wait(struct module *mod,
79784 */
79785 #ifdef CONFIG_SYSFS
79786
79787-#ifdef CONFIG_KALLSYMS
79788+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
79789 static inline bool sect_empty(const Elf_Shdr *sect)
79790 {
79791 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
79792@@ -1453,7 +1455,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
79793 {
79794 unsigned int notes, loaded, i;
79795 struct module_notes_attrs *notes_attrs;
79796- struct bin_attribute *nattr;
79797+ bin_attribute_no_const *nattr;
79798
79799 /* failed to create section attributes, so can't create notes */
79800 if (!mod->sect_attrs)
79801@@ -1565,7 +1567,7 @@ static void del_usage_links(struct module *mod)
79802 static int module_add_modinfo_attrs(struct module *mod)
79803 {
79804 struct module_attribute *attr;
79805- struct module_attribute *temp_attr;
79806+ module_attribute_no_const *temp_attr;
79807 int error = 0;
79808 int i;
79809
79810@@ -1779,21 +1781,21 @@ static void set_section_ro_nx(void *base,
79811
79812 static void unset_module_core_ro_nx(struct module *mod)
79813 {
79814- set_page_attributes(mod->module_core + mod->core_text_size,
79815- mod->module_core + mod->core_size,
79816+ set_page_attributes(mod->module_core_rw,
79817+ mod->module_core_rw + mod->core_size_rw,
79818 set_memory_x);
79819- set_page_attributes(mod->module_core,
79820- mod->module_core + mod->core_ro_size,
79821+ set_page_attributes(mod->module_core_rx,
79822+ mod->module_core_rx + mod->core_size_rx,
79823 set_memory_rw);
79824 }
79825
79826 static void unset_module_init_ro_nx(struct module *mod)
79827 {
79828- set_page_attributes(mod->module_init + mod->init_text_size,
79829- mod->module_init + mod->init_size,
79830+ set_page_attributes(mod->module_init_rw,
79831+ mod->module_init_rw + mod->init_size_rw,
79832 set_memory_x);
79833- set_page_attributes(mod->module_init,
79834- mod->module_init + mod->init_ro_size,
79835+ set_page_attributes(mod->module_init_rx,
79836+ mod->module_init_rx + mod->init_size_rx,
79837 set_memory_rw);
79838 }
79839
79840@@ -1806,14 +1808,14 @@ void set_all_modules_text_rw(void)
79841 list_for_each_entry_rcu(mod, &modules, list) {
79842 if (mod->state == MODULE_STATE_UNFORMED)
79843 continue;
79844- if ((mod->module_core) && (mod->core_text_size)) {
79845- set_page_attributes(mod->module_core,
79846- mod->module_core + mod->core_text_size,
79847+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
79848+ set_page_attributes(mod->module_core_rx,
79849+ mod->module_core_rx + mod->core_size_rx,
79850 set_memory_rw);
79851 }
79852- if ((mod->module_init) && (mod->init_text_size)) {
79853- set_page_attributes(mod->module_init,
79854- mod->module_init + mod->init_text_size,
79855+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
79856+ set_page_attributes(mod->module_init_rx,
79857+ mod->module_init_rx + mod->init_size_rx,
79858 set_memory_rw);
79859 }
79860 }
79861@@ -1829,14 +1831,14 @@ void set_all_modules_text_ro(void)
79862 list_for_each_entry_rcu(mod, &modules, list) {
79863 if (mod->state == MODULE_STATE_UNFORMED)
79864 continue;
79865- if ((mod->module_core) && (mod->core_text_size)) {
79866- set_page_attributes(mod->module_core,
79867- mod->module_core + mod->core_text_size,
79868+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
79869+ set_page_attributes(mod->module_core_rx,
79870+ mod->module_core_rx + mod->core_size_rx,
79871 set_memory_ro);
79872 }
79873- if ((mod->module_init) && (mod->init_text_size)) {
79874- set_page_attributes(mod->module_init,
79875- mod->module_init + mod->init_text_size,
79876+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
79877+ set_page_attributes(mod->module_init_rx,
79878+ mod->module_init_rx + mod->init_size_rx,
79879 set_memory_ro);
79880 }
79881 }
79882@@ -1887,16 +1889,19 @@ static void free_module(struct module *mod)
79883
79884 /* This may be NULL, but that's OK */
79885 unset_module_init_ro_nx(mod);
79886- module_free(mod, mod->module_init);
79887+ module_free(mod, mod->module_init_rw);
79888+ module_free_exec(mod, mod->module_init_rx);
79889 kfree(mod->args);
79890 percpu_modfree(mod);
79891
79892 /* Free lock-classes: */
79893- lockdep_free_key_range(mod->module_core, mod->core_size);
79894+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
79895+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
79896
79897 /* Finally, free the core (containing the module structure) */
79898 unset_module_core_ro_nx(mod);
79899- module_free(mod, mod->module_core);
79900+ module_free_exec(mod, mod->module_core_rx);
79901+ module_free(mod, mod->module_core_rw);
79902
79903 #ifdef CONFIG_MPU
79904 update_protections(current->mm);
79905@@ -1966,9 +1971,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
79906 int ret = 0;
79907 const struct kernel_symbol *ksym;
79908
79909+#ifdef CONFIG_GRKERNSEC_MODHARDEN
79910+ int is_fs_load = 0;
79911+ int register_filesystem_found = 0;
79912+ char *p;
79913+
79914+ p = strstr(mod->args, "grsec_modharden_fs");
79915+ if (p) {
79916+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
79917+ /* copy \0 as well */
79918+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
79919+ is_fs_load = 1;
79920+ }
79921+#endif
79922+
79923 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
79924 const char *name = info->strtab + sym[i].st_name;
79925
79926+#ifdef CONFIG_GRKERNSEC_MODHARDEN
79927+ /* it's a real shame this will never get ripped and copied
79928+ upstream! ;(
79929+ */
79930+ if (is_fs_load && !strcmp(name, "register_filesystem"))
79931+ register_filesystem_found = 1;
79932+#endif
79933+
79934 switch (sym[i].st_shndx) {
79935 case SHN_COMMON:
79936 /* We compiled with -fno-common. These are not
79937@@ -1989,7 +2016,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
79938 ksym = resolve_symbol_wait(mod, info, name);
79939 /* Ok if resolved. */
79940 if (ksym && !IS_ERR(ksym)) {
79941+ pax_open_kernel();
79942 sym[i].st_value = ksym->value;
79943+ pax_close_kernel();
79944 break;
79945 }
79946
79947@@ -2008,11 +2037,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
79948 secbase = (unsigned long)mod_percpu(mod);
79949 else
79950 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
79951+ pax_open_kernel();
79952 sym[i].st_value += secbase;
79953+ pax_close_kernel();
79954 break;
79955 }
79956 }
79957
79958+#ifdef CONFIG_GRKERNSEC_MODHARDEN
79959+ if (is_fs_load && !register_filesystem_found) {
79960+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
79961+ ret = -EPERM;
79962+ }
79963+#endif
79964+
79965 return ret;
79966 }
79967
79968@@ -2096,22 +2134,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
79969 || s->sh_entsize != ~0UL
79970 || strstarts(sname, ".init"))
79971 continue;
79972- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
79973+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
79974+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
79975+ else
79976+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
79977 pr_debug("\t%s\n", sname);
79978 }
79979- switch (m) {
79980- case 0: /* executable */
79981- mod->core_size = debug_align(mod->core_size);
79982- mod->core_text_size = mod->core_size;
79983- break;
79984- case 1: /* RO: text and ro-data */
79985- mod->core_size = debug_align(mod->core_size);
79986- mod->core_ro_size = mod->core_size;
79987- break;
79988- case 3: /* whole core */
79989- mod->core_size = debug_align(mod->core_size);
79990- break;
79991- }
79992 }
79993
79994 pr_debug("Init section allocation order:\n");
79995@@ -2125,23 +2153,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
79996 || s->sh_entsize != ~0UL
79997 || !strstarts(sname, ".init"))
79998 continue;
79999- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
80000- | INIT_OFFSET_MASK);
80001+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
80002+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
80003+ else
80004+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
80005+ s->sh_entsize |= INIT_OFFSET_MASK;
80006 pr_debug("\t%s\n", sname);
80007 }
80008- switch (m) {
80009- case 0: /* executable */
80010- mod->init_size = debug_align(mod->init_size);
80011- mod->init_text_size = mod->init_size;
80012- break;
80013- case 1: /* RO: text and ro-data */
80014- mod->init_size = debug_align(mod->init_size);
80015- mod->init_ro_size = mod->init_size;
80016- break;
80017- case 3: /* whole init */
80018- mod->init_size = debug_align(mod->init_size);
80019- break;
80020- }
80021 }
80022 }
80023
80024@@ -2314,7 +2332,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
80025
80026 /* Put symbol section at end of init part of module. */
80027 symsect->sh_flags |= SHF_ALLOC;
80028- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
80029+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
80030 info->index.sym) | INIT_OFFSET_MASK;
80031 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
80032
80033@@ -2331,13 +2349,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
80034 }
80035
80036 /* Append room for core symbols at end of core part. */
80037- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
80038- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
80039- mod->core_size += strtab_size;
80040+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
80041+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
80042+ mod->core_size_rx += strtab_size;
80043
80044 /* Put string table section at end of init part of module. */
80045 strsect->sh_flags |= SHF_ALLOC;
80046- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
80047+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
80048 info->index.str) | INIT_OFFSET_MASK;
80049 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
80050 }
80051@@ -2355,12 +2373,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
80052 /* Make sure we get permanent strtab: don't use info->strtab. */
80053 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
80054
80055+ pax_open_kernel();
80056+
80057 /* Set types up while we still have access to sections. */
80058 for (i = 0; i < mod->num_symtab; i++)
80059 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
80060
80061- mod->core_symtab = dst = mod->module_core + info->symoffs;
80062- mod->core_strtab = s = mod->module_core + info->stroffs;
80063+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
80064+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
80065 src = mod->symtab;
80066 for (ndst = i = 0; i < mod->num_symtab; i++) {
80067 if (i == 0 ||
80068@@ -2372,6 +2392,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
80069 }
80070 }
80071 mod->core_num_syms = ndst;
80072+
80073+ pax_close_kernel();
80074 }
80075 #else
80076 static inline void layout_symtab(struct module *mod, struct load_info *info)
80077@@ -2405,17 +2427,33 @@ void * __weak module_alloc(unsigned long size)
80078 return vmalloc_exec(size);
80079 }
80080
80081-static void *module_alloc_update_bounds(unsigned long size)
80082+static void *module_alloc_update_bounds_rw(unsigned long size)
80083 {
80084 void *ret = module_alloc(size);
80085
80086 if (ret) {
80087 mutex_lock(&module_mutex);
80088 /* Update module bounds. */
80089- if ((unsigned long)ret < module_addr_min)
80090- module_addr_min = (unsigned long)ret;
80091- if ((unsigned long)ret + size > module_addr_max)
80092- module_addr_max = (unsigned long)ret + size;
80093+ if ((unsigned long)ret < module_addr_min_rw)
80094+ module_addr_min_rw = (unsigned long)ret;
80095+ if ((unsigned long)ret + size > module_addr_max_rw)
80096+ module_addr_max_rw = (unsigned long)ret + size;
80097+ mutex_unlock(&module_mutex);
80098+ }
80099+ return ret;
80100+}
80101+
80102+static void *module_alloc_update_bounds_rx(unsigned long size)
80103+{
80104+ void *ret = module_alloc_exec(size);
80105+
80106+ if (ret) {
80107+ mutex_lock(&module_mutex);
80108+ /* Update module bounds. */
80109+ if ((unsigned long)ret < module_addr_min_rx)
80110+ module_addr_min_rx = (unsigned long)ret;
80111+ if ((unsigned long)ret + size > module_addr_max_rx)
80112+ module_addr_max_rx = (unsigned long)ret + size;
80113 mutex_unlock(&module_mutex);
80114 }
80115 return ret;
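
Splitting module_addr_min/max into _rw/_rx pairs lets each allocator maintain the bounds of its own pool, so later lookups such as __module_address() can cheaply reject addresses that fall in neither range. A sketch of that bookkeeping, with malloc() standing in for module_alloc()/module_alloc_exec() and the module_mutex locking omitted:

#include <stdint.h>
#include <stdlib.h>

struct pool_bounds {
	uintptr_t min;	/* lowest address ever handed out from this pool */
	uintptr_t max;	/* one past the highest address handed out */
};

static struct pool_bounds rw_bounds = { UINTPTR_MAX, 0 };
static struct pool_bounds rx_bounds = { UINTPTR_MAX, 0 };

/* widen one pool's bounds on every allocation; the kernel does this
 * under module_mutex, omitted here */
static void *alloc_update_bounds(size_t size, struct pool_bounds *b)
{
	void *ret = malloc(size);	/* stand-in for module_alloc[_exec]() */

	if (ret) {
		uintptr_t lo = (uintptr_t)ret, hi = lo + size;

		if (lo < b->min)
			b->min = lo;
		if (hi > b->max)
			b->max = hi;
	}
	return ret;
}

static int in_pool(uintptr_t addr, const struct pool_bounds *b)
{
	return addr >= b->min && addr < b->max;
}
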
80116@@ -2691,8 +2729,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
80117 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
80118 {
80119 const char *modmagic = get_modinfo(info, "vermagic");
80120+ const char *license = get_modinfo(info, "license");
80121 int err;
80122
80123+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
80124+ if (!license || !license_is_gpl_compatible(license))
80125+ return -ENOEXEC;
80126+#endif
80127+
80128 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
80129 modmagic = NULL;
80130
80131@@ -2718,7 +2762,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
80132 }
80133
80134 /* Set up license info based on the info section */
80135- set_license(mod, get_modinfo(info, "license"));
80136+ set_license(mod, license);
80137
80138 return 0;
80139 }
80140@@ -2799,7 +2843,7 @@ static int move_module(struct module *mod, struct load_info *info)
80141 void *ptr;
80142
80143 /* Do the allocs. */
80144- ptr = module_alloc_update_bounds(mod->core_size);
80145+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
80146 /*
80147 * The pointer to this block is stored in the module structure
80148 * which is inside the block. Just mark it as not being a
80149@@ -2809,11 +2853,11 @@ static int move_module(struct module *mod, struct load_info *info)
80150 if (!ptr)
80151 return -ENOMEM;
80152
80153- memset(ptr, 0, mod->core_size);
80154- mod->module_core = ptr;
80155+ memset(ptr, 0, mod->core_size_rw);
80156+ mod->module_core_rw = ptr;
80157
80158- if (mod->init_size) {
80159- ptr = module_alloc_update_bounds(mod->init_size);
80160+ if (mod->init_size_rw) {
80161+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
80162 /*
80163 * The pointer to this block is stored in the module structure
80164 * which is inside the block. This block doesn't need to be
80165@@ -2822,13 +2866,45 @@ static int move_module(struct module *mod, struct load_info *info)
80166 */
80167 kmemleak_ignore(ptr);
80168 if (!ptr) {
80169- module_free(mod, mod->module_core);
80170+ module_free(mod, mod->module_core_rw);
80171 return -ENOMEM;
80172 }
80173- memset(ptr, 0, mod->init_size);
80174- mod->module_init = ptr;
80175+ memset(ptr, 0, mod->init_size_rw);
80176+ mod->module_init_rw = ptr;
80177 } else
80178- mod->module_init = NULL;
80179+ mod->module_init_rw = NULL;
80180+
80181+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
80182+ kmemleak_not_leak(ptr);
80183+ if (!ptr) {
80184+ if (mod->module_init_rw)
80185+ module_free(mod, mod->module_init_rw);
80186+ module_free(mod, mod->module_core_rw);
80187+ return -ENOMEM;
80188+ }
80189+
80190+ pax_open_kernel();
80191+ memset(ptr, 0, mod->core_size_rx);
80192+ pax_close_kernel();
80193+ mod->module_core_rx = ptr;
80194+
80195+ if (mod->init_size_rx) {
80196+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
80197+ kmemleak_ignore(ptr);
80198+ if (!ptr && mod->init_size_rx) {
80199+ module_free_exec(mod, mod->module_core_rx);
80200+ if (mod->module_init_rw)
80201+ module_free(mod, mod->module_init_rw);
80202+ module_free(mod, mod->module_core_rw);
80203+ return -ENOMEM;
80204+ }
80205+
80206+ pax_open_kernel();
80207+ memset(ptr, 0, mod->init_size_rx);
80208+ pax_close_kernel();
80209+ mod->module_init_rx = ptr;
80210+ } else
80211+ mod->module_init_rx = NULL;
80212
80213 /* Transfer each section which specifies SHF_ALLOC */
80214 pr_debug("final section addresses:\n");
80215@@ -2839,16 +2915,45 @@ static int move_module(struct module *mod, struct load_info *info)
80216 if (!(shdr->sh_flags & SHF_ALLOC))
80217 continue;
80218
80219- if (shdr->sh_entsize & INIT_OFFSET_MASK)
80220- dest = mod->module_init
80221- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
80222- else
80223- dest = mod->module_core + shdr->sh_entsize;
80224+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
80225+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
80226+ dest = mod->module_init_rw
80227+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
80228+ else
80229+ dest = mod->module_init_rx
80230+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
80231+ } else {
80232+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
80233+ dest = mod->module_core_rw + shdr->sh_entsize;
80234+ else
80235+ dest = mod->module_core_rx + shdr->sh_entsize;
80236+ }
80237+
80238+ if (shdr->sh_type != SHT_NOBITS) {
80239+
80240+#ifdef CONFIG_PAX_KERNEXEC
80241+#ifdef CONFIG_X86_64
80242+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
80243+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
80244+#endif
80245+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
80246+ pax_open_kernel();
80247+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
80248+ pax_close_kernel();
80249+ } else
80250+#endif
80251
80252- if (shdr->sh_type != SHT_NOBITS)
80253 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
80254+ }
80255 /* Update sh_addr to point to copy in image. */
80256- shdr->sh_addr = (unsigned long)dest;
80257+
80258+#ifdef CONFIG_PAX_KERNEXEC
80259+ if (shdr->sh_flags & SHF_EXECINSTR)
80260+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
80261+ else
80262+#endif
80263+
80264+ shdr->sh_addr = (unsigned long)dest;
80265 pr_debug("\t0x%lx %s\n",
80266 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
80267 }
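
move_module() now routes every SHF_ALLOC section into one of two blocks, mirroring the layout_sections() split earlier in this file: writable (or non-allocated) sections land in the RW block, text and ro-data in the RX block. A sketch of the routing rule using the standard ELF constants from <elf.h>; the two base pointers are assumptions standing in for module_core_rw/module_core_rx:

#include <elf.h>
#include <stdint.h>

/* the patch sends writable (SHF_WRITE) or non-allocated sections to the
 * RW block and everything else - text and ro-data - to the RX block */
static char *section_dest(const Elf64_Shdr *shdr, char *core_rw, char *core_rx)
{
	uint64_t off = shdr->sh_entsize;	/* offset assigned by the layout pass */

	if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
		return core_rw + off;
	return core_rx + off;
}
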
80268@@ -2905,12 +3010,12 @@ static void flush_module_icache(const struct module *mod)
80269 * Do it before processing of module parameters, so the module
80270 * can provide parameter accessor functions of its own.
80271 */
80272- if (mod->module_init)
80273- flush_icache_range((unsigned long)mod->module_init,
80274- (unsigned long)mod->module_init
80275- + mod->init_size);
80276- flush_icache_range((unsigned long)mod->module_core,
80277- (unsigned long)mod->module_core + mod->core_size);
80278+ if (mod->module_init_rx)
80279+ flush_icache_range((unsigned long)mod->module_init_rx,
80280+ (unsigned long)mod->module_init_rx
80281+ + mod->init_size_rx);
80282+ flush_icache_range((unsigned long)mod->module_core_rx,
80283+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
80284
80285 set_fs(old_fs);
80286 }
80287@@ -2977,8 +3082,10 @@ static int alloc_module_percpu(struct module *mod, struct load_info *info)
80288 static void module_deallocate(struct module *mod, struct load_info *info)
80289 {
80290 percpu_modfree(mod);
80291- module_free(mod, mod->module_init);
80292- module_free(mod, mod->module_core);
80293+ module_free_exec(mod, mod->module_init_rx);
80294+ module_free_exec(mod, mod->module_core_rx);
80295+ module_free(mod, mod->module_init_rw);
80296+ module_free(mod, mod->module_core_rw);
80297 }
80298
80299 int __weak module_finalize(const Elf_Ehdr *hdr,
80300@@ -2991,7 +3098,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
80301 static int post_relocation(struct module *mod, const struct load_info *info)
80302 {
80303 /* Sort exception table now relocations are done. */
80304+ pax_open_kernel();
80305 sort_extable(mod->extable, mod->extable + mod->num_exentries);
80306+ pax_close_kernel();
80307
80308 /* Copy relocated percpu area over. */
80309 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
80310@@ -3045,16 +3154,16 @@ static int do_init_module(struct module *mod)
80311 MODULE_STATE_COMING, mod);
80312
80313 /* Set RO and NX regions for core */
80314- set_section_ro_nx(mod->module_core,
80315- mod->core_text_size,
80316- mod->core_ro_size,
80317- mod->core_size);
80318+ set_section_ro_nx(mod->module_core_rx,
80319+ mod->core_size_rx,
80320+ mod->core_size_rx,
80321+ mod->core_size_rx);
80322
80323 /* Set RO and NX regions for init */
80324- set_section_ro_nx(mod->module_init,
80325- mod->init_text_size,
80326- mod->init_ro_size,
80327- mod->init_size);
80328+ set_section_ro_nx(mod->module_init_rx,
80329+ mod->init_size_rx,
80330+ mod->init_size_rx,
80331+ mod->init_size_rx);
80332
80333 do_mod_ctors(mod);
80334 /* Start the module */
80335@@ -3116,11 +3225,12 @@ static int do_init_module(struct module *mod)
80336 mod->strtab = mod->core_strtab;
80337 #endif
80338 unset_module_init_ro_nx(mod);
80339- module_free(mod, mod->module_init);
80340- mod->module_init = NULL;
80341- mod->init_size = 0;
80342- mod->init_ro_size = 0;
80343- mod->init_text_size = 0;
80344+ module_free(mod, mod->module_init_rw);
80345+ module_free_exec(mod, mod->module_init_rx);
80346+ mod->module_init_rw = NULL;
80347+ mod->module_init_rx = NULL;
80348+ mod->init_size_rw = 0;
80349+ mod->init_size_rx = 0;
80350 mutex_unlock(&module_mutex);
80351 wake_up_all(&module_wq);
80352
80353@@ -3252,9 +3362,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
80354 if (err)
80355 goto free_unload;
80356
80357+ /* Now copy in args */
80358+ mod->args = strndup_user(uargs, ~0UL >> 1);
80359+ if (IS_ERR(mod->args)) {
80360+ err = PTR_ERR(mod->args);
80361+ goto free_unload;
80362+ }
80363+
80364 /* Set up MODINFO_ATTR fields */
80365 setup_modinfo(mod, info);
80366
80367+#ifdef CONFIG_GRKERNSEC_MODHARDEN
80368+ {
80369+ char *p, *p2;
80370+
80371+ if (strstr(mod->args, "grsec_modharden_netdev")) {
80372+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
80373+ err = -EPERM;
80374+ goto free_modinfo;
80375+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
80376+ p += sizeof("grsec_modharden_normal") - 1;
80377+ p2 = strstr(p, "_");
80378+ if (p2) {
80379+ *p2 = '\0';
80380+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
80381+ *p2 = '_';
80382+ }
80383+ err = -EPERM;
80384+ goto free_modinfo;
80385+ }
80386+ }
80387+#endif
80388+
80389 /* Fix up syms, so that st_value is a pointer to location. */
80390 err = simplify_symbols(mod, info);
80391 if (err < 0)
80392@@ -3270,13 +3409,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
80393
80394 flush_module_icache(mod);
80395
80396- /* Now copy in args */
80397- mod->args = strndup_user(uargs, ~0UL >> 1);
80398- if (IS_ERR(mod->args)) {
80399- err = PTR_ERR(mod->args);
80400- goto free_arch_cleanup;
80401- }
80402-
80403 dynamic_debug_setup(info->debug, info->num_debug);
80404
80405 /* Finally it's fully formed, ready to start executing. */
80406@@ -3311,11 +3443,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
80407 ddebug_cleanup:
80408 dynamic_debug_remove(info->debug);
80409 synchronize_sched();
80410- kfree(mod->args);
80411- free_arch_cleanup:
80412 module_arch_cleanup(mod);
80413 free_modinfo:
80414 free_modinfo(mod);
80415+ kfree(mod->args);
80416 free_unload:
80417 module_unload_free(mod);
80418 unlink_mod:
80419@@ -3398,10 +3529,16 @@ static const char *get_ksymbol(struct module *mod,
80420 unsigned long nextval;
80421
80422 /* At worse, next value is at end of module */
80423- if (within_module_init(addr, mod))
80424- nextval = (unsigned long)mod->module_init+mod->init_text_size;
80425+ if (within_module_init_rx(addr, mod))
80426+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
80427+ else if (within_module_init_rw(addr, mod))
80428+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
80429+ else if (within_module_core_rx(addr, mod))
80430+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
80431+ else if (within_module_core_rw(addr, mod))
80432+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
80433 else
80434- nextval = (unsigned long)mod->module_core+mod->core_text_size;
80435+ return NULL;
80436
80437 /* Scan for closest preceding symbol, and next symbol. (ELF
80438 starts real symbols at 1). */
80439@@ -3654,7 +3791,7 @@ static int m_show(struct seq_file *m, void *p)
80440 return 0;
80441
80442 seq_printf(m, "%s %u",
80443- mod->name, mod->init_size + mod->core_size);
80444+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
80445 print_unload_info(m, mod);
80446
80447 /* Informative for users. */
80448@@ -3663,7 +3800,7 @@ static int m_show(struct seq_file *m, void *p)
80449 mod->state == MODULE_STATE_COMING ? "Loading":
80450 "Live");
80451 /* Used by oprofile and other similar tools. */
80452- seq_printf(m, " 0x%pK", mod->module_core);
80453+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
80454
80455 /* Taints info */
80456 if (mod->taints)
80457@@ -3699,7 +3836,17 @@ static const struct file_operations proc_modules_operations = {
80458
80459 static int __init proc_modules_init(void)
80460 {
80461+#ifndef CONFIG_GRKERNSEC_HIDESYM
80462+#ifdef CONFIG_GRKERNSEC_PROC_USER
80463+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
80464+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
80465+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
80466+#else
80467 proc_create("modules", 0, NULL, &proc_modules_operations);
80468+#endif
80469+#else
80470+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
80471+#endif
80472 return 0;
80473 }
80474 module_init(proc_modules_init);
80475@@ -3760,14 +3907,14 @@ struct module *__module_address(unsigned long addr)
80476 {
80477 struct module *mod;
80478
80479- if (addr < module_addr_min || addr > module_addr_max)
80480+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
80481+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
80482 return NULL;
80483
80484 list_for_each_entry_rcu(mod, &modules, list) {
80485 if (mod->state == MODULE_STATE_UNFORMED)
80486 continue;
80487- if (within_module_core(addr, mod)
80488- || within_module_init(addr, mod))
80489+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
80490 return mod;
80491 }
80492 return NULL;
80493@@ -3802,11 +3949,20 @@ bool is_module_text_address(unsigned long addr)
80494 */
80495 struct module *__module_text_address(unsigned long addr)
80496 {
80497- struct module *mod = __module_address(addr);
80498+ struct module *mod;
80499+
80500+#ifdef CONFIG_X86_32
80501+ addr = ktla_ktva(addr);
80502+#endif
80503+
80504+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
80505+ return NULL;
80506+
80507+ mod = __module_address(addr);
80508+
80509 if (mod) {
80510 /* Make sure it's within the text section. */
80511- if (!within(addr, mod->module_init, mod->init_text_size)
80512- && !within(addr, mod->module_core, mod->core_text_size))
80513+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
80514 mod = NULL;
80515 }
80516 return mod;
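
With two blocks per module, __module_text_address() first filters against the global RX bounds and then confirms the hit with within_module_*_rx(); a text address can never live in the RW block. A sketch of the per-module range helpers, where struct mod_layout is a hypothetical mirror of the patched struct module fields:

#include <stdbool.h>
#include <stdint.h>

/* hypothetical mirror of the patched struct module fields */
struct mod_layout {
	uintptr_t core_rx, core_size_rx;	/* executable block */
	uintptr_t core_rw, core_size_rw;	/* writable block */
};

static bool within(uintptr_t addr, uintptr_t base, uintptr_t size)
{
	return addr >= base && addr - base < size;
}

/* a text address must land in the RX block; a hit in RW is data, not code */
static bool is_module_text(uintptr_t addr, const struct mod_layout *m)
{
	return within(addr, m->core_rx, m->core_size_rx);
}
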
80517diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
80518index 7e3443f..b2a1e6b 100644
80519--- a/kernel/mutex-debug.c
80520+++ b/kernel/mutex-debug.c
80521@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
80522 }
80523
80524 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
80525- struct thread_info *ti)
80526+ struct task_struct *task)
80527 {
80528 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
80529
80530 /* Mark the current thread as blocked on the lock: */
80531- ti->task->blocked_on = waiter;
80532+ task->blocked_on = waiter;
80533 }
80534
80535 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
80536- struct thread_info *ti)
80537+ struct task_struct *task)
80538 {
80539 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
80540- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
80541- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
80542- ti->task->blocked_on = NULL;
80543+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
80544+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
80545+ task->blocked_on = NULL;
80546
80547 list_del_init(&waiter->list);
80548 waiter->task = NULL;
80549diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
80550index 0799fd3..d06ae3b 100644
80551--- a/kernel/mutex-debug.h
80552+++ b/kernel/mutex-debug.h
80553@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
80554 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
80555 extern void debug_mutex_add_waiter(struct mutex *lock,
80556 struct mutex_waiter *waiter,
80557- struct thread_info *ti);
80558+ struct task_struct *task);
80559 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
80560- struct thread_info *ti);
80561+ struct task_struct *task);
80562 extern void debug_mutex_unlock(struct mutex *lock);
80563 extern void debug_mutex_init(struct mutex *lock, const char *name,
80564 struct lock_class_key *key);
80565diff --git a/kernel/mutex.c b/kernel/mutex.c
80566index ad53a66..f1bf8bc 100644
80567--- a/kernel/mutex.c
80568+++ b/kernel/mutex.c
80569@@ -134,7 +134,7 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
80570 node->locked = 1;
80571 return;
80572 }
80573- ACCESS_ONCE(prev->next) = node;
80574+ ACCESS_ONCE_RW(prev->next) = node;
80575 smp_wmb();
80576 /* Wait until the lock holder passes the lock down */
80577 while (!ACCESS_ONCE(node->locked))
80578@@ -155,7 +155,7 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
80579 while (!(next = ACCESS_ONCE(node->next)))
80580 arch_mutex_cpu_relax();
80581 }
80582- ACCESS_ONCE(next->locked) = 1;
80583+ ACCESS_ONCE_RW(next->locked) = 1;
80584 smp_wmb();
80585 }
80586
80587@@ -341,7 +341,7 @@ slowpath:
80588 spin_lock_mutex(&lock->wait_lock, flags);
80589
80590 debug_mutex_lock_common(lock, &waiter);
80591- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
80592+ debug_mutex_add_waiter(lock, &waiter, task);
80593
80594 /* add waiting tasks to the end of the waitqueue (FIFO): */
80595 list_add_tail(&waiter.list, &lock->wait_list);
80596@@ -371,8 +371,7 @@ slowpath:
80597 * TASK_UNINTERRUPTIBLE case.)
80598 */
80599 if (unlikely(signal_pending_state(state, task))) {
80600- mutex_remove_waiter(lock, &waiter,
80601- task_thread_info(task));
80602+ mutex_remove_waiter(lock, &waiter, task);
80603 mutex_release(&lock->dep_map, 1, ip);
80604 spin_unlock_mutex(&lock->wait_lock, flags);
80605
80606@@ -391,7 +390,7 @@ slowpath:
80607 done:
80608 lock_acquired(&lock->dep_map, ip);
80609 /* got the lock - rejoice! */
80610- mutex_remove_waiter(lock, &waiter, current_thread_info());
80611+ mutex_remove_waiter(lock, &waiter, task);
80612 mutex_set_owner(lock);
80613
80614 /* set it to 0 if there are no waiters left: */
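
The mutex-debug interface now takes the task_struct directly instead of going through thread_info, dropping a pointless ti->task indirection while keeping the same blocked_on invariants. A userspace sketch of that bookkeeping with assert() standing in for DEBUG_LOCKS_WARN_ON (struct names are illustrative):

#include <assert.h>
#include <stddef.h>

struct waiter;

struct task {
	struct waiter *blocked_on;
};

struct waiter {
	struct task *task;
};

static void debug_add_waiter(struct waiter *w, struct task *t)
{
	t->blocked_on = w;	/* mark the task as blocked on this lock */
	w->task = t;
}

static void debug_remove_waiter(struct waiter *w, struct task *t)
{
	assert(w->task == t);		/* the waiter belongs to this task */
	assert(t->blocked_on == w);	/* and the task agrees */
	t->blocked_on = NULL;
	w->task = NULL;
}
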
80615diff --git a/kernel/notifier.c b/kernel/notifier.c
80616index 2d5cc4c..d9ea600 100644
80617--- a/kernel/notifier.c
80618+++ b/kernel/notifier.c
80619@@ -5,6 +5,7 @@
80620 #include <linux/rcupdate.h>
80621 #include <linux/vmalloc.h>
80622 #include <linux/reboot.h>
80623+#include <linux/mm.h>
80624
80625 /*
80626 * Notifier list for kernel code which wants to be called
80627@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
80628 while ((*nl) != NULL) {
80629 if (n->priority > (*nl)->priority)
80630 break;
80631- nl = &((*nl)->next);
80632+ nl = (struct notifier_block **)&((*nl)->next);
80633 }
80634- n->next = *nl;
80635+ pax_open_kernel();
80636+ *(const void **)&n->next = *nl;
80637 rcu_assign_pointer(*nl, n);
80638+ pax_close_kernel();
80639 return 0;
80640 }
80641
80642@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
80643 return 0;
80644 if (n->priority > (*nl)->priority)
80645 break;
80646- nl = &((*nl)->next);
80647+ nl = (struct notifier_block **)&((*nl)->next);
80648 }
80649- n->next = *nl;
80650+ pax_open_kernel();
80651+ *(const void **)&n->next = *nl;
80652 rcu_assign_pointer(*nl, n);
80653+ pax_close_kernel();
80654 return 0;
80655 }
80656
80657@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
80658 {
80659 while ((*nl) != NULL) {
80660 if ((*nl) == n) {
80661+ pax_open_kernel();
80662 rcu_assign_pointer(*nl, n->next);
80663+ pax_close_kernel();
80664 return 0;
80665 }
80666- nl = &((*nl)->next);
80667+ nl = (struct notifier_block **)&((*nl)->next);
80668 }
80669 return -ENOENT;
80670 }
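
notifier_chain_register() keeps the chain sorted by descending priority; since the patch moves notifier_block next pointers into read-only memory, the actual stores are bracketed by pax_open_kernel()/pax_close_kernel(). A sketch of the ordered insert, with the open/close pair left as comments because it has no userspace equivalent, and plain stores where the kernel uses rcu_assign_pointer():

#include <stddef.h>

struct notifier {
	struct notifier *next;
	int priority;
};

static int chain_register(struct notifier **nl, struct notifier *n)
{
	while (*nl != NULL) {
		if (n->priority > (*nl)->priority)
			break;			/* insert before lower priority */
		nl = &(*nl)->next;
	}
	/* pax_open_kernel(): briefly make the RO mapping writable */
	n->next = *nl;			/* kernel uses rcu_assign_pointer() */
	*nl = n;
	/* pax_close_kernel(): restore the read-only mapping */
	return 0;
}
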
80671diff --git a/kernel/panic.c b/kernel/panic.c
80672index 167ec09..0dda5f9 100644
80673--- a/kernel/panic.c
80674+++ b/kernel/panic.c
80675@@ -400,7 +400,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
80676 unsigned taint, struct slowpath_args *args)
80677 {
80678 printk(KERN_WARNING "------------[ cut here ]------------\n");
80679- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
80680+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
80681
80682 if (args)
80683 vprintk(args->fmt, args->args);
80684@@ -453,7 +453,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
80685 */
80686 void __stack_chk_fail(void)
80687 {
80688- panic("stack-protector: Kernel stack is corrupted in: %p\n",
80689+ dump_stack();
80690+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
80691 __builtin_return_address(0));
80692 }
80693 EXPORT_SYMBOL(__stack_chk_fail);
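
The __stack_chk_fail() change dumps the stack before panicking and prints the return address with %pA so the report stays useful under GRKERNSEC_HIDESYM. A userspace analogue of a manual canary check that emits diagnostics before aborting; the canary constant and dump routine are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static const uint64_t CANARY = 0xdeadbeefcafef00dULL;	/* illustrative value */

static void dump_diagnostics(void)
{
	fprintf(stderr, "stack-protector: dumping state before abort\n");
	/* a real implementation would walk and symbolize the stack here */
}

static void check_canary(uint64_t seen, const void *ret_addr)
{
	if (seen != CANARY) {
		dump_diagnostics();	/* mirrors the added dump_stack() */
		fprintf(stderr, "stack corrupted in: %p\n", ret_addr);
		abort();		/* mirrors panic() */
	}
}
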
80694diff --git a/kernel/pid.c b/kernel/pid.c
80695index 0db3e79..95b9dc2 100644
80696--- a/kernel/pid.c
80697+++ b/kernel/pid.c
80698@@ -33,6 +33,7 @@
80699 #include <linux/rculist.h>
80700 #include <linux/bootmem.h>
80701 #include <linux/hash.h>
80702+#include <linux/security.h>
80703 #include <linux/pid_namespace.h>
80704 #include <linux/init_task.h>
80705 #include <linux/syscalls.h>
80706@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
80707
80708 int pid_max = PID_MAX_DEFAULT;
80709
80710-#define RESERVED_PIDS 300
80711+#define RESERVED_PIDS 500
80712
80713 int pid_max_min = RESERVED_PIDS + 1;
80714 int pid_max_max = PID_MAX_LIMIT;
80715@@ -442,10 +443,18 @@ EXPORT_SYMBOL(pid_task);
80716 */
80717 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
80718 {
80719+ struct task_struct *task;
80720+
80721 rcu_lockdep_assert(rcu_read_lock_held(),
80722 "find_task_by_pid_ns() needs rcu_read_lock()"
80723 " protection");
80724- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
80725+
80726+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
80727+
80728+ if (gr_pid_is_chrooted(task))
80729+ return NULL;
80730+
80731+ return task;
80732 }
80733
80734 struct task_struct *find_task_by_vpid(pid_t vnr)
80735@@ -453,6 +462,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
80736 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
80737 }
80738
80739+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
80740+{
80741+ rcu_lockdep_assert(rcu_read_lock_held(),
80742+ "find_task_by_pid_ns() needs rcu_read_lock()"
80743+ " protection");
80744+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
80745+}
80746+
80747 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
80748 {
80749 struct pid *pid;
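
find_task_by_pid_ns() is taught to hide tasks outside the caller's chroot via gr_pid_is_chrooted(), while the new find_task_by_vpid_unrestricted() preserves the raw behaviour for the few callers that must see every task. A sketch of the filtered/unfiltered pair; the table-based lookup and the boolean flag are stand-ins for the real pid hash and chroot check:

#include <stdbool.h>
#include <stddef.h>

struct task {
	int pid;
	bool outside_caller_chroot;	/* stand-in for gr_pid_is_chrooted() */
};

/* stand-in for pid_task(find_pid_ns(...), PIDTYPE_PID) */
static struct task *raw_lookup(struct task *tab, size_t n, int pid)
{
	for (size_t i = 0; i < n; i++)
		if (tab[i].pid == pid)
			return &tab[i];
	return NULL;
}

/* filtered lookup: tasks outside the caller's chroot do not exist */
static struct task *lookup(struct task *tab, size_t n, int pid)
{
	struct task *t = raw_lookup(tab, n, pid);

	return (t && t->outside_caller_chroot) ? NULL : t;
}

/* unrestricted variant for callers that need the raw view */
static struct task *lookup_unrestricted(struct task *tab, size_t n, int pid)
{
	return raw_lookup(tab, n, pid);
}
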
80750diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
80751index 6917e8e..9909aeb 100644
80752--- a/kernel/pid_namespace.c
80753+++ b/kernel/pid_namespace.c
80754@@ -247,7 +247,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
80755 void __user *buffer, size_t *lenp, loff_t *ppos)
80756 {
80757 struct pid_namespace *pid_ns = task_active_pid_ns(current);
80758- struct ctl_table tmp = *table;
80759+ ctl_table_no_const tmp = *table;
80760
80761 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
80762 return -EPERM;
80763diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
80764index 42670e9..8719c2f 100644
80765--- a/kernel/posix-cpu-timers.c
80766+++ b/kernel/posix-cpu-timers.c
80767@@ -1636,14 +1636,14 @@ struct k_clock clock_posix_cpu = {
80768
80769 static __init int init_posix_cpu_timers(void)
80770 {
80771- struct k_clock process = {
80772+ static struct k_clock process = {
80773 .clock_getres = process_cpu_clock_getres,
80774 .clock_get = process_cpu_clock_get,
80775 .timer_create = process_cpu_timer_create,
80776 .nsleep = process_cpu_nsleep,
80777 .nsleep_restart = process_cpu_nsleep_restart,
80778 };
80779- struct k_clock thread = {
80780+ static struct k_clock thread = {
80781 .clock_getres = thread_cpu_clock_getres,
80782 .clock_get = thread_cpu_clock_get,
80783 .timer_create = thread_cpu_timer_create,
80784diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
80785index 424c2d4..679242f 100644
80786--- a/kernel/posix-timers.c
80787+++ b/kernel/posix-timers.c
80788@@ -43,6 +43,7 @@
80789 #include <linux/hash.h>
80790 #include <linux/posix-clock.h>
80791 #include <linux/posix-timers.h>
80792+#include <linux/grsecurity.h>
80793 #include <linux/syscalls.h>
80794 #include <linux/wait.h>
80795 #include <linux/workqueue.h>
80796@@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock);
80797 * which we beg off on and pass to do_sys_settimeofday().
80798 */
80799
80800-static struct k_clock posix_clocks[MAX_CLOCKS];
80801+static struct k_clock *posix_clocks[MAX_CLOCKS];
80802
80803 /*
80804 * These ones are defined below.
80805@@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
80806 */
80807 static __init int init_posix_timers(void)
80808 {
80809- struct k_clock clock_realtime = {
80810+ static struct k_clock clock_realtime = {
80811 .clock_getres = hrtimer_get_res,
80812 .clock_get = posix_clock_realtime_get,
80813 .clock_set = posix_clock_realtime_set,
80814@@ -287,7 +288,7 @@ static __init int init_posix_timers(void)
80815 .timer_get = common_timer_get,
80816 .timer_del = common_timer_del,
80817 };
80818- struct k_clock clock_monotonic = {
80819+ static struct k_clock clock_monotonic = {
80820 .clock_getres = hrtimer_get_res,
80821 .clock_get = posix_ktime_get_ts,
80822 .nsleep = common_nsleep,
80823@@ -297,19 +298,19 @@ static __init int init_posix_timers(void)
80824 .timer_get = common_timer_get,
80825 .timer_del = common_timer_del,
80826 };
80827- struct k_clock clock_monotonic_raw = {
80828+ static struct k_clock clock_monotonic_raw = {
80829 .clock_getres = hrtimer_get_res,
80830 .clock_get = posix_get_monotonic_raw,
80831 };
80832- struct k_clock clock_realtime_coarse = {
80833+ static struct k_clock clock_realtime_coarse = {
80834 .clock_getres = posix_get_coarse_res,
80835 .clock_get = posix_get_realtime_coarse,
80836 };
80837- struct k_clock clock_monotonic_coarse = {
80838+ static struct k_clock clock_monotonic_coarse = {
80839 .clock_getres = posix_get_coarse_res,
80840 .clock_get = posix_get_monotonic_coarse,
80841 };
80842- struct k_clock clock_tai = {
80843+ static struct k_clock clock_tai = {
80844 .clock_getres = hrtimer_get_res,
80845 .clock_get = posix_get_tai,
80846 .nsleep = common_nsleep,
80847@@ -319,7 +320,7 @@ static __init int init_posix_timers(void)
80848 .timer_get = common_timer_get,
80849 .timer_del = common_timer_del,
80850 };
80851- struct k_clock clock_boottime = {
80852+ static struct k_clock clock_boottime = {
80853 .clock_getres = hrtimer_get_res,
80854 .clock_get = posix_get_boottime,
80855 .nsleep = common_nsleep,
80856@@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
80857 return;
80858 }
80859
80860- posix_clocks[clock_id] = *new_clock;
80861+ posix_clocks[clock_id] = new_clock;
80862 }
80863 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
80864
80865@@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
80866 return (id & CLOCKFD_MASK) == CLOCKFD ?
80867 &clock_posix_dynamic : &clock_posix_cpu;
80868
80869- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
80870+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
80871 return NULL;
80872- return &posix_clocks[id];
80873+ return posix_clocks[id];
80874 }
80875
80876 static int common_timer_create(struct k_itimer *new_timer)
80877@@ -597,7 +598,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
80878 struct k_clock *kc = clockid_to_kclock(which_clock);
80879 struct k_itimer *new_timer;
80880 int error, new_timer_id;
80881- sigevent_t event;
80882+ sigevent_t event = { };
80883 int it_id_set = IT_ID_NOT_SET;
80884
80885 if (!kc)
80886@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
80887 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
80888 return -EFAULT;
80889
80890+ /* only the CLOCK_REALTIME clock can be set; all other clocks
80891+ have their clock_set fptr set to a nosettime dummy function.
80892+ CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
80893+ call common_clock_set, which calls do_sys_settimeofday, which
80894+ we hook.
80895+ */
80896+
80897 return kc->clock_set(which_clock, &new_tp);
80898 }
80899
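
posix_clocks becomes an array of pointers to statically allocated k_clock objects instead of an array of copies, so the ops can live in memory that is made read-only after init, and clockid_to_kclock() must NULL-check the slot before dereferencing it. A sketch of the pointer registry; struct clock_ops and the function names are illustrative:

#include <stddef.h>

struct clock_ops {
	int (*getres)(int id);
	int (*set)(int id);
};

#define MAX_CLOCKS 16
static const struct clock_ops *clocks[MAX_CLOCKS];	/* pointers, not copies */

static void register_clock(int id, const struct clock_ops *ops)
{
	if (id >= 0 && id < MAX_CLOCKS)
		clocks[id] = ops;	/* store the static object's address */
}

static const struct clock_ops *clockid_to_ops(int id)
{
	/* a slot may now be empty, so check the pointer before its fields */
	if (id < 0 || id >= MAX_CLOCKS || !clocks[id] || !clocks[id]->getres)
		return NULL;
	return clocks[id];
}
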
80900diff --git a/kernel/power/process.c b/kernel/power/process.c
80901index 98088e0..aaf95c0 100644
80902--- a/kernel/power/process.c
80903+++ b/kernel/power/process.c
80904@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
80905 u64 elapsed_csecs64;
80906 unsigned int elapsed_csecs;
80907 bool wakeup = false;
80908+ bool timedout = false;
80909
80910 do_gettimeofday(&start);
80911
80912@@ -43,13 +44,20 @@ static int try_to_freeze_tasks(bool user_only)
80913
80914 while (true) {
80915 todo = 0;
80916+ if (time_after(jiffies, end_time))
80917+ timedout = true;
80918 read_lock(&tasklist_lock);
80919 do_each_thread(g, p) {
80920 if (p == current || !freeze_task(p))
80921 continue;
80922
80923- if (!freezer_should_skip(p))
80924+ if (!freezer_should_skip(p)) {
80925 todo++;
80926+ if (timedout) {
80927+ printk(KERN_ERR "Task refusing to freeze:\n");
80928+ sched_show_task(p);
80929+ }
80930+ }
80931 } while_each_thread(g, p);
80932 read_unlock(&tasklist_lock);
80933
80934@@ -58,7 +66,7 @@ static int try_to_freeze_tasks(bool user_only)
80935 todo += wq_busy;
80936 }
80937
80938- if (!todo || time_after(jiffies, end_time))
80939+ if (!todo || timedout)
80940 break;
80941
80942 if (pm_wakeup_pending()) {
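
try_to_freeze_tasks() used to test the deadline only in the loop's exit condition; the patch latches it into a timedout flag at the top of each pass so every task still refusing to freeze gets reported (via sched_show_task()) before the loop gives up. A sketch of that latch-then-report pattern with stand-in predicates:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool still_busy(int i)
{
	return i % 3 == 0;	/* stand-in for !freeze_task()/freezer checks */
}

static int wait_for_quiesce(int nitems, time_t deadline)
{
	bool timedout = false;
	int todo;

	while (true) {
		todo = 0;
		if (time(NULL) > deadline)
			timedout = true;	/* latch before the scan */
		for (int i = 0; i < nitems; i++) {
			if (still_busy(i)) {
				todo++;
				if (timedout)	/* name offenders on the final pass */
					fprintf(stderr, "item %d refusing to quiesce\n", i);
			}
		}
		if (!todo || timedout)		/* success, or reported and done */
			break;
	}
	return todo ? -1 : 0;
}
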
80943diff --git a/kernel/printk.c b/kernel/printk.c
80944index d37d45c..ab918b3 100644
80945--- a/kernel/printk.c
80946+++ b/kernel/printk.c
80947@@ -390,6 +390,11 @@ static int check_syslog_permissions(int type, bool from_file)
80948 if (from_file && type != SYSLOG_ACTION_OPEN)
80949 return 0;
80950
80951+#ifdef CONFIG_GRKERNSEC_DMESG
80952+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
80953+ return -EPERM;
80954+#endif
80955+
80956 if (syslog_action_restricted(type)) {
80957 if (capable(CAP_SYSLOG))
80958 return 0;
80959diff --git a/kernel/profile.c b/kernel/profile.c
80960index 0bf4007..6234708 100644
80961--- a/kernel/profile.c
80962+++ b/kernel/profile.c
80963@@ -37,7 +37,7 @@ struct profile_hit {
80964 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
80965 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
80966
80967-static atomic_t *prof_buffer;
80968+static atomic_unchecked_t *prof_buffer;
80969 static unsigned long prof_len, prof_shift;
80970
80971 int prof_on __read_mostly;
80972@@ -260,7 +260,7 @@ static void profile_flip_buffers(void)
80973 hits[i].pc = 0;
80974 continue;
80975 }
80976- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
80977+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
80978 hits[i].hits = hits[i].pc = 0;
80979 }
80980 }
80981@@ -321,9 +321,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
80982 * Add the current hit(s) and flush the write-queue out
80983 * to the global buffer:
80984 */
80985- atomic_add(nr_hits, &prof_buffer[pc]);
80986+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
80987 for (i = 0; i < NR_PROFILE_HIT; ++i) {
80988- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
80989+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
80990 hits[i].pc = hits[i].hits = 0;
80991 }
80992 out:
80993@@ -398,7 +398,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
80994 {
80995 unsigned long pc;
80996 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
80997- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
80998+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
80999 }
81000 #endif /* !CONFIG_SMP */
81001
81002@@ -494,7 +494,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
81003 return -EFAULT;
81004 buf++; p++; count--; read++;
81005 }
81006- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
81007+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
81008 if (copy_to_user(buf, (void *)pnt, count))
81009 return -EFAULT;
81010 read += count;
81011@@ -525,7 +525,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
81012 }
81013 #endif
81014 profile_discard_flip_buffers();
81015- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
81016+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
81017 return count;
81018 }
81019
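
The profiling counters switch from atomic_t to atomic_unchecked_t: grsecurity's refcount protection makes ordinary atomic ops trap on signed overflow, so purely statistical counters that may legitimately wrap are moved to the unchecked flavour. A C11 sketch of the two behaviours; the function names are illustrative, and unlike the kernel the overflow check here is not performed atomically with the add:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* checked flavour: refuse an add that would overflow (the kernel performs
 * this check inside the atomic op itself; this sketch does not) */
static void atomic_add_checked(_Atomic int *v, int n)
{
	int old = atomic_load(v);

	if (n > 0 && old > INT_MAX - n) {
		fprintf(stderr, "refcount overflow detected\n");
		return;
	}
	atomic_fetch_add(v, n);
}

/* unchecked flavour: a statistical counter where wrapping is harmless */
static void atomic_add_unchecked(_Atomic unsigned *v, unsigned n)
{
	atomic_fetch_add(v, n);	/* unsigned wraparound is well defined */
}
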
81020diff --git a/kernel/ptrace.c b/kernel/ptrace.c
81021index 335a7ae..3bbbceb 100644
81022--- a/kernel/ptrace.c
81023+++ b/kernel/ptrace.c
81024@@ -326,7 +326,7 @@ static int ptrace_attach(struct task_struct *task, long request,
81025 if (seize)
81026 flags |= PT_SEIZED;
81027 rcu_read_lock();
81028- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
81029+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
81030 flags |= PT_PTRACE_CAP;
81031 rcu_read_unlock();
81032 task->ptrace = flags;
81033@@ -537,7 +537,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
81034 break;
81035 return -EIO;
81036 }
81037- if (copy_to_user(dst, buf, retval))
81038+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
81039 return -EFAULT;
81040 copied += retval;
81041 src += retval;
81042@@ -805,7 +805,7 @@ int ptrace_request(struct task_struct *child, long request,
81043 bool seized = child->ptrace & PT_SEIZED;
81044 int ret = -EIO;
81045 siginfo_t siginfo, *si;
81046- void __user *datavp = (void __user *) data;
81047+ void __user *datavp = (__force void __user *) data;
81048 unsigned long __user *datalp = datavp;
81049 unsigned long flags;
81050
81051@@ -1011,14 +1011,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
81052 goto out;
81053 }
81054
81055+ if (gr_handle_ptrace(child, request)) {
81056+ ret = -EPERM;
81057+ goto out_put_task_struct;
81058+ }
81059+
81060 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
81061 ret = ptrace_attach(child, request, addr, data);
81062 /*
81063 * Some architectures need to do book-keeping after
81064 * a ptrace attach.
81065 */
81066- if (!ret)
81067+ if (!ret) {
81068 arch_ptrace_attach(child);
81069+ gr_audit_ptrace(child);
81070+ }
81071 goto out_put_task_struct;
81072 }
81073
81074@@ -1046,7 +1053,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
81075 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
81076 if (copied != sizeof(tmp))
81077 return -EIO;
81078- return put_user(tmp, (unsigned long __user *)data);
81079+ return put_user(tmp, (__force unsigned long __user *)data);
81080 }
81081
81082 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
81083@@ -1140,7 +1147,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
81084 }
81085
81086 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
81087- compat_long_t addr, compat_long_t data)
81088+ compat_ulong_t addr, compat_ulong_t data)
81089 {
81090 struct task_struct *child;
81091 long ret;
81092@@ -1156,14 +1163,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
81093 goto out;
81094 }
81095
81096+ if (gr_handle_ptrace(child, request)) {
81097+ ret = -EPERM;
81098+ goto out_put_task_struct;
81099+ }
81100+
81101 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
81102 ret = ptrace_attach(child, request, addr, data);
81103 /*
81104 * Some architectures need to do book-keeping after
81105 * a ptrace attach.
81106 */
81107- if (!ret)
81108+ if (!ret) {
81109 arch_ptrace_attach(child);
81110+ gr_audit_ptrace(child);
81111+ }
81112 goto out_put_task_struct;
81113 }
81114
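
The ptrace_readdata hunk refuses to pass a retval larger than sizeof(buf) to copy_to_user(), so a bogus length from access_process_vm() can never over-read the on-stack buffer. A sketch of the defensive pattern, with memcpy() standing in for copy_to_user():

#include <errno.h>
#include <stddef.h>
#include <string.h>

/* never trust a producer-supplied length larger than the local buffer */
static int copy_chunk_out(char *dst, const char *buf, size_t bufsz, long retval)
{
	if (retval < 0 || (size_t)retval > bufsz)
		return -EFAULT;
	memcpy(dst, buf, (size_t)retval);	/* stand-in for copy_to_user() */
	return 0;
}
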
81115diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
81116index 48ab703..07561d4 100644
81117--- a/kernel/rcupdate.c
81118+++ b/kernel/rcupdate.c
81119@@ -439,10 +439,10 @@ int rcu_jiffies_till_stall_check(void)
81120 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
81121 */
81122 if (till_stall_check < 3) {
81123- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
81124+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
81125 till_stall_check = 3;
81126 } else if (till_stall_check > 300) {
81127- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
81128+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
81129 till_stall_check = 300;
81130 }
81131 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
81132diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
81133index a0714a5..2ab5e34 100644
81134--- a/kernel/rcutiny.c
81135+++ b/kernel/rcutiny.c
81136@@ -46,7 +46,7 @@
81137 struct rcu_ctrlblk;
81138 static void invoke_rcu_callbacks(void);
81139 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
81140-static void rcu_process_callbacks(struct softirq_action *unused);
81141+static void rcu_process_callbacks(void);
81142 static void __call_rcu(struct rcu_head *head,
81143 void (*func)(struct rcu_head *rcu),
81144 struct rcu_ctrlblk *rcp);
81145@@ -312,7 +312,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
81146 rcu_is_callbacks_kthread()));
81147 }
81148
81149-static void rcu_process_callbacks(struct softirq_action *unused)
81150+static void rcu_process_callbacks(void)
81151 {
81152 __rcu_process_callbacks(&rcu_sched_ctrlblk);
81153 __rcu_process_callbacks(&rcu_bh_ctrlblk);
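
rcu_process_callbacks() drops its always-unused softirq_action argument (rcutiny passed NULL explicitly, as the rcutiny_plugin.h hunk below shows). Across the patch, softirq handlers lose this vestigial parameter, which also keeps function-pointer tables exactly typed; the hardening motivation is an inference here, not stated in the diff. A sketch of an exact-typed handler table:

typedef void (*softirq_handler_t)(void);

static void rcu_handler(void)   { /* process RCU callbacks */ }
static void timer_handler(void) { /* run expired timers */ }

/* every entry has exactly the invoked type: no casts, no dead arguments */
static softirq_handler_t handlers[] = { rcu_handler, timer_handler };

static void run_softirqs(void)
{
	for (unsigned i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
		handlers[i]();
}
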
81154diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
81155index 8a23300..4255818 100644
81156--- a/kernel/rcutiny_plugin.h
81157+++ b/kernel/rcutiny_plugin.h
81158@@ -945,7 +945,7 @@ static int rcu_kthread(void *arg)
81159 have_rcu_kthread_work = morework;
81160 local_irq_restore(flags);
81161 if (work)
81162- rcu_process_callbacks(NULL);
81163+ rcu_process_callbacks();
81164 schedule_timeout_interruptible(1); /* Leave CPU for others. */
81165 }
81166
81167diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
81168index e1f3a8c..42c94a2 100644
81169--- a/kernel/rcutorture.c
81170+++ b/kernel/rcutorture.c
81171@@ -164,12 +164,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
81172 { 0 };
81173 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
81174 { 0 };
81175-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
81176-static atomic_t n_rcu_torture_alloc;
81177-static atomic_t n_rcu_torture_alloc_fail;
81178-static atomic_t n_rcu_torture_free;
81179-static atomic_t n_rcu_torture_mberror;
81180-static atomic_t n_rcu_torture_error;
81181+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
81182+static atomic_unchecked_t n_rcu_torture_alloc;
81183+static atomic_unchecked_t n_rcu_torture_alloc_fail;
81184+static atomic_unchecked_t n_rcu_torture_free;
81185+static atomic_unchecked_t n_rcu_torture_mberror;
81186+static atomic_unchecked_t n_rcu_torture_error;
81187 static long n_rcu_torture_barrier_error;
81188 static long n_rcu_torture_boost_ktrerror;
81189 static long n_rcu_torture_boost_rterror;
81190@@ -287,11 +287,11 @@ rcu_torture_alloc(void)
81191
81192 spin_lock_bh(&rcu_torture_lock);
81193 if (list_empty(&rcu_torture_freelist)) {
81194- atomic_inc(&n_rcu_torture_alloc_fail);
81195+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
81196 spin_unlock_bh(&rcu_torture_lock);
81197 return NULL;
81198 }
81199- atomic_inc(&n_rcu_torture_alloc);
81200+ atomic_inc_unchecked(&n_rcu_torture_alloc);
81201 p = rcu_torture_freelist.next;
81202 list_del_init(p);
81203 spin_unlock_bh(&rcu_torture_lock);
81204@@ -304,7 +304,7 @@ rcu_torture_alloc(void)
81205 static void
81206 rcu_torture_free(struct rcu_torture *p)
81207 {
81208- atomic_inc(&n_rcu_torture_free);
81209+ atomic_inc_unchecked(&n_rcu_torture_free);
81210 spin_lock_bh(&rcu_torture_lock);
81211 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
81212 spin_unlock_bh(&rcu_torture_lock);
81213@@ -424,7 +424,7 @@ rcu_torture_cb(struct rcu_head *p)
81214 i = rp->rtort_pipe_count;
81215 if (i > RCU_TORTURE_PIPE_LEN)
81216 i = RCU_TORTURE_PIPE_LEN;
81217- atomic_inc(&rcu_torture_wcount[i]);
81218+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
81219 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
81220 rp->rtort_mbtest = 0;
81221 rcu_torture_free(rp);
81222@@ -472,7 +472,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
81223 i = rp->rtort_pipe_count;
81224 if (i > RCU_TORTURE_PIPE_LEN)
81225 i = RCU_TORTURE_PIPE_LEN;
81226- atomic_inc(&rcu_torture_wcount[i]);
81227+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
81228 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
81229 rp->rtort_mbtest = 0;
81230 list_del(&rp->rtort_free);
81231@@ -990,7 +990,7 @@ rcu_torture_writer(void *arg)
81232 i = old_rp->rtort_pipe_count;
81233 if (i > RCU_TORTURE_PIPE_LEN)
81234 i = RCU_TORTURE_PIPE_LEN;
81235- atomic_inc(&rcu_torture_wcount[i]);
81236+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
81237 old_rp->rtort_pipe_count++;
81238 cur_ops->deferred_free(old_rp);
81239 }
81240@@ -1076,7 +1076,7 @@ static void rcu_torture_timer(unsigned long unused)
81241 return;
81242 }
81243 if (p->rtort_mbtest == 0)
81244- atomic_inc(&n_rcu_torture_mberror);
81245+ atomic_inc_unchecked(&n_rcu_torture_mberror);
81246 spin_lock(&rand_lock);
81247 cur_ops->read_delay(&rand);
81248 n_rcu_torture_timers++;
81249@@ -1146,7 +1146,7 @@ rcu_torture_reader(void *arg)
81250 continue;
81251 }
81252 if (p->rtort_mbtest == 0)
81253- atomic_inc(&n_rcu_torture_mberror);
81254+ atomic_inc_unchecked(&n_rcu_torture_mberror);
81255 cur_ops->read_delay(&rand);
81256 preempt_disable();
81257 pipe_count = p->rtort_pipe_count;
81258@@ -1209,11 +1209,11 @@ rcu_torture_printk(char *page)
81259 rcu_torture_current,
81260 rcu_torture_current_version,
81261 list_empty(&rcu_torture_freelist),
81262- atomic_read(&n_rcu_torture_alloc),
81263- atomic_read(&n_rcu_torture_alloc_fail),
81264- atomic_read(&n_rcu_torture_free));
81265+ atomic_read_unchecked(&n_rcu_torture_alloc),
81266+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
81267+ atomic_read_unchecked(&n_rcu_torture_free));
81268 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
81269- atomic_read(&n_rcu_torture_mberror),
81270+ atomic_read_unchecked(&n_rcu_torture_mberror),
81271 n_rcu_torture_boost_ktrerror,
81272 n_rcu_torture_boost_rterror);
81273 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
81274@@ -1232,14 +1232,14 @@ rcu_torture_printk(char *page)
81275 n_barrier_attempts,
81276 n_rcu_torture_barrier_error);
81277 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
81278- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
81279+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
81280 n_rcu_torture_barrier_error != 0 ||
81281 n_rcu_torture_boost_ktrerror != 0 ||
81282 n_rcu_torture_boost_rterror != 0 ||
81283 n_rcu_torture_boost_failure != 0 ||
81284 i > 1) {
81285 cnt += sprintf(&page[cnt], "!!! ");
81286- atomic_inc(&n_rcu_torture_error);
81287+ atomic_inc_unchecked(&n_rcu_torture_error);
81288 WARN_ON_ONCE(1);
81289 }
81290 cnt += sprintf(&page[cnt], "Reader Pipe: ");
81291@@ -1253,7 +1253,7 @@ rcu_torture_printk(char *page)
81292 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
81293 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
81294 cnt += sprintf(&page[cnt], " %d",
81295- atomic_read(&rcu_torture_wcount[i]));
81296+ atomic_read_unchecked(&rcu_torture_wcount[i]));
81297 }
81298 cnt += sprintf(&page[cnt], "\n");
81299 if (cur_ops->stats)
81300@@ -1962,7 +1962,7 @@ rcu_torture_cleanup(void)
81301
81302 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
81303
81304- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
81305+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
81306 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
81307 else if (n_online_successes != n_online_attempts ||
81308 n_offline_successes != n_offline_attempts)
81309@@ -2031,18 +2031,18 @@ rcu_torture_init(void)
81310
81311 rcu_torture_current = NULL;
81312 rcu_torture_current_version = 0;
81313- atomic_set(&n_rcu_torture_alloc, 0);
81314- atomic_set(&n_rcu_torture_alloc_fail, 0);
81315- atomic_set(&n_rcu_torture_free, 0);
81316- atomic_set(&n_rcu_torture_mberror, 0);
81317- atomic_set(&n_rcu_torture_error, 0);
81318+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
81319+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
81320+ atomic_set_unchecked(&n_rcu_torture_free, 0);
81321+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
81322+ atomic_set_unchecked(&n_rcu_torture_error, 0);
81323 n_rcu_torture_barrier_error = 0;
81324 n_rcu_torture_boost_ktrerror = 0;
81325 n_rcu_torture_boost_rterror = 0;
81326 n_rcu_torture_boost_failure = 0;
81327 n_rcu_torture_boosts = 0;
81328 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
81329- atomic_set(&rcu_torture_wcount[i], 0);
81330+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
81331 for_each_possible_cpu(cpu) {
81332 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
81333 per_cpu(rcu_torture_count, cpu)[i] = 0;
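
Note on the rcutorture.c hunks above: this is the recurring PaX/grsecurity REFCOUNT pattern. Under the patch, atomic_t arithmetic is instrumented to trap on overflow (turning a reference-count wrap into a controlled kill instead of a use-after-free primitive), so counters that are purely statistical and may legitimately wrap are switched to the atomic_unchecked_t family, which keeps plain wrapping semantics. A minimal user-space sketch of the split; the two-type model and the trap are illustrative stand-ins, not the kernel's real implementation:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_t;           /* instrumented: traps on overflow */
typedef struct { int counter; } atomic_unchecked_t; /* opted out: wraps silently */

static void atomic_inc(atomic_t *v)
{
	if (v->counter == INT_MAX) {
		/* models the REFCOUNT report-and-kill path */
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
	v->counter++;
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* the kernel builds with -fno-strict-overflow, so a plain ++ wraps
	 * there; unsigned arithmetic keeps this sketch well-defined */
	v->counter = (int)((unsigned int)v->counter + 1U);
}

int main(void)
{
	atomic_t refs = { 1 };
	atomic_unchecked_t n_alloc = { INT_MAX };

	atomic_inc(&refs);              /* checked: would trap at INT_MAX */
	atomic_inc_unchecked(&n_alloc); /* statistic: wraps, no trap */
	printf("refs=%d n_alloc=%d\n", refs.counter, n_alloc.counter);
	return 0;
}
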
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 3538001..e379e0b 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -358,9 +358,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 rcu_prepare_for_idle(smp_processor_id());
 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
 smp_mb__before_atomic_inc(); /* See above. */
- atomic_inc(&rdtp->dynticks);
+ atomic_inc_unchecked(&rdtp->dynticks);
 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
 
 /*
 * It is illegal to enter an extended quiescent state while
@@ -496,10 +496,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
 int user)
 {
 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
- atomic_inc(&rdtp->dynticks);
+ atomic_inc_unchecked(&rdtp->dynticks);
 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
 smp_mb__after_atomic_inc(); /* See above. */
- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
 rcu_cleanup_after_idle(smp_processor_id());
 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
 if (!user && !is_idle_task(current)) {
@@ -638,14 +638,14 @@ void rcu_nmi_enter(void)
 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
 
 if (rdtp->dynticks_nmi_nesting == 0 &&
- (atomic_read(&rdtp->dynticks) & 0x1))
+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
 return;
 rdtp->dynticks_nmi_nesting++;
 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
- atomic_inc(&rdtp->dynticks);
+ atomic_inc_unchecked(&rdtp->dynticks);
 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
 smp_mb__after_atomic_inc(); /* See above. */
- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
 }
 
 /**
@@ -664,9 +664,9 @@ void rcu_nmi_exit(void)
 return;
 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
 smp_mb__before_atomic_inc(); /* See above. */
- atomic_inc(&rdtp->dynticks);
+ atomic_inc_unchecked(&rdtp->dynticks);
 smp_mb__after_atomic_inc(); /* Force delay to next write. */
- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
 }
 
 /**
@@ -680,7 +680,7 @@ int rcu_is_cpu_idle(void)
 int ret;
 
 preempt_disable();
- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
 preempt_enable();
 return ret;
 }
@@ -748,7 +748,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
 */
 static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
 return (rdp->dynticks_snap & 0x1) == 0;
 }
 
@@ -763,7 +763,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 unsigned int curr;
 unsigned int snap;
 
- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
 snap = (unsigned int)rdp->dynticks_snap;
 
 /*
@@ -1440,9 +1440,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
 rdp = this_cpu_ptr(rsp->rda);
 rcu_preempt_check_blocked_tasks(rnp);
 rnp->qsmask = rnp->qsmaskinit;
- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
 WARN_ON_ONCE(rnp->completed != rsp->completed);
- ACCESS_ONCE(rnp->completed) = rsp->completed;
+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
 if (rnp == rdp->mynode)
 rcu_start_gp_per_cpu(rsp, rnp, rdp);
 rcu_preempt_boost_start_gp(rnp);
@@ -1524,7 +1524,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 */
 rcu_for_each_node_breadth_first(rsp, rnp) {
 raw_spin_lock_irq(&rnp->lock);
- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
 rdp = this_cpu_ptr(rsp->rda);
 if (rnp == rdp->mynode)
 __rcu_process_gp_end(rsp, rnp, rdp);
@@ -1855,7 +1855,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
 rsp->qlen += rdp->qlen;
 rdp->n_cbs_orphaned += rdp->qlen;
 rdp->qlen_lazy = 0;
- ACCESS_ONCE(rdp->qlen) = 0;
+ ACCESS_ONCE_RW(rdp->qlen) = 0;
 }
 
 /*
@@ -2101,7 +2101,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 }
 smp_mb(); /* List handling before counting for rcu_barrier(). */
 rdp->qlen_lazy -= count_lazy;
- ACCESS_ONCE(rdp->qlen) -= count;
+ ACCESS_ONCE_RW(rdp->qlen) -= count;
 rdp->n_cbs_invoked += count;
 
 /* Reinstate batch limit if we have worked down the excess. */
@@ -2295,7 +2295,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 /*
 * Do RCU core processing for the current CPU.
 */
-static void rcu_process_callbacks(struct softirq_action *unused)
+static void rcu_process_callbacks(void)
 {
 struct rcu_state *rsp;
 
@@ -2419,7 +2419,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 local_irq_restore(flags);
 return;
 }
- ACCESS_ONCE(rdp->qlen)++;
+ ACCESS_ONCE_RW(rdp->qlen)++;
 if (lazy)
 rdp->qlen_lazy++;
 else
@@ -2628,11 +2628,11 @@ void synchronize_sched_expedited(void)
 * counter wrap on a 32-bit system. Quite a few more CPUs would of
 * course be required on a 64-bit system.
 */
- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
 (ulong)atomic_long_read(&rsp->expedited_done) +
 ULONG_MAX / 8)) {
 synchronize_sched();
- atomic_long_inc(&rsp->expedited_wrap);
+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
 return;
 }
 
@@ -2640,7 +2640,7 @@ void synchronize_sched_expedited(void)
 * Take a ticket. Note that atomic_inc_return() implies a
 * full memory barrier.
 */
- snap = atomic_long_inc_return(&rsp->expedited_start);
+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
 firstsnap = snap;
 get_online_cpus();
 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
@@ -2653,14 +2653,14 @@ void synchronize_sched_expedited(void)
 synchronize_sched_expedited_cpu_stop,
 NULL) == -EAGAIN) {
 put_online_cpus();
- atomic_long_inc(&rsp->expedited_tryfail);
+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
 
 /* Check to see if someone else did our work for us. */
 s = atomic_long_read(&rsp->expedited_done);
 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
 /* ensure test happens before caller kfree */
 smp_mb__before_atomic_inc(); /* ^^^ */
- atomic_long_inc(&rsp->expedited_workdone1);
+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
 return;
 }
 
@@ -2669,7 +2669,7 @@ void synchronize_sched_expedited(void)
 udelay(trycount * num_online_cpus());
 } else {
 wait_rcu_gp(call_rcu_sched);
- atomic_long_inc(&rsp->expedited_normal);
+ atomic_long_inc_unchecked(&rsp->expedited_normal);
 return;
 }
 
@@ -2678,7 +2678,7 @@ void synchronize_sched_expedited(void)
 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
 /* ensure test happens before caller kfree */
 smp_mb__before_atomic_inc(); /* ^^^ */
- atomic_long_inc(&rsp->expedited_workdone2);
+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
 return;
 }
 
@@ -2690,10 +2690,10 @@ void synchronize_sched_expedited(void)
 * period works for us.
 */
 get_online_cpus();
- snap = atomic_long_read(&rsp->expedited_start);
+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
 smp_mb(); /* ensure read is before try_stop_cpus(). */
 }
- atomic_long_inc(&rsp->expedited_stoppedcpus);
+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
 
 /*
 * Everyone up to our most recent fetch is covered by our grace
@@ -2702,16 +2702,16 @@ void synchronize_sched_expedited(void)
 * than we did already did their update.
 */
 do {
- atomic_long_inc(&rsp->expedited_done_tries);
+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
 s = atomic_long_read(&rsp->expedited_done);
 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
 /* ensure test happens before caller kfree */
 smp_mb__before_atomic_inc(); /* ^^^ */
- atomic_long_inc(&rsp->expedited_done_lost);
+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
 break;
 }
 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
- atomic_long_inc(&rsp->expedited_done_exit);
+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
 
 put_online_cpus();
 }
@@ -2893,7 +2893,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 * ACCESS_ONCE() to prevent the compiler from speculating
 * the increment to precede the early-exit check.
 */
- ACCESS_ONCE(rsp->n_barrier_done)++;
+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
@@ -2943,7 +2943,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
 /* Increment ->n_barrier_done to prevent duplicate work. */
 smp_mb(); /* Keep increment after above mechanism. */
- ACCESS_ONCE(rsp->n_barrier_done)++;
+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
 smp_mb(); /* Keep increment before caller's subsequent code. */
@@ -2988,10 +2988,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
 init_callback_list(rdp);
 rdp->qlen_lazy = 0;
- ACCESS_ONCE(rdp->qlen) = 0;
+ ACCESS_ONCE_RW(rdp->qlen) = 0;
 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
 rdp->cpu = cpu;
 rdp->rsp = rsp;
 rcu_boot_init_nocb_percpu_data(rdp);
@@ -3024,8 +3024,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 rdp->blimit = blimit;
 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
- atomic_set(&rdp->dynticks->dynticks,
- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
+ atomic_set_unchecked(&rdp->dynticks->dynticks,
+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 
 /* Add CPU to rcu_node bitmasks. */
@@ -3120,7 +3120,7 @@ static int __init rcu_spawn_gp_kthread(void)
 struct task_struct *t;
 
 for_each_rcu_flavor(rsp) {
- t = kthread_run(rcu_gp_kthread, rsp, rsp->name);
+ t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
 BUG_ON(IS_ERR(t));
 rnp = rcu_get_root(rsp);
 raw_spin_lock_irqsave(&rnp->lock, flags);
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 4df5034..5ee93f2 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -87,7 +87,7 @@ struct rcu_dynticks {
 long long dynticks_nesting; /* Track irq/process nesting level. */
 /* Process level is worth LLONG_MAX/2. */
 int dynticks_nmi_nesting; /* Track NMI nesting level. */
- atomic_t dynticks; /* Even value for idle, else odd. */
+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
 #ifdef CONFIG_RCU_FAST_NO_HZ
 bool all_lazy; /* Are all CPU's CBs lazy? */
 unsigned long nonlazy_posted;
@@ -414,17 +414,17 @@ struct rcu_state {
 /* _rcu_barrier(). */
 /* End of fields guarded by barrier_mutex. */
 
- atomic_long_t expedited_start; /* Starting ticket. */
- atomic_long_t expedited_done; /* Done ticket. */
- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
- atomic_long_t expedited_tryfail; /* # acquisition failures. */
- atomic_long_t expedited_workdone1; /* # done by others #1. */
- atomic_long_t expedited_workdone2; /* # done by others #2. */
- atomic_long_t expedited_normal; /* # fallbacks to normal. */
- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
- atomic_long_t expedited_done_tries; /* # tries to update _done. */
- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
+ atomic_long_t expedited_done; /* Done ticket. */
+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
 
 unsigned long jiffies_force_qs; /* Time at which to invoke */
 /* force_quiescent_state(). */
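
Note on the rcutree.h hunk above: the conversion is deliberately selective. expedited_start is a free-running ticket counter whose wrap-around is anticipated (the ULONG_CMP_GE test and the expedited_wrap counter in the rcutree.c hunks exist for exactly that case), and the remaining expedited_* fields are statistics consumed only by the debugfs output in rcutree_trace.c, so both become _unchecked. expedited_done keeps the checked atomic_long_t: it is read with atomic_long_read() and updated through atomic_long_cmpxchg() in the retry loop of synchronize_sched_expedited(), and its operations were left untouched above.
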
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 3db5a37..b395fb35 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -903,7 +903,7 @@ void synchronize_rcu_expedited(void)
 
 /* Clean up and exit. */
 smp_mb(); /* ensure expedited GP seen before counter increment. */
- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
 unlock_mb_ret:
 mutex_unlock(&sync_rcu_preempt_exp_mutex);
 mb_ret:
@@ -1451,7 +1451,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 free_cpumask_var(cm);
 }
 
-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
 .store = &rcu_cpu_kthread_task,
 .thread_should_run = rcu_cpu_kthread_should_run,
 .thread_fn = rcu_cpu_kthread,
@@ -1916,7 +1916,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
 cpu, ticks_value, ticks_title,
- atomic_read(&rdtp->dynticks) & 0xfff,
+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
 fast_no_hz);
@@ -2079,7 +2079,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 
 /* Enqueue the callback on the nocb list and update counts. */
 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
- ACCESS_ONCE(*old_rhpp) = rhp;
+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
 atomic_long_add(rhcount, &rdp->nocb_q_count);
 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
 
@@ -2219,12 +2219,12 @@ static int rcu_nocb_kthread(void *arg)
 * Extract queued callbacks, update counts, and wait
 * for a grace period to elapse.
 */
- ACCESS_ONCE(rdp->nocb_head) = NULL;
+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
- ACCESS_ONCE(rdp->nocb_p_count) += c;
- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
 rcu_nocb_wait_gp(rdp);
 
 /* Each pass through the following loop invokes a callback. */
@@ -2246,8 +2246,8 @@ static int rcu_nocb_kthread(void *arg)
 list = next;
 }
 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
- ACCESS_ONCE(rdp->nocb_p_count) -= c;
- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
 rdp->n_nocbs_invoked += c;
 }
 return 0;
@@ -2274,7 +2274,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
 t = kthread_run(rcu_nocb_kthread, rdp,
 "rcuo%c/%d", rsp->abbr, cpu);
 BUG_ON(IS_ERR(t));
- ACCESS_ONCE(rdp->nocb_kthread) = t;
+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
 }
 }
 
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index cf6c174..a8f4b50 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
 rdp->passed_quiesce, rdp->qs_pending);
 seq_printf(m, " dt=%d/%llx/%d df=%lu",
- atomic_read(&rdp->dynticks->dynticks),
+ atomic_read_unchecked(&rdp->dynticks->dynticks),
 rdp->dynticks->dynticks_nesting,
 rdp->dynticks->dynticks_nmi_nesting,
 rdp->dynticks_fqs);
@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
 struct rcu_state *rsp = (struct rcu_state *)m->private;
 
 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
- atomic_long_read(&rsp->expedited_start),
+ atomic_long_read_unchecked(&rsp->expedited_start),
 atomic_long_read(&rsp->expedited_done),
- atomic_long_read(&rsp->expedited_wrap),
- atomic_long_read(&rsp->expedited_tryfail),
- atomic_long_read(&rsp->expedited_workdone1),
- atomic_long_read(&rsp->expedited_workdone2),
- atomic_long_read(&rsp->expedited_normal),
- atomic_long_read(&rsp->expedited_stoppedcpus),
- atomic_long_read(&rsp->expedited_done_tries),
- atomic_long_read(&rsp->expedited_done_lost),
- atomic_long_read(&rsp->expedited_done_exit));
+ atomic_long_read_unchecked(&rsp->expedited_wrap),
+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
+ atomic_long_read_unchecked(&rsp->expedited_normal),
+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
 return 0;
 }
 
diff --git a/kernel/resource.c b/kernel/resource.c
index d738698..5f8e60a 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = {
 
 static int __init ioresources_init(void)
 {
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
+#endif
+#else
 proc_create("ioports", 0, NULL, &proc_ioports_operations);
 proc_create("iomem", 0, NULL, &proc_iomem_operations);
+#endif
 return 0;
 }
 __initcall(ioresources_init);
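
Note on the resource.c hunk above: with CONFIG_GRKERNSEC_PROC_ADD, /proc/ioports and /proc/iomem stop being world-readable and become readable by root only (S_IRUSR) or by root plus a configured group (S_IRUSR | S_IRGRP); in the unpatched branch, the mode argument of 0 falls back to procfs's historical default read permissions (0444). A tiny user-space print-out of the three modes the hunk switches between, for illustration only:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	printf("default (mode 0 -> 0444):  %04o\n", 0444U);
	printf("GRKERNSEC_PROC_USER:       %04o\n", (unsigned)S_IRUSR);
	printf("GRKERNSEC_PROC_USERGROUP:  %04o\n", (unsigned)(S_IRUSR | S_IRGRP));
	return 0;
}
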
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index 1d96dd0..994ff19 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
@@ -22,7 +22,7 @@
 #define MAX_RT_TEST_MUTEXES 8
 
 static spinlock_t rttest_lock;
-static atomic_t rttest_event;
+static atomic_unchecked_t rttest_event;
 
 struct test_thread_data {
 int opcode;
@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
 
 case RTTEST_LOCKCONT:
 td->mutexes[td->opdata] = 1;
- td->event = atomic_add_return(1, &rttest_event);
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
 return 0;
 
 case RTTEST_RESET:
@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
 return 0;
 
 case RTTEST_RESETEVENT:
- atomic_set(&rttest_event, 0);
+ atomic_set_unchecked(&rttest_event, 0);
 return 0;
 
 default:
@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
 return ret;
 
 td->mutexes[id] = 1;
- td->event = atomic_add_return(1, &rttest_event);
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
 rt_mutex_lock(&mutexes[id]);
- td->event = atomic_add_return(1, &rttest_event);
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
 td->mutexes[id] = 4;
 return 0;
 
@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
 return ret;
 
 td->mutexes[id] = 1;
- td->event = atomic_add_return(1, &rttest_event);
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
- td->event = atomic_add_return(1, &rttest_event);
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
 td->mutexes[id] = ret ? 0 : 4;
 return ret ? -EINTR : 0;
 
@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
 return ret;
 
- td->event = atomic_add_return(1, &rttest_event);
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
 rt_mutex_unlock(&mutexes[id]);
- td->event = atomic_add_return(1, &rttest_event);
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
 td->mutexes[id] = 0;
 return 0;
 
@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
 break;
 
 td->mutexes[dat] = 2;
- td->event = atomic_add_return(1, &rttest_event);
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
 break;
 
 default:
@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
 return;
 
 td->mutexes[dat] = 3;
- td->event = atomic_add_return(1, &rttest_event);
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
 break;
 
 case RTTEST_LOCKNOWAIT:
@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
 return;
 
 td->mutexes[dat] = 1;
- td->event = atomic_add_return(1, &rttest_event);
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
 return;
 
 default:
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index 64de5f8..7735e12 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -11,7 +11,7 @@
 
 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
 static struct autogroup autogroup_default;
-static atomic_t autogroup_seq_nr;
+static atomic_unchecked_t autogroup_seq_nr;
 
 void __init autogroup_init(struct task_struct *init_task)
 {
@@ -81,7 +81,7 @@ static inline struct autogroup *autogroup_create(void)
 
 kref_init(&ag->kref);
 init_rwsem(&ag->lock);
- ag->id = atomic_inc_return(&autogroup_seq_nr);
+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
 ag->tg = tg;
 #ifdef CONFIG_RT_GROUP_SCHED
 /*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e8b3350..d83d44e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3440,7 +3440,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
 * positive (at least 1, or number of jiffies left till timeout) if completed.
 */
-long __sched
+long __sched __intentional_overflow(-1)
 wait_for_completion_interruptible_timeout(struct completion *x,
 unsigned long timeout)
 {
@@ -3457,7 +3457,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 *
 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
 */
-int __sched wait_for_completion_killable(struct completion *x)
+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
 {
 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
 if (t == -ERESTARTSYS)
@@ -3478,7 +3478,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
 * positive (at least 1, or number of jiffies left till timeout) if completed.
 */
-long __sched
+long __sched __intentional_overflow(-1)
 wait_for_completion_killable_timeout(struct completion *x,
 unsigned long timeout)
 {
@@ -3704,6 +3704,8 @@ int can_nice(const struct task_struct *p, const int nice)
 /* convert nice value [19,-20] to rlimit style value [1,40] */
 int nice_rlim = 20 - nice;
 
+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
+
 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
 capable(CAP_SYS_NICE));
 }
@@ -3737,7 +3739,8 @@ SYSCALL_DEFINE1(nice, int, increment)
 if (nice > 19)
 nice = 19;
 
- if (increment < 0 && !can_nice(current, nice))
+ if (increment < 0 && (!can_nice(current, nice) ||
+ gr_handle_chroot_nice()))
 return -EPERM;
 
 retval = security_task_setnice(current, nice);
@@ -3891,6 +3894,7 @@ recheck:
 unsigned long rlim_rtprio =
 task_rlimit(p, RLIMIT_RTPRIO);
 
+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
 /* can't set/change the rt policy */
 if (policy != p->policy && !rlim_rtprio)
 return -EPERM;
@@ -4988,7 +4992,7 @@ static void migrate_tasks(unsigned int dead_cpu)
 
 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
 
-static struct ctl_table sd_ctl_dir[] = {
+static ctl_table_no_const sd_ctl_dir[] __read_only = {
 {
 .procname = "sched_domain",
 .mode = 0555,
@@ -5005,17 +5009,17 @@ static struct ctl_table sd_ctl_root[] = {
 {}
 };
 
-static struct ctl_table *sd_alloc_ctl_entry(int n)
+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
 {
- struct ctl_table *entry =
+ ctl_table_no_const *entry =
 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
 
 return entry;
 }
 
-static void sd_free_ctl_entry(struct ctl_table **tablep)
+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
 {
- struct ctl_table *entry;
+ ctl_table_no_const *entry;
 
 /*
 * In the intermediate directories, both the child directory and
@@ -5023,22 +5027,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
 * will always be set. In the lowest directory the names are
 * static strings and all have proc handlers.
 */
- for (entry = *tablep; entry->mode; entry++) {
- if (entry->child)
- sd_free_ctl_entry(&entry->child);
+ for (entry = tablep; entry->mode; entry++) {
+ if (entry->child) {
+ sd_free_ctl_entry(entry->child);
+ pax_open_kernel();
+ entry->child = NULL;
+ pax_close_kernel();
+ }
 if (entry->proc_handler == NULL)
 kfree(entry->procname);
 }
 
- kfree(*tablep);
- *tablep = NULL;
+ kfree(tablep);
 }
 
 static int min_load_idx = 0;
 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
 
 static void
-set_table_entry(struct ctl_table *entry,
+set_table_entry(ctl_table_no_const *entry,
 const char *procname, void *data, int maxlen,
 umode_t mode, proc_handler *proc_handler,
 bool load_idx)
@@ -5058,7 +5065,7 @@ set_table_entry(struct ctl_table *entry,
 static struct ctl_table *
 sd_alloc_ctl_domain_table(struct sched_domain *sd)
 {
- struct ctl_table *table = sd_alloc_ctl_entry(13);
+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
 
 if (table == NULL)
 return NULL;
@@ -5093,9 +5100,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
 return table;
 }
 
-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
 {
- struct ctl_table *entry, *table;
+ ctl_table_no_const *entry, *table;
 struct sched_domain *sd;
 int domain_num = 0, i;
 char buf[32];
@@ -5122,11 +5129,13 @@ static struct ctl_table_header *sd_sysctl_header;
 static void register_sched_domain_sysctl(void)
 {
 int i, cpu_num = num_possible_cpus();
- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
 char buf[32];
 
 WARN_ON(sd_ctl_dir[0].child);
+ pax_open_kernel();
 sd_ctl_dir[0].child = entry;
+ pax_close_kernel();
 
 if (entry == NULL)
 return;
@@ -5149,8 +5158,12 @@ static void unregister_sched_domain_sysctl(void)
 if (sd_sysctl_header)
 unregister_sysctl_table(sd_sysctl_header);
 sd_sysctl_header = NULL;
- if (sd_ctl_dir[0].child)
- sd_free_ctl_entry(&sd_ctl_dir[0].child);
+ if (sd_ctl_dir[0].child) {
+ sd_free_ctl_entry(sd_ctl_dir[0].child);
+ pax_open_kernel();
+ sd_ctl_dir[0].child = NULL;
+ pax_close_kernel();
+ }
 }
 #else
 static void register_sched_domain_sysctl(void)
@@ -5249,7 +5262,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 * happens before everything else. This has to be lower priority than
 * the notifier in the perf_event subsystem, though.
 */
-static struct notifier_block __cpuinitdata migration_notifier = {
+static struct notifier_block migration_notifier = {
 .notifier_call = migration_call,
 .priority = CPU_PRI_MIGRATION,
 };
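
Note on the sched/core.c hunks above: the scheduler-domain ctl_tables join the __read_only/ctl_table_no_const scheme this patch applies throughout, so the tables live in memory that is normally not writable. The few remaining legitimate stores to sd_ctl_dir[0].child are therefore bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift the write protection. A rough user-space analogue of that discipline using mprotect(); the object layout and names here are illustrative only:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *tbl = mmap(NULL, psz, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (tbl == MAP_FAILED)
		return 1;
	strcpy(tbl, "sched_domain");
	mprotect(tbl, psz, PROT_READ);              /* the "__read_only" state */

	mprotect(tbl, psz, PROT_READ | PROT_WRITE); /* pax_open_kernel()  */
	tbl[0] = 'S';                               /* the one sanctioned write */
	mprotect(tbl, psz, PROT_READ);              /* pax_close_kernel() */

	puts(tbl);
	return 0;
}
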
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c61a614..d7f3d7e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -831,7 +831,7 @@ void task_numa_fault(int node, int pages, bool migrated)
 
 static void reset_ptenuma_scan(struct task_struct *p)
 {
- ACCESS_ONCE(p->mm->numa_scan_seq)++;
+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
 p->mm->numa_scan_offset = 0;
 }
 
@@ -5686,7 +5686,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
 * run_rebalance_domains is triggered when needed from the scheduler tick.
 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
 */
-static void run_rebalance_domains(struct softirq_action *h)
+static void run_rebalance_domains(void)
 {
 int this_cpu = smp_processor_id();
 struct rq *this_rq = cpu_rq(this_cpu);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ce39224d..0e09343 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1009,7 +1009,7 @@ struct sched_class {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 void (*task_move_group) (struct task_struct *p, int on_rq);
 #endif
-};
+} __do_const;
 
 #define sched_class_highest (&stop_sched_class)
 #define for_each_class(class) \
diff --git a/kernel/signal.c b/kernel/signal.c
index 113411b..20d0a99 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -51,12 +51,12 @@ static struct kmem_cache *sigqueue_cachep;
 
 int print_fatal_signals __read_mostly;
 
-static void __user *sig_handler(struct task_struct *t, int sig)
+static __sighandler_t sig_handler(struct task_struct *t, int sig)
 {
 return t->sighand->action[sig - 1].sa.sa_handler;
 }
 
-static int sig_handler_ignored(void __user *handler, int sig)
+static int sig_handler_ignored(__sighandler_t handler, int sig)
 {
 /* Is it explicitly or implicitly ignored? */
 return handler == SIG_IGN ||
@@ -65,7 +65,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
 
 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
 {
- void __user *handler;
+ __sighandler_t handler;
 
 handler = sig_handler(t, sig);
 
@@ -369,6 +369,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
 atomic_inc(&user->sigpending);
 rcu_read_unlock();
 
+ if (!override_rlimit)
+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
+
 if (override_rlimit ||
 atomic_read(&user->sigpending) <=
 task_rlimit(t, RLIMIT_SIGPENDING)) {
@@ -496,7 +499,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
 
 int unhandled_signal(struct task_struct *tsk, int sig)
 {
- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
 if (is_global_init(tsk))
 return 1;
 if (handler != SIG_IGN && handler != SIG_DFL)
@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
 }
 }
 
+ /* allow glibc communication via tgkill to other threads in our
+ thread group */
+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
+ && gr_handle_signal(t, sig))
+ return -EPERM;
+
 return security_task_kill(t, info, sig, 0);
 }
 
@@ -1199,7 +1209,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 return send_signal(sig, info, p, 1);
 }
 
-static int
+int
 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
 return send_signal(sig, info, t, 0);
@@ -1236,6 +1246,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 unsigned long int flags;
 int ret, blocked, ignored;
 struct k_sigaction *action;
+ int is_unhandled = 0;
 
 spin_lock_irqsave(&t->sighand->siglock, flags);
 action = &t->sighand->action[sig-1];
@@ -1250,9 +1261,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 }
 if (action->sa.sa_handler == SIG_DFL)
 t->signal->flags &= ~SIGNAL_UNKILLABLE;
+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
+ is_unhandled = 1;
 ret = specific_send_sig_info(sig, info, t);
 spin_unlock_irqrestore(&t->sighand->siglock, flags);
 
+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
+ normal operation */
+ if (is_unhandled) {
+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
+ gr_handle_crash(t, sig);
+ }
+
 return ret;
 }
 
@@ -1319,8 +1339,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 ret = check_kill_permission(sig, info, p);
 rcu_read_unlock();
 
- if (!ret && sig)
+ if (!ret && sig) {
 ret = do_send_sig_info(sig, info, p, true);
+ if (!ret)
+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
+ }
 
 return ret;
 }
@@ -2926,7 +2949,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
 int error = -ESRCH;
 
 rcu_read_lock();
- p = find_task_by_vpid(pid);
+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
+ /* allow glibc communication via tgkill to other threads in our
+ thread group */
+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
+ p = find_task_by_vpid_unrestricted(pid);
+ else
+#endif
+ p = find_task_by_vpid(pid);
 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
 error = check_kill_permission(sig, info, p);
 /*
@@ -3219,6 +3250,16 @@ int __save_altstack(stack_t __user *uss, unsigned long sp)
 __put_user(t->sas_ss_size, &uss->ss_size);
 }
 
+#ifdef CONFIG_X86
+void __save_altstack_ex(stack_t __user *uss, unsigned long sp)
+{
+ struct task_struct *t = current;
+ put_user_ex((void __user *)t->sas_ss_sp, &uss->ss_sp);
+ put_user_ex(sas_ss_flags(sp), &uss->ss_flags);
+ put_user_ex(t->sas_ss_size, &uss->ss_size);
+}
+#endif
+
 #ifdef CONFIG_COMPAT
 COMPAT_SYSCALL_DEFINE2(sigaltstack,
 const compat_stack_t __user *, uss_ptr,
@@ -3240,8 +3281,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
 }
 seg = get_fs();
 set_fs(KERNEL_DS);
- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
- (stack_t __force __user *) &uoss,
+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
+ (stack_t __force_user *) &uoss,
 compat_user_stack_pointer());
 set_fs(seg);
 if (ret >= 0 && uoss_ptr) {
@@ -3268,6 +3309,16 @@ int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
 __put_user(t->sas_ss_size, &uss->ss_size);
 }
+
+#ifdef CONFIG_X86
+void __compat_save_altstack_ex(compat_stack_t __user *uss, unsigned long sp)
+{
+ struct task_struct *t = current;
+ put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp);
+ put_user_ex(sas_ss_flags(sp), &uss->ss_flags);
+ put_user_ex(t->sas_ss_size, &uss->ss_size);
+}
+#endif
 #endif
 
 #ifdef __ARCH_WANT_SYS_SIGPENDING
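
Note on the signal.c hunks above: two independent changes are mixed in. The type changes replace the void __user * view of sa.sa_handler with the kernel's function-pointer typedef __sighandler_t, which matches how the field is actually declared and keeps the SIG_IGN/SIG_DFL comparisons within one pointer flavour under the patch's stricter __user/__force_user annotations; the gr_* hooks add grsecurity policy (chroot signal restrictions, crash logging, RLIMIT_SIGPENDING learning). A compilable sketch of the typing point; the simplified macros below model, not reproduce, the kernel definitions:

#include <stdio.h>

typedef void (*__sighandler_t)(int);

#define SIG_DFL ((__sighandler_t)0)
#define SIG_IGN ((__sighandler_t)1)

/* comparing the handler slot against the SIG_* sentinels needs no cast
 * once it is typed as __sighandler_t rather than void __user * */
static int sig_handler_ignored(__sighandler_t handler, int sig)
{
	(void)sig;	/* the real code also consults sig_kernel_ignore(sig) */
	return handler == SIG_IGN;
}

int main(void)
{
	printf("%d\n", sig_handler_ignored(SIG_IGN, 2));
	return 0;
}
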
diff --git a/kernel/smp.c b/kernel/smp.c
index 4dba0f7..fe9f773 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -73,7 +73,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
+static struct notifier_block hotplug_cfd_notifier = {
 .notifier_call = hotplug_cfd,
 };
 
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 02fc5c9..e54c335 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
 }
 smpboot_unpark_thread(plug_thread, cpu);
 }
- list_add(&plug_thread->list, &hotplug_threads);
+ pax_list_add(&plug_thread->list, &hotplug_threads);
 out:
 mutex_unlock(&smpboot_threads_lock);
 return ret;
@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
 {
 get_online_cpus();
 mutex_lock(&smpboot_threads_lock);
- list_del(&plug_thread->list);
+ pax_list_del(&plug_thread->list);
 smpboot_destroy_threads(plug_thread);
 mutex_unlock(&smpboot_threads_lock);
 put_online_cpus();
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 3d6833f..da6d93d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
 EXPORT_SYMBOL(irq_stat);
 #endif
 
-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
 
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
-char *softirq_to_name[NR_SOFTIRQS] = {
+const char * const softirq_to_name[NR_SOFTIRQS] = {
 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
 "TASKLET", "SCHED", "HRTIMER", "RCU"
 };
@@ -250,7 +250,7 @@ restart:
 kstat_incr_softirqs_this_cpu(vec_nr);
 
 trace_softirq_entry(vec_nr);
- h->action(h);
+ h->action();
 trace_softirq_exit(vec_nr);
 if (unlikely(prev_count != preempt_count())) {
 printk(KERN_ERR "huh, entered softirq %u %s %p"
@@ -405,7 +405,7 @@ void __raise_softirq_irqoff(unsigned int nr)
 or_softirq_pending(1UL << nr);
 }
 
-void open_softirq(int nr, void (*action)(struct softirq_action *))
+void __init open_softirq(int nr, void (*action)(void))
 {
 softirq_vec[nr].action = action;
 }
@@ -461,7 +461,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 
 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
 
-static void tasklet_action(struct softirq_action *a)
+static void tasklet_action(void)
 {
 struct tasklet_struct *list;
 
@@ -496,7 +496,7 @@ static void tasklet_action(struct softirq_action *a)
 }
 }
 
-static void tasklet_hi_action(struct softirq_action *a)
+static void tasklet_hi_action(void)
 {
 struct tasklet_struct *list;
 
@@ -730,7 +730,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
 return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
+static struct notifier_block remote_softirq_cpu_notifier = {
 .notifier_call = remote_softirq_cpu_notify,
 };
 
@@ -847,11 +847,11 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata cpu_nfb = {
+static struct notifier_block cpu_nfb = {
 .notifier_call = cpu_callback
 };
 
-static struct smp_hotplug_thread softirq_threads = {
+static struct smp_hotplug_thread softirq_threads __read_only = {
 .store = &ksoftirqd,
 .thread_should_run = ksoftirqd_should_run,
 .thread_fn = run_ksoftirqd,
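
Note on the softirq.c hunks above: every softirq handler ignored its struct softirq_action * argument, so the patch drops it from the handler signature, marks open_softirq() __init, and makes softirq_vec page-aligned and __read_only. The net effect is that this function-pointer table, a classic kernel-exploitation target, can no longer be repointed at runtime. A small user-space model of the narrowed interface; NR_SOFTIRQS and the vector here are simplified stand-ins:

#include <stdio.h>

#define NR_SOFTIRQS 10

static void (*softirq_vec[NR_SOFTIRQS])(void);

static void open_softirq(int nr, void (*action)(void))
{
	softirq_vec[nr] = action;	/* in-kernel: only legal during init */
}

static void tasklet_action(void)	/* was: tasklet_action(struct softirq_action *a) */
{
	puts("TASKLET softirq ran");
}

int main(void)
{
	open_softirq(6, tasklet_action);	/* 6 == TASKLET_SOFTIRQ upstream */
	softirq_vec[6]();			/* dispatch; was h->action(h) */
	return 0;
}
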
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 01d5ccb..cdcbee6 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -300,9 +300,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
 
 idx = ACCESS_ONCE(sp->completed) & 0x1;
 preempt_disable();
- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
 smp_mb(); /* B */ /* Avoid leaking the critical section. */
- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
 preempt_enable();
 return idx;
 }
diff --git a/kernel/sys.c b/kernel/sys.c
index 2bbd9a7..0875671 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -163,6 +163,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
 error = -EACCES;
 goto out;
 }
+
+ if (gr_handle_chroot_setpriority(p, niceval)) {
+ error = -EACCES;
+ goto out;
+ }
+
 no_nice = security_task_setnice(p, niceval);
 if (no_nice) {
 error = no_nice;
@@ -626,6 +632,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
 goto error;
 }
 
+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
+ goto error;
+
 if (rgid != (gid_t) -1 ||
 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
 new->sgid = new->egid;
@@ -661,6 +670,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
 old = current_cred();
 
 retval = -EPERM;
+
+ if (gr_check_group_change(kgid, kgid, kgid))
+ goto error;
+
 if (nsown_capable(CAP_SETGID))
 new->gid = new->egid = new->sgid = new->fsgid = kgid;
 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
@@ -678,7 +691,7 @@ error:
 /*
 * change the user struct in a credentials set to match the new UID
 */
-static int set_user(struct cred *new)
+int set_user(struct cred *new)
 {
 struct user_struct *new_user;
 
@@ -758,6 +771,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
 goto error;
 }
 
+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
+ goto error;
+
 if (!uid_eq(new->uid, old->uid)) {
 retval = set_user(new);
 if (retval < 0)
@@ -808,6 +824,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
 old = current_cred();
 
 retval = -EPERM;
+
+ if (gr_check_crash_uid(kuid))
+ goto error;
+ if (gr_check_user_change(kuid, kuid, kuid))
+ goto error;
+
 if (nsown_capable(CAP_SETUID)) {
 new->suid = new->uid = kuid;
 if (!uid_eq(kuid, old->uid)) {
@@ -877,6 +899,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
 goto error;
 }
 
+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
+ goto error;
+
 if (ruid != (uid_t) -1) {
 new->uid = kruid;
 if (!uid_eq(kruid, old->uid)) {
@@ -959,6 +984,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
 goto error;
 }
 
+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
+ goto error;
+
 if (rgid != (gid_t) -1)
 new->gid = krgid;
 if (egid != (gid_t) -1)
@@ -1020,12 +1048,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
 nsown_capable(CAP_SETUID)) {
 if (!uid_eq(kuid, old->fsuid)) {
+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
+ goto error;
+
 new->fsuid = kuid;
 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
 goto change_okay;
 }
 }
 
+error:
 abort_creds(new);
 return old_fsuid;
 
@@ -1058,12 +1090,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
 nsown_capable(CAP_SETGID)) {
+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
+ goto error;
+
 if (!gid_eq(kgid, old->fsgid)) {
 new->fsgid = kgid;
 goto change_okay;
 }
 }
 
+error:
 abort_creds(new);
 return old_fsgid;
 
@@ -1432,19 +1468,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
 return -EFAULT;
 
 down_read(&uts_sem);
- error = __copy_to_user(&name->sysname, &utsname()->sysname,
+ error = __copy_to_user(name->sysname, &utsname()->sysname,
 __OLD_UTS_LEN);
 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
 __OLD_UTS_LEN);
 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
- error |= __copy_to_user(&name->release, &utsname()->release,
+ error |= __copy_to_user(name->release, &utsname()->release,
 __OLD_UTS_LEN);
 error |= __put_user(0, name->release + __OLD_UTS_LEN);
- error |= __copy_to_user(&name->version, &utsname()->version,
+ error |= __copy_to_user(name->version, &utsname()->version,
 __OLD_UTS_LEN);
 error |= __put_user(0, name->version + __OLD_UTS_LEN);
- error |= __copy_to_user(&name->machine, &utsname()->machine,
+ error |= __copy_to_user(name->machine, &utsname()->machine,
 __OLD_UTS_LEN);
 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
 up_read(&uts_sem);
@@ -1646,6 +1682,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
 */
 new_rlim->rlim_cur = 1;
 }
+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
+ is changed to a lower value. Since tasks can be created by the same
+ user in between this limit change and an execve by this task, force
+ a recheck only for this task by setting PF_NPROC_EXCEEDED
+ */
+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
+ tsk->flags |= PF_NPROC_EXCEEDED;
 }
 if (!retval) {
 if (old_rlim)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 9edcf45..713c960 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -93,7 +93,6 @@
 
 
 #if defined(CONFIG_SYSCTL)
-
 /* External variables not in a header file. */
 extern int sysctl_overcommit_memory;
 extern int sysctl_overcommit_ratio;
@@ -119,18 +118,18 @@ extern int blk_iopoll_enabled;
 
 /* Constants used for minimum and maximum */
 #ifdef CONFIG_LOCKUP_DETECTOR
-static int sixty = 60;
-static int neg_one = -1;
+static int sixty __read_only = 60;
 #endif
 
-static int zero;
-static int __maybe_unused one = 1;
-static int __maybe_unused two = 2;
-static int __maybe_unused three = 3;
-static unsigned long one_ul = 1;
-static int one_hundred = 100;
+static int neg_one __read_only = -1;
+static int zero __read_only = 0;
+static int __maybe_unused one __read_only = 1;
+static int __maybe_unused two __read_only = 2;
+static int __maybe_unused three __read_only = 3;
+static unsigned long one_ul __read_only = 1;
+static int one_hundred __read_only = 100;
 #ifdef CONFIG_PRINTK
-static int ten_thousand = 10000;
+static int ten_thousand __read_only = 10000;
 #endif
 
 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
@@ -177,10 +176,8 @@ static int proc_taint(struct ctl_table *table, int write,
 void __user *buffer, size_t *lenp, loff_t *ppos);
 #endif
 
-#ifdef CONFIG_PRINTK
 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
 void __user *buffer, size_t *lenp, loff_t *ppos);
-#endif
 
 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
 void __user *buffer, size_t *lenp, loff_t *ppos);
@@ -211,6 +208,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
 
 #endif
 
+extern struct ctl_table grsecurity_table[];
+
 static struct ctl_table kern_table[];
 static struct ctl_table vm_table[];
 static struct ctl_table fs_table[];
@@ -225,6 +224,20 @@ extern struct ctl_table epoll_table[];
 int sysctl_legacy_va_layout;
 #endif
 
+#ifdef CONFIG_PAX_SOFTMODE
+static ctl_table pax_table[] = {
+ {
+ .procname = "softmode",
+ .data = &pax_softmode,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0600,
+ .proc_handler = &proc_dointvec,
+ },
+
+ { }
+};
+#endif
+
 /* The default sysctl tables: */
 
 static struct ctl_table sysctl_base_table[] = {
@@ -273,6 +286,22 @@ static int max_extfrag_threshold = 1000;
 #endif
 
 static struct ctl_table kern_table[] = {
+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
+ {
+ .procname = "grsecurity",
+ .mode = 0500,
+ .child = grsecurity_table,
+ },
+#endif
+
+#ifdef CONFIG_PAX_SOFTMODE
+ {
+ .procname = "pax",
+ .mode = 0500,
+ .child = pax_table,
+ },
+#endif
+
 {
 .procname = "sched_child_runs_first",
 .data = &sysctl_sched_child_runs_first,
@@ -607,7 +636,7 @@ static struct ctl_table kern_table[] = {
 .data = &modprobe_path,
 .maxlen = KMOD_PATH_LEN,
 .mode = 0644,
- .proc_handler = proc_dostring,
+ .proc_handler = proc_dostring_modpriv,
 },
 {
 .procname = "modules_disabled",
@@ -774,16 +803,20 @@ static struct ctl_table kern_table[] = {
 .extra1 = &zero,
 .extra2 = &one,
 },
+#endif
 {
 .procname = "kptr_restrict",
 .data = &kptr_restrict,
 .maxlen = sizeof(int),
 .mode = 0644,
 .proc_handler = proc_dointvec_minmax_sysadmin,
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+ .extra1 = &two,
+#else
 .extra1 = &zero,
+#endif
 .extra2 = &two,
 },
-#endif
 {
 .procname = "ngroups_max",
 .data = &ngroups_max,
@@ -1025,10 +1058,17 @@ static struct ctl_table kern_table[] = {
 */
 {
 .procname = "perf_event_paranoid",
- .data = &sysctl_perf_event_paranoid,
- .maxlen = sizeof(sysctl_perf_event_paranoid),
+ .data = &sysctl_perf_event_legitimately_concerned,
+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
 .mode = 0644,
- .proc_handler = proc_dointvec,
+ /* go ahead, be a hero */
+ .proc_handler = proc_dointvec_minmax_sysadmin,
+ .extra1 = &neg_one,
+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
+ .extra2 = &three,
+#else
+ .extra2 = &two,
+#endif
 },
 {
 .procname = "perf_event_mlock_kb",
@@ -1282,6 +1322,13 @@ static struct ctl_table vm_table[] = {
 .proc_handler = proc_dointvec_minmax,
 .extra1 = &zero,
 },
+ {
+ .procname = "heap_stack_gap",
+ .data = &sysctl_heap_stack_gap,
+ .maxlen = sizeof(sysctl_heap_stack_gap),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
 #else
 {
 .procname = "nr_trim_pages",
@@ -1746,6 +1793,16 @@ int proc_dostring(struct ctl_table *table, int write,
 buffer, lenp, ppos);
 }
 
+int proc_dostring_modpriv(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ if (write && !capable(CAP_SYS_MODULE))
+ return -EPERM;
+
+ return _proc_do_string(table->data, table->maxlen, write,
+ buffer, lenp, ppos);
+}
+
 static size_t proc_skip_spaces(char **buf)
 {
 size_t ret;
@@ -1851,6 +1908,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
 len = strlen(tmp);
 if (len > *size)
 len = *size;
+ if (len > sizeof(tmp))
+ len = sizeof(tmp);
 if (copy_to_user(*buf, tmp, len))
 return -EFAULT;
 *size -= len;
@@ -2015,7 +2074,7 @@ int proc_dointvec(struct ctl_table *table, int write,
 static int proc_taint(struct ctl_table *table, int write,
 void __user *buffer, size_t *lenp, loff_t *ppos)
 {
- struct ctl_table t;
+ ctl_table_no_const t;
 unsigned long tmptaint = get_taint();
 int err;
 
@@ -2043,7 +2102,6 @@ static int proc_taint(struct ctl_table *table, int write,
82802 return err;
82803 }
82804
82805-#ifdef CONFIG_PRINTK
82806 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
82807 void __user *buffer, size_t *lenp, loff_t *ppos)
82808 {
82809@@ -2052,7 +2110,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
82810
82811 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
82812 }
82813-#endif
82814
82815 struct do_proc_dointvec_minmax_conv_param {
82816 int *min;
82817@@ -2199,8 +2256,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
82818 *i = val;
82819 } else {
82820 val = convdiv * (*i) / convmul;
82821- if (!first)
82822+ if (!first) {
82823 err = proc_put_char(&buffer, &left, '\t');
82824+ if (err)
82825+ break;
82826+ }
82827 err = proc_put_long(&buffer, &left, val, false);
82828 if (err)
82829 break;
82830@@ -2592,6 +2652,12 @@ int proc_dostring(struct ctl_table *table, int write,
82831 return -ENOSYS;
82832 }
82833
82834+int proc_dostring_modpriv(struct ctl_table *table, int write,
82835+ void __user *buffer, size_t *lenp, loff_t *ppos)
82836+{
82837+ return -ENOSYS;
82838+}
82839+
82840 int proc_dointvec(struct ctl_table *table, int write,
82841 void __user *buffer, size_t *lenp, loff_t *ppos)
82842 {
82843@@ -2648,5 +2714,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
82844 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
82845 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
82846 EXPORT_SYMBOL(proc_dostring);
82847+EXPORT_SYMBOL(proc_dostring_modpriv);
82848 EXPORT_SYMBOL(proc_doulongvec_minmax);
82849 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
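The proc_dostring_modpriv handler added above follows a reusable pattern: gate sysctl writes on a capability, then delegate to the stock handler. A hedged sketch of the same pattern built only from the exported proc_dostring() (whose signature appears in this very file) rather than the internal _proc_do_string(); the name proc_dostring_capgated is invented for illustration:

	#include <linux/capability.h>
	#include <linux/sysctl.h>

	/* Reject writes from callers lacking CAP_SYS_MODULE; reads and
	 * privileged writes fall through to the ordinary string handler. */
	static int proc_dostring_capgated(struct ctl_table *table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos)
	{
		if (write && !capable(CAP_SYS_MODULE))
			return -EPERM;
		return proc_dostring(table, write, buffer, lenp, ppos);
	}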
82850diff --git a/kernel/taskstats.c b/kernel/taskstats.c
82851index 145bb4d..b2aa969 100644
82852--- a/kernel/taskstats.c
82853+++ b/kernel/taskstats.c
82854@@ -28,9 +28,12 @@
82855 #include <linux/fs.h>
82856 #include <linux/file.h>
82857 #include <linux/pid_namespace.h>
82858+#include <linux/grsecurity.h>
82859 #include <net/genetlink.h>
82860 #include <linux/atomic.h>
82861
82862+extern int gr_is_taskstats_denied(int pid);
82863+
82864 /*
82865 * Maximum length of a cpumask that can be specified in
82866 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
82867@@ -570,6 +573,9 @@ err:
82868
82869 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
82870 {
82871+ if (gr_is_taskstats_denied(current->pid))
82872+ return -EACCES;
82873+
82874 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
82875 return cmd_attr_register_cpumask(info);
82876 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
82877diff --git a/kernel/time.c b/kernel/time.c
82878index d3617db..c98bbe9 100644
82879--- a/kernel/time.c
82880+++ b/kernel/time.c
82881@@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
82882 return error;
82883
82884 if (tz) {
82885+		/* do_settimeofday(), called below, already logs the change,
82886+		   so don't log twice here */
82887+ if (!tv)
82888+ gr_log_timechange();
82889+
82890 sys_tz = *tz;
82891 update_vsyscall_tz();
82892 if (firsttime) {
82893@@ -502,7 +507,7 @@ EXPORT_SYMBOL(usecs_to_jiffies);
82894 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
82895 * value to a scaled second value.
82896 */
82897-unsigned long
82898+unsigned long __intentional_overflow(-1)
82899 timespec_to_jiffies(const struct timespec *value)
82900 {
82901 unsigned long sec = value->tv_sec;
82902diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
82903index f11d83b..d016d91 100644
82904--- a/kernel/time/alarmtimer.c
82905+++ b/kernel/time/alarmtimer.c
82906@@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
82907 struct platform_device *pdev;
82908 int error = 0;
82909 int i;
82910- struct k_clock alarm_clock = {
82911+ static struct k_clock alarm_clock = {
82912 .clock_getres = alarm_clock_getres,
82913 .clock_get = alarm_clock_get,
82914 .timer_create = alarm_timer_create,
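The alarm_clock change from an automatic (stack) variable to static fits the patch's treatment of function-pointer tables: a static object has program lifetime and can be placed in read-only storage, in line with the constify changes elsewhere in this patch. A standalone C illustration of the storage-duration point (illustrative names, not kernel code):

	#include <stdio.h>

	struct ops {
		void (*fn)(void);
	};

	static void hello(void)
	{
		puts("hello");
	}

	/* 'static' gives the object program lifetime and, with 'const',
	 * lets it live in read-only storage; an automatic local would be
	 * writable and would dangle once this function returned. */
	const struct ops *get_ops(void)
	{
		static const struct ops o = { .fn = hello };
		return &o;
	}

	int main(void)
	{
		get_ops()->fn();
		return 0;
	}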
82915diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
82916index baeeb5c..c22704a 100644
82917--- a/kernel/time/timekeeping.c
82918+++ b/kernel/time/timekeeping.c
82919@@ -15,6 +15,7 @@
82920 #include <linux/init.h>
82921 #include <linux/mm.h>
82922 #include <linux/sched.h>
82923+#include <linux/grsecurity.h>
82924 #include <linux/syscore_ops.h>
82925 #include <linux/clocksource.h>
82926 #include <linux/jiffies.h>
82927@@ -495,6 +496,8 @@ int do_settimeofday(const struct timespec *tv)
82928 if (!timespec_valid_strict(tv))
82929 return -EINVAL;
82930
82931+ gr_log_timechange();
82932+
82933 raw_spin_lock_irqsave(&timekeeper_lock, flags);
82934 write_seqcount_begin(&timekeeper_seq);
82935
82936diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
82937index 3bdf283..cc68d83 100644
82938--- a/kernel/time/timer_list.c
82939+++ b/kernel/time/timer_list.c
82940@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
82941
82942 static void print_name_offset(struct seq_file *m, void *sym)
82943 {
82944+#ifdef CONFIG_GRKERNSEC_HIDESYM
82945+ SEQ_printf(m, "<%p>", NULL);
82946+#else
82947 char symname[KSYM_NAME_LEN];
82948
82949 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
82950 SEQ_printf(m, "<%pK>", sym);
82951 else
82952 SEQ_printf(m, "%s", symname);
82953+#endif
82954 }
82955
82956 static void
82957@@ -119,7 +123,11 @@ next_one:
82958 static void
82959 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
82960 {
82961+#ifdef CONFIG_GRKERNSEC_HIDESYM
82962+ SEQ_printf(m, " .base: %p\n", NULL);
82963+#else
82964 SEQ_printf(m, " .base: %pK\n", base);
82965+#endif
82966 SEQ_printf(m, " .index: %d\n",
82967 base->index);
82968 SEQ_printf(m, " .resolution: %Lu nsecs\n",
82969@@ -355,7 +363,11 @@ static int __init init_timer_list_procfs(void)
82970 {
82971 struct proc_dir_entry *pe;
82972
82973+#ifdef CONFIG_GRKERNSEC_PROC_ADD
82974+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
82975+#else
82976 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
82977+#endif
82978 if (!pe)
82979 return -ENOMEM;
82980 return 0;
82981diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
82982index 0b537f2..40d6c20 100644
82983--- a/kernel/time/timer_stats.c
82984+++ b/kernel/time/timer_stats.c
82985@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
82986 static unsigned long nr_entries;
82987 static struct entry entries[MAX_ENTRIES];
82988
82989-static atomic_t overflow_count;
82990+static atomic_unchecked_t overflow_count;
82991
82992 /*
82993 * The entries are in a hash-table, for fast lookup:
82994@@ -140,7 +140,7 @@ static void reset_entries(void)
82995 nr_entries = 0;
82996 memset(entries, 0, sizeof(entries));
82997 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
82998- atomic_set(&overflow_count, 0);
82999+ atomic_set_unchecked(&overflow_count, 0);
83000 }
83001
83002 static struct entry *alloc_entry(void)
83003@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
83004 if (likely(entry))
83005 entry->count++;
83006 else
83007- atomic_inc(&overflow_count);
83008+ atomic_inc_unchecked(&overflow_count);
83009
83010 out_unlock:
83011 raw_spin_unlock_irqrestore(lock, flags);
83012@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
83013
83014 static void print_name_offset(struct seq_file *m, unsigned long addr)
83015 {
83016+#ifdef CONFIG_GRKERNSEC_HIDESYM
83017+ seq_printf(m, "<%p>", NULL);
83018+#else
83019 char symname[KSYM_NAME_LEN];
83020
83021 if (lookup_symbol_name(addr, symname) < 0)
83022- seq_printf(m, "<%p>", (void *)addr);
83023+ seq_printf(m, "<%pK>", (void *)addr);
83024 else
83025 seq_printf(m, "%s", symname);
83026+#endif
83027 }
83028
83029 static int tstats_show(struct seq_file *m, void *v)
83030@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
83031
83032 seq_puts(m, "Timer Stats Version: v0.2\n");
83033 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
83034- if (atomic_read(&overflow_count))
83035+ if (atomic_read_unchecked(&overflow_count))
83036 seq_printf(m, "Overflow: %d entries\n",
83037- atomic_read(&overflow_count));
83038+ atomic_read_unchecked(&overflow_count));
83039
83040 for (i = 0; i < nr_entries; i++) {
83041 entry = entries + i;
83042@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
83043 {
83044 struct proc_dir_entry *pe;
83045
83046+#ifdef CONFIG_GRKERNSEC_PROC_ADD
83047+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
83048+#else
83049 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
83050+#endif
83051 if (!pe)
83052 return -ENOMEM;
83053 return 0;
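The atomic_t to atomic_unchecked_t conversions in this file (and in blktrace and mmiotrace below) mark counters whose wraparound is harmless. Under PaX's REFCOUNT hardening, plain atomic_t operations trap on overflow so a reference-count overflow cannot become a use-after-free; the _unchecked variants opt statistics counters out of that trap. A runnable userspace analogue of the distinction, using a GCC/clang builtin (the kernel implementation differs; this only shows the policy):

	#include <limits.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* "Checked": refuse to wrap, as PaX REFCOUNT does for refcounts. */
	static void checked_inc(int *v)
	{
		int n;

		if (__builtin_add_overflow(*v, 1, &n))
			abort();	/* the kernel would trap and log */
		*v = n;
	}

	/* "Unchecked": wraparound tolerated, fine for pure statistics
	 * (the wrap happens in well-defined unsigned arithmetic). */
	static void unchecked_inc(int *v)
	{
		*v = (int)((unsigned int)*v + 1u);
	}

	int main(void)
	{
		int stat = INT_MAX;

		unchecked_inc(&stat);
		printf("stat wrapped to %d\n", stat);	/* no harm done */

		stat = INT_MAX;
		checked_inc(&stat);			/* aborts here */
		return 0;
	}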
83054diff --git a/kernel/timer.c b/kernel/timer.c
83055index 15bc1b4..32da49c 100644
83056--- a/kernel/timer.c
83057+++ b/kernel/timer.c
83058@@ -1366,7 +1366,7 @@ void update_process_times(int user_tick)
83059 /*
83060 * This function runs timers and the timer-tq in bottom half context.
83061 */
83062-static void run_timer_softirq(struct softirq_action *h)
83063+static void run_timer_softirq(void)
83064 {
83065 struct tvec_base *base = __this_cpu_read(tvec_bases);
83066
83067@@ -1429,7 +1429,7 @@ static void process_timeout(unsigned long __data)
83068 *
83069 * In all cases the return value is guaranteed to be non-negative.
83070 */
83071-signed long __sched schedule_timeout(signed long timeout)
83072+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
83073 {
83074 struct timer_list timer;
83075 unsigned long expire;
83076@@ -1635,7 +1635,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
83077 return NOTIFY_OK;
83078 }
83079
83080-static struct notifier_block __cpuinitdata timers_nb = {
83081+static struct notifier_block timers_nb = {
83082 .notifier_call = timer_cpu_notify,
83083 };
83084
83085diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
83086index b8b8560..75b1a09 100644
83087--- a/kernel/trace/blktrace.c
83088+++ b/kernel/trace/blktrace.c
83089@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
83090 struct blk_trace *bt = filp->private_data;
83091 char buf[16];
83092
83093- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
83094+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
83095
83096 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
83097 }
83098@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
83099 return 1;
83100
83101 bt = buf->chan->private_data;
83102- atomic_inc(&bt->dropped);
83103+ atomic_inc_unchecked(&bt->dropped);
83104 return 0;
83105 }
83106
83107@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
83108
83109 bt->dir = dir;
83110 bt->dev = dev;
83111- atomic_set(&bt->dropped, 0);
83112+ atomic_set_unchecked(&bt->dropped, 0);
83113
83114 ret = -EIO;
83115 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
83116diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
83117index 6c508ff..ee55a13 100644
83118--- a/kernel/trace/ftrace.c
83119+++ b/kernel/trace/ftrace.c
83120@@ -1915,12 +1915,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
83121 if (unlikely(ftrace_disabled))
83122 return 0;
83123
83124+ ret = ftrace_arch_code_modify_prepare();
83125+ FTRACE_WARN_ON(ret);
83126+ if (ret)
83127+ return 0;
83128+
83129 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
83130+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
83131 if (ret) {
83132 ftrace_bug(ret, ip);
83133- return 0;
83134 }
83135- return 1;
83136+ return ret ? 0 : 1;
83137 }
83138
83139 /*
83140@@ -3931,8 +3936,10 @@ static int ftrace_process_locs(struct module *mod,
83141 if (!count)
83142 return 0;
83143
83144+ pax_open_kernel();
83145 sort(start, count, sizeof(*start),
83146 ftrace_cmp_ips, ftrace_swap_ips);
83147+ pax_close_kernel();
83148
83149 start_pg = ftrace_allocate_pages(count);
83150 if (!start_pg)
83151@@ -4655,8 +4662,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
83152 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
83153
83154 static int ftrace_graph_active;
83155-static struct notifier_block ftrace_suspend_notifier;
83156-
83157 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
83158 {
83159 return 0;
83160@@ -4800,6 +4805,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
83161 return NOTIFY_DONE;
83162 }
83163
83164+static struct notifier_block ftrace_suspend_notifier = {
83165+ .notifier_call = ftrace_suspend_notifier_call
83166+};
83167+
83168 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
83169 trace_func_graph_ent_t entryfunc)
83170 {
83171@@ -4813,7 +4822,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
83172 goto out;
83173 }
83174
83175- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
83176 register_pm_notifier(&ftrace_suspend_notifier);
83177
83178 ftrace_graph_active++;
83179diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
83180index e444ff8..438b8f4 100644
83181--- a/kernel/trace/ring_buffer.c
83182+++ b/kernel/trace/ring_buffer.c
83183@@ -352,9 +352,9 @@ struct buffer_data_page {
83184 */
83185 struct buffer_page {
83186 struct list_head list; /* list of buffer pages */
83187- local_t write; /* index for next write */
83188+ local_unchecked_t write; /* index for next write */
83189 unsigned read; /* index for next read */
83190- local_t entries; /* entries on this page */
83191+ local_unchecked_t entries; /* entries on this page */
83192 unsigned long real_end; /* real end of data */
83193 struct buffer_data_page *page; /* Actual data page */
83194 };
83195@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
83196 unsigned long last_overrun;
83197 local_t entries_bytes;
83198 local_t entries;
83199- local_t overrun;
83200- local_t commit_overrun;
83201+ local_unchecked_t overrun;
83202+ local_unchecked_t commit_overrun;
83203 local_t dropped_events;
83204 local_t committing;
83205 local_t commits;
83206@@ -992,8 +992,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
83207 *
83208 * We add a counter to the write field to denote this.
83209 */
83210- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
83211- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
83212+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
83213+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
83214
83215 /*
83216 * Just make sure we have seen our old_write and synchronize
83217@@ -1021,8 +1021,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
83218 * cmpxchg to only update if an interrupt did not already
83219 * do it for us. If the cmpxchg fails, we don't care.
83220 */
83221- (void)local_cmpxchg(&next_page->write, old_write, val);
83222- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
83223+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
83224+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
83225
83226 /*
83227 * No need to worry about races with clearing out the commit.
83228@@ -1386,12 +1386,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
83229
83230 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
83231 {
83232- return local_read(&bpage->entries) & RB_WRITE_MASK;
83233+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
83234 }
83235
83236 static inline unsigned long rb_page_write(struct buffer_page *bpage)
83237 {
83238- return local_read(&bpage->write) & RB_WRITE_MASK;
83239+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
83240 }
83241
83242 static int
83243@@ -1486,7 +1486,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
83244 * bytes consumed in ring buffer from here.
83245 * Increment overrun to account for the lost events.
83246 */
83247- local_add(page_entries, &cpu_buffer->overrun);
83248+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
83249 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
83250 }
83251
83252@@ -2063,7 +2063,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
83253 * it is our responsibility to update
83254 * the counters.
83255 */
83256- local_add(entries, &cpu_buffer->overrun);
83257+ local_add_unchecked(entries, &cpu_buffer->overrun);
83258 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
83259
83260 /*
83261@@ -2213,7 +2213,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
83262 if (tail == BUF_PAGE_SIZE)
83263 tail_page->real_end = 0;
83264
83265- local_sub(length, &tail_page->write);
83266+ local_sub_unchecked(length, &tail_page->write);
83267 return;
83268 }
83269
83270@@ -2248,7 +2248,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
83271 rb_event_set_padding(event);
83272
83273 /* Set the write back to the previous setting */
83274- local_sub(length, &tail_page->write);
83275+ local_sub_unchecked(length, &tail_page->write);
83276 return;
83277 }
83278
83279@@ -2260,7 +2260,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
83280
83281 /* Set write to end of buffer */
83282 length = (tail + length) - BUF_PAGE_SIZE;
83283- local_sub(length, &tail_page->write);
83284+ local_sub_unchecked(length, &tail_page->write);
83285 }
83286
83287 /*
83288@@ -2286,7 +2286,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
83289 * about it.
83290 */
83291 if (unlikely(next_page == commit_page)) {
83292- local_inc(&cpu_buffer->commit_overrun);
83293+ local_inc_unchecked(&cpu_buffer->commit_overrun);
83294 goto out_reset;
83295 }
83296
83297@@ -2342,7 +2342,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
83298 cpu_buffer->tail_page) &&
83299 (cpu_buffer->commit_page ==
83300 cpu_buffer->reader_page))) {
83301- local_inc(&cpu_buffer->commit_overrun);
83302+ local_inc_unchecked(&cpu_buffer->commit_overrun);
83303 goto out_reset;
83304 }
83305 }
83306@@ -2390,7 +2390,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
83307 length += RB_LEN_TIME_EXTEND;
83308
83309 tail_page = cpu_buffer->tail_page;
83310- write = local_add_return(length, &tail_page->write);
83311+ write = local_add_return_unchecked(length, &tail_page->write);
83312
83313 /* set write to only the index of the write */
83314 write &= RB_WRITE_MASK;
83315@@ -2407,7 +2407,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
83316 kmemcheck_annotate_bitfield(event, bitfield);
83317 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
83318
83319- local_inc(&tail_page->entries);
83320+ local_inc_unchecked(&tail_page->entries);
83321
83322 /*
83323 * If this is the first commit on the page, then update
83324@@ -2440,7 +2440,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
83325
83326 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
83327 unsigned long write_mask =
83328- local_read(&bpage->write) & ~RB_WRITE_MASK;
83329+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
83330 unsigned long event_length = rb_event_length(event);
83331 /*
83332 * This is on the tail page. It is possible that
83333@@ -2450,7 +2450,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
83334 */
83335 old_index += write_mask;
83336 new_index += write_mask;
83337- index = local_cmpxchg(&bpage->write, old_index, new_index);
83338+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
83339 if (index == old_index) {
83340 /* update counters */
83341 local_sub(event_length, &cpu_buffer->entries_bytes);
83342@@ -2842,7 +2842,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
83343
83344 /* Do the likely case first */
83345 if (likely(bpage->page == (void *)addr)) {
83346- local_dec(&bpage->entries);
83347+ local_dec_unchecked(&bpage->entries);
83348 return;
83349 }
83350
83351@@ -2854,7 +2854,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
83352 start = bpage;
83353 do {
83354 if (bpage->page == (void *)addr) {
83355- local_dec(&bpage->entries);
83356+ local_dec_unchecked(&bpage->entries);
83357 return;
83358 }
83359 rb_inc_page(cpu_buffer, &bpage);
83360@@ -3138,7 +3138,7 @@ static inline unsigned long
83361 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
83362 {
83363 return local_read(&cpu_buffer->entries) -
83364- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
83365+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
83366 }
83367
83368 /**
83369@@ -3227,7 +3227,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
83370 return 0;
83371
83372 cpu_buffer = buffer->buffers[cpu];
83373- ret = local_read(&cpu_buffer->overrun);
83374+ ret = local_read_unchecked(&cpu_buffer->overrun);
83375
83376 return ret;
83377 }
83378@@ -3250,7 +3250,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
83379 return 0;
83380
83381 cpu_buffer = buffer->buffers[cpu];
83382- ret = local_read(&cpu_buffer->commit_overrun);
83383+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
83384
83385 return ret;
83386 }
83387@@ -3335,7 +3335,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
83388 /* if you care about this being correct, lock the buffer */
83389 for_each_buffer_cpu(buffer, cpu) {
83390 cpu_buffer = buffer->buffers[cpu];
83391- overruns += local_read(&cpu_buffer->overrun);
83392+ overruns += local_read_unchecked(&cpu_buffer->overrun);
83393 }
83394
83395 return overruns;
83396@@ -3511,8 +3511,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
83397 /*
83398 * Reset the reader page to size zero.
83399 */
83400- local_set(&cpu_buffer->reader_page->write, 0);
83401- local_set(&cpu_buffer->reader_page->entries, 0);
83402+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
83403+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
83404 local_set(&cpu_buffer->reader_page->page->commit, 0);
83405 cpu_buffer->reader_page->real_end = 0;
83406
83407@@ -3546,7 +3546,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
83408 * want to compare with the last_overrun.
83409 */
83410 smp_mb();
83411- overwrite = local_read(&(cpu_buffer->overrun));
83412+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
83413
83414 /*
83415 * Here's the tricky part.
83416@@ -4116,8 +4116,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
83417
83418 cpu_buffer->head_page
83419 = list_entry(cpu_buffer->pages, struct buffer_page, list);
83420- local_set(&cpu_buffer->head_page->write, 0);
83421- local_set(&cpu_buffer->head_page->entries, 0);
83422+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
83423+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
83424 local_set(&cpu_buffer->head_page->page->commit, 0);
83425
83426 cpu_buffer->head_page->read = 0;
83427@@ -4127,14 +4127,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
83428
83429 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
83430 INIT_LIST_HEAD(&cpu_buffer->new_pages);
83431- local_set(&cpu_buffer->reader_page->write, 0);
83432- local_set(&cpu_buffer->reader_page->entries, 0);
83433+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
83434+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
83435 local_set(&cpu_buffer->reader_page->page->commit, 0);
83436 cpu_buffer->reader_page->read = 0;
83437
83438 local_set(&cpu_buffer->entries_bytes, 0);
83439- local_set(&cpu_buffer->overrun, 0);
83440- local_set(&cpu_buffer->commit_overrun, 0);
83441+ local_set_unchecked(&cpu_buffer->overrun, 0);
83442+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
83443 local_set(&cpu_buffer->dropped_events, 0);
83444 local_set(&cpu_buffer->entries, 0);
83445 local_set(&cpu_buffer->committing, 0);
83446@@ -4538,8 +4538,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
83447 rb_init_page(bpage);
83448 bpage = reader->page;
83449 reader->page = *data_page;
83450- local_set(&reader->write, 0);
83451- local_set(&reader->entries, 0);
83452+ local_set_unchecked(&reader->write, 0);
83453+ local_set_unchecked(&reader->entries, 0);
83454 reader->read = 0;
83455 *data_page = bpage;
83456
83457diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
83458index 06a5bce..53ad6e7 100644
83459--- a/kernel/trace/trace.c
83460+++ b/kernel/trace/trace.c
83461@@ -3347,7 +3347,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
83462 return 0;
83463 }
83464
83465-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
83466+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
83467 {
83468 /* do nothing if flag is already set */
83469 if (!!(trace_flags & mask) == !!enabled)
83470diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
83471index 51b4448..7be601f 100644
83472--- a/kernel/trace/trace.h
83473+++ b/kernel/trace/trace.h
83474@@ -1035,7 +1035,7 @@ extern const char *__stop___trace_bprintk_fmt[];
83475 void trace_printk_init_buffers(void);
83476 void trace_printk_start_comm(void);
83477 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
83478-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
83479+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
83480
83481 /*
83482 * Normal trace_printk() and friends allocates special buffers
83483diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
83484index 6953263..2004e16 100644
83485--- a/kernel/trace/trace_events.c
83486+++ b/kernel/trace/trace_events.c
83487@@ -1748,10 +1748,6 @@ static LIST_HEAD(ftrace_module_file_list);
83488 struct ftrace_module_file_ops {
83489 struct list_head list;
83490 struct module *mod;
83491- struct file_operations id;
83492- struct file_operations enable;
83493- struct file_operations format;
83494- struct file_operations filter;
83495 };
83496
83497 static struct ftrace_module_file_ops *
83498@@ -1792,17 +1788,12 @@ trace_create_file_ops(struct module *mod)
83499
83500 file_ops->mod = mod;
83501
83502- file_ops->id = ftrace_event_id_fops;
83503- file_ops->id.owner = mod;
83504-
83505- file_ops->enable = ftrace_enable_fops;
83506- file_ops->enable.owner = mod;
83507-
83508- file_ops->filter = ftrace_event_filter_fops;
83509- file_ops->filter.owner = mod;
83510-
83511- file_ops->format = ftrace_event_format_fops;
83512- file_ops->format.owner = mod;
83513+ pax_open_kernel();
83514+ mod->trace_id.owner = mod;
83515+ mod->trace_enable.owner = mod;
83516+ mod->trace_filter.owner = mod;
83517+ mod->trace_format.owner = mod;
83518+ pax_close_kernel();
83519
83520 list_add(&file_ops->list, &ftrace_module_file_list);
83521
83522@@ -1895,8 +1886,8 @@ __trace_add_new_mod_event(struct ftrace_event_call *call,
83523 struct ftrace_module_file_ops *file_ops)
83524 {
83525 return __trace_add_new_event(call, tr,
83526- &file_ops->id, &file_ops->enable,
83527- &file_ops->filter, &file_ops->format);
83528+ &file_ops->mod->trace_id, &file_ops->mod->trace_enable,
83529+ &file_ops->mod->trace_filter, &file_ops->mod->trace_format);
83530 }
83531
83532 #else
83533diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
83534index a5e8f48..a9690d2 100644
83535--- a/kernel/trace/trace_mmiotrace.c
83536+++ b/kernel/trace/trace_mmiotrace.c
83537@@ -24,7 +24,7 @@ struct header_iter {
83538 static struct trace_array *mmio_trace_array;
83539 static bool overrun_detected;
83540 static unsigned long prev_overruns;
83541-static atomic_t dropped_count;
83542+static atomic_unchecked_t dropped_count;
83543
83544 static void mmio_reset_data(struct trace_array *tr)
83545 {
83546@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
83547
83548 static unsigned long count_overruns(struct trace_iterator *iter)
83549 {
83550- unsigned long cnt = atomic_xchg(&dropped_count, 0);
83551+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
83552 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
83553
83554 if (over > prev_overruns)
83555@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
83556 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
83557 sizeof(*entry), 0, pc);
83558 if (!event) {
83559- atomic_inc(&dropped_count);
83560+ atomic_inc_unchecked(&dropped_count);
83561 return;
83562 }
83563 entry = ring_buffer_event_data(event);
83564@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
83565 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
83566 sizeof(*entry), 0, pc);
83567 if (!event) {
83568- atomic_inc(&dropped_count);
83569+ atomic_inc_unchecked(&dropped_count);
83570 return;
83571 }
83572 entry = ring_buffer_event_data(event);
83573diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
83574index bb922d9..2a54a257 100644
83575--- a/kernel/trace/trace_output.c
83576+++ b/kernel/trace/trace_output.c
83577@@ -294,7 +294,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
83578
83579 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
83580 if (!IS_ERR(p)) {
83581- p = mangle_path(s->buffer + s->len, p, "\n");
83582+ p = mangle_path(s->buffer + s->len, p, "\n\\");
83583 if (p) {
83584 s->len = p - s->buffer;
83585 return 1;
83586@@ -893,14 +893,16 @@ int register_ftrace_event(struct trace_event *event)
83587 goto out;
83588 }
83589
83590+ pax_open_kernel();
83591 if (event->funcs->trace == NULL)
83592- event->funcs->trace = trace_nop_print;
83593+ *(void **)&event->funcs->trace = trace_nop_print;
83594 if (event->funcs->raw == NULL)
83595- event->funcs->raw = trace_nop_print;
83596+ *(void **)&event->funcs->raw = trace_nop_print;
83597 if (event->funcs->hex == NULL)
83598- event->funcs->hex = trace_nop_print;
83599+ *(void **)&event->funcs->hex = trace_nop_print;
83600 if (event->funcs->binary == NULL)
83601- event->funcs->binary = trace_nop_print;
83602+ *(void **)&event->funcs->binary = trace_nop_print;
83603+ pax_close_kernel();
83604
83605 key = event->type & (EVENT_HASHSIZE - 1);
83606
83607diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
83608index b20428c..4845a10 100644
83609--- a/kernel/trace/trace_stack.c
83610+++ b/kernel/trace/trace_stack.c
83611@@ -68,7 +68,7 @@ check_stack(unsigned long ip, unsigned long *stack)
83612 return;
83613
83614 /* we do not handle interrupt stacks yet */
83615- if (!object_is_on_stack(stack))
83616+ if (!object_starts_on_stack(stack))
83617 return;
83618
83619 local_irq_save(flags);
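The object_is_on_stack() to object_starts_on_stack() renames here and in lib/debugobjects.c and lib/dma-debug.c below reflect what the helper actually tests: only the object's starting address, saying nothing about where the object ends. A runnable userspace sketch of such a predicate using glibc's pthread stack queries (the kernel version works from the task's stack page instead):

	#define _GNU_SOURCE
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* True iff the object's *first byte* lies on this thread's stack --
	 * hence the more honest name. */
	static bool object_starts_on_stack(const void *obj)
	{
		pthread_attr_t attr;
		void *base;
		size_t size;

		pthread_getattr_np(pthread_self(), &attr);
		pthread_attr_getstack(&attr, &base, &size);
		pthread_attr_destroy(&attr);

		return (const char *)obj >= (const char *)base &&
		       (const char *)obj <  (const char *)base + size;
	}

	int main(void)
	{
		int local;
		static int global;

		printf("local:  %d\n", object_starts_on_stack(&local));
		printf("global: %d\n", object_starts_on_stack(&global));
		return 0;
	}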
83620diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
83621index 9064b91..1f5d2f8 100644
83622--- a/kernel/user_namespace.c
83623+++ b/kernel/user_namespace.c
83624@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
83625 !kgid_has_mapping(parent_ns, group))
83626 return -EPERM;
83627
83628+#ifdef CONFIG_GRKERNSEC
83629+ /*
83630+ * This doesn't really inspire confidence:
83631+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
83632+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
83633+	 * Increases kernel attack surface in areas developers
83634+	 * previously cared little about ("low importance due
83635+	 * to requiring 'root' capability").
83636+	 * To be removed when this code receives *proper* review.
83637+ */
83638+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
83639+ !capable(CAP_SETGID))
83640+ return -EPERM;
83641+#endif
83642+
83643 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
83644 if (!ns)
83645 return -ENOMEM;
83646@@ -862,7 +877,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
83647 if (atomic_read(&current->mm->mm_users) > 1)
83648 return -EINVAL;
83649
83650- if (current->fs->users != 1)
83651+ if (atomic_read(&current->fs->users) != 1)
83652 return -EINVAL;
83653
83654 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
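The effect of the GRKERNSEC gate above is directly observable from userspace: an unprivileged process requesting a user namespace gets EPERM instead of a namespace. A small runnable probe (Linux-specific):

	#define _GNU_SOURCE
	#include <errno.h>
	#include <sched.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* Without CAP_SYS_ADMIN + CAP_SETUID + CAP_SETGID, the hunk
		 * above makes this fail with EPERM on a grsecurity kernel. */
		if (unshare(CLONE_NEWUSER) == -1)
			printf("unshare(CLONE_NEWUSER): %s\n", strerror(errno));
		else
			puts("user namespace created");
		return 0;
	}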
83655diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
83656index 4f69f9a..7c6f8f8 100644
83657--- a/kernel/utsname_sysctl.c
83658+++ b/kernel/utsname_sysctl.c
83659@@ -47,7 +47,7 @@ static void put_uts(ctl_table *table, int write, void *which)
83660 static int proc_do_uts_string(ctl_table *table, int write,
83661 void __user *buffer, size_t *lenp, loff_t *ppos)
83662 {
83663- struct ctl_table uts_table;
83664+ ctl_table_no_const uts_table;
83665 int r;
83666 memcpy(&uts_table, table, sizeof(uts_table));
83667 uts_table.data = get_uts(table, write);
83668diff --git a/kernel/watchdog.c b/kernel/watchdog.c
83669index 05039e3..17490c7 100644
83670--- a/kernel/watchdog.c
83671+++ b/kernel/watchdog.c
83672@@ -531,7 +531,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
83673 }
83674 #endif /* CONFIG_SYSCTL */
83675
83676-static struct smp_hotplug_thread watchdog_threads = {
83677+static struct smp_hotplug_thread watchdog_threads __read_only = {
83678 .store = &softlockup_watchdog,
83679 .thread_should_run = watchdog_should_run,
83680 .thread_fn = watchdog,
83681diff --git a/kernel/workqueue.c b/kernel/workqueue.c
83682index 6f01921..139869b 100644
83683--- a/kernel/workqueue.c
83684+++ b/kernel/workqueue.c
83685@@ -4596,7 +4596,7 @@ static void rebind_workers(struct worker_pool *pool)
83686 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
83687 worker_flags |= WORKER_REBOUND;
83688 worker_flags &= ~WORKER_UNBOUND;
83689- ACCESS_ONCE(worker->flags) = worker_flags;
83690+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
83691 }
83692
83693 spin_unlock_irq(&pool->lock);
83694diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
83695index 74fdc5c..3310593 100644
83696--- a/lib/Kconfig.debug
83697+++ b/lib/Kconfig.debug
83698@@ -549,7 +549,7 @@ config DEBUG_MUTEXES
83699
83700 config DEBUG_LOCK_ALLOC
83701 bool "Lock debugging: detect incorrect freeing of live locks"
83702- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
83703+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
83704 select DEBUG_SPINLOCK
83705 select DEBUG_MUTEXES
83706 select LOCKDEP
83707@@ -563,7 +563,7 @@ config DEBUG_LOCK_ALLOC
83708
83709 config PROVE_LOCKING
83710 bool "Lock debugging: prove locking correctness"
83711- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
83712+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
83713 select LOCKDEP
83714 select DEBUG_SPINLOCK
83715 select DEBUG_MUTEXES
83716@@ -614,7 +614,7 @@ config LOCKDEP
83717
83718 config LOCK_STAT
83719 bool "Lock usage statistics"
83720- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
83721+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
83722 select LOCKDEP
83723 select DEBUG_SPINLOCK
83724 select DEBUG_MUTEXES
83725@@ -1282,6 +1282,7 @@ config LATENCYTOP
83726 depends on DEBUG_KERNEL
83727 depends on STACKTRACE_SUPPORT
83728 depends on PROC_FS
83729+ depends on !GRKERNSEC_HIDESYM
83730 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
83731 select KALLSYMS
83732 select KALLSYMS_ALL
83733@@ -1298,7 +1299,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
83734 config DEBUG_STRICT_USER_COPY_CHECKS
83735 bool "Strict user copy size checks"
83736 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
83737- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
83738+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
83739 help
83740 Enabling this option turns a certain set of sanity checks for user
83741 copy operations into compile time failures.
83742@@ -1328,7 +1329,7 @@ config INTERVAL_TREE_TEST
83743
83744 config PROVIDE_OHCI1394_DMA_INIT
83745 bool "Remote debugging over FireWire early on boot"
83746- depends on PCI && X86
83747+ depends on PCI && X86 && !GRKERNSEC
83748 help
83749 If you want to debug problems which hang or crash the kernel early
83750 on boot and the crashing machine has a FireWire port, you can use
83751@@ -1357,7 +1358,7 @@ config PROVIDE_OHCI1394_DMA_INIT
83752
83753 config FIREWIRE_OHCI_REMOTE_DMA
83754 bool "Remote debugging over FireWire with firewire-ohci"
83755- depends on FIREWIRE_OHCI
83756+ depends on FIREWIRE_OHCI && !GRKERNSEC
83757 help
83758 This option lets you use the FireWire bus for remote debugging
83759 with help of the firewire-ohci driver. It enables unfiltered
83760diff --git a/lib/Makefile b/lib/Makefile
83761index c55a037..fb46e3b 100644
83762--- a/lib/Makefile
83763+++ b/lib/Makefile
83764@@ -50,7 +50,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
83765
83766 obj-$(CONFIG_BTREE) += btree.o
83767 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
83768-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
83769+obj-y += list_debug.o
83770 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
83771
83772 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
83773diff --git a/lib/bitmap.c b/lib/bitmap.c
83774index 06f7e4f..f3cf2b0 100644
83775--- a/lib/bitmap.c
83776+++ b/lib/bitmap.c
83777@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
83778 {
83779 int c, old_c, totaldigits, ndigits, nchunks, nbits;
83780 u32 chunk;
83781- const char __user __force *ubuf = (const char __user __force *)buf;
83782+ const char __user *ubuf = (const char __force_user *)buf;
83783
83784 bitmap_zero(maskp, nmaskbits);
83785
83786@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
83787 {
83788 if (!access_ok(VERIFY_READ, ubuf, ulen))
83789 return -EFAULT;
83790- return __bitmap_parse((const char __force *)ubuf,
83791+ return __bitmap_parse((const char __force_kernel *)ubuf,
83792 ulen, 1, maskp, nmaskbits);
83793
83794 }
83795@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
83796 {
83797 unsigned a, b;
83798 int c, old_c, totaldigits;
83799- const char __user __force *ubuf = (const char __user __force *)buf;
83800+ const char __user *ubuf = (const char __force_user *)buf;
83801 int exp_digit, in_range;
83802
83803 totaldigits = c = 0;
83804@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
83805 {
83806 if (!access_ok(VERIFY_READ, ubuf, ulen))
83807 return -EFAULT;
83808- return __bitmap_parselist((const char __force *)ubuf,
83809+ return __bitmap_parselist((const char __force_kernel *)ubuf,
83810 ulen, 1, maskp, nmaskbits);
83811 }
83812 EXPORT_SYMBOL(bitmap_parselist_user);
83813diff --git a/lib/bug.c b/lib/bug.c
83814index 1686034..a9c00c8 100644
83815--- a/lib/bug.c
83816+++ b/lib/bug.c
83817@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
83818 return BUG_TRAP_TYPE_NONE;
83819
83820 bug = find_bug(bugaddr);
83821+ if (!bug)
83822+ return BUG_TRAP_TYPE_NONE;
83823
83824 file = NULL;
83825 line = 0;
83826diff --git a/lib/debugobjects.c b/lib/debugobjects.c
83827index 37061ed..da83f48 100644
83828--- a/lib/debugobjects.c
83829+++ b/lib/debugobjects.c
83830@@ -286,7 +286,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
83831 if (limit > 4)
83832 return;
83833
83834- is_on_stack = object_is_on_stack(addr);
83835+ is_on_stack = object_starts_on_stack(addr);
83836 if (is_on_stack == onstack)
83837 return;
83838
83839diff --git a/lib/devres.c b/lib/devres.c
83840index 8235331..5881053 100644
83841--- a/lib/devres.c
83842+++ b/lib/devres.c
83843@@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
83844 void devm_iounmap(struct device *dev, void __iomem *addr)
83845 {
83846 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
83847- (void *)addr));
83848+ (void __force *)addr));
83849 iounmap(addr);
83850 }
83851 EXPORT_SYMBOL(devm_iounmap);
83852@@ -224,7 +224,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
83853 {
83854 ioport_unmap(addr);
83855 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
83856- devm_ioport_map_match, (void *)addr));
83857+ devm_ioport_map_match, (void __force *)addr));
83858 }
83859 EXPORT_SYMBOL(devm_ioport_unmap);
83860 #endif /* CONFIG_HAS_IOPORT */
83861diff --git a/lib/div64.c b/lib/div64.c
83862index a163b6c..9618fa5 100644
83863--- a/lib/div64.c
83864+++ b/lib/div64.c
83865@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
83866 EXPORT_SYMBOL(__div64_32);
83867
83868 #ifndef div_s64_rem
83869-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
83870+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
83871 {
83872 u64 quotient;
83873
83874@@ -90,7 +90,7 @@ EXPORT_SYMBOL(div_s64_rem);
83875 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
83876 */
83877 #ifndef div64_u64
83878-u64 div64_u64(u64 dividend, u64 divisor)
83879+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
83880 {
83881 u32 high = divisor >> 32;
83882 u64 quot;
83883diff --git a/lib/dma-debug.c b/lib/dma-debug.c
83884index d87a17a..ac0d79a 100644
83885--- a/lib/dma-debug.c
83886+++ b/lib/dma-debug.c
83887@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
83888
83889 void dma_debug_add_bus(struct bus_type *bus)
83890 {
83891- struct notifier_block *nb;
83892+ notifier_block_no_const *nb;
83893
83894 if (global_disable)
83895 return;
83896@@ -945,7 +945,7 @@ static void check_unmap(struct dma_debug_entry *ref)
83897
83898 static void check_for_stack(struct device *dev, void *addr)
83899 {
83900- if (object_is_on_stack(addr))
83901+ if (object_starts_on_stack(addr))
83902 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
83903 "stack [addr=%p]\n", addr);
83904 }
83905diff --git a/lib/inflate.c b/lib/inflate.c
83906index 013a761..c28f3fc 100644
83907--- a/lib/inflate.c
83908+++ b/lib/inflate.c
83909@@ -269,7 +269,7 @@ static void free(void *where)
83910 malloc_ptr = free_mem_ptr;
83911 }
83912 #else
83913-#define malloc(a) kmalloc(a, GFP_KERNEL)
83914+#define malloc(a) kmalloc((a), GFP_KERNEL)
83915 #define free(a) kfree(a)
83916 #endif
83917
83918diff --git a/lib/ioremap.c b/lib/ioremap.c
83919index 0c9216c..863bd89 100644
83920--- a/lib/ioremap.c
83921+++ b/lib/ioremap.c
83922@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
83923 unsigned long next;
83924
83925 phys_addr -= addr;
83926- pmd = pmd_alloc(&init_mm, pud, addr);
83927+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
83928 if (!pmd)
83929 return -ENOMEM;
83930 do {
83931@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
83932 unsigned long next;
83933
83934 phys_addr -= addr;
83935- pud = pud_alloc(&init_mm, pgd, addr);
83936+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
83937 if (!pud)
83938 return -ENOMEM;
83939 do {
83940diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
83941index bd2bea9..6b3c95e 100644
83942--- a/lib/is_single_threaded.c
83943+++ b/lib/is_single_threaded.c
83944@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
83945 struct task_struct *p, *t;
83946 bool ret;
83947
83948+ if (!mm)
83949+ return true;
83950+
83951 if (atomic_read(&task->signal->live) != 1)
83952 return false;
83953
83954diff --git a/lib/kobject.c b/lib/kobject.c
83955index b7e29a6..2f3ca75 100644
83956--- a/lib/kobject.c
83957+++ b/lib/kobject.c
83958@@ -805,7 +805,7 @@ static struct kset *kset_create(const char *name,
83959 kset = kzalloc(sizeof(*kset), GFP_KERNEL);
83960 if (!kset)
83961 return NULL;
83962- retval = kobject_set_name(&kset->kobj, name);
83963+ retval = kobject_set_name(&kset->kobj, "%s", name);
83964 if (retval) {
83965 kfree(kset);
83966 return NULL;
83967@@ -859,9 +859,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
83968
83969
83970 static DEFINE_SPINLOCK(kobj_ns_type_lock);
83971-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
83972+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
83973
83974-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
83975+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
83976 {
83977 enum kobj_ns_type type = ops->type;
83978 int error;
83979diff --git a/lib/list_debug.c b/lib/list_debug.c
83980index c24c2f7..06e070b 100644
83981--- a/lib/list_debug.c
83982+++ b/lib/list_debug.c
83983@@ -11,7 +11,9 @@
83984 #include <linux/bug.h>
83985 #include <linux/kernel.h>
83986 #include <linux/rculist.h>
83987+#include <linux/mm.h>
83988
83989+#ifdef CONFIG_DEBUG_LIST
83990 /*
83991 * Insert a new entry between two known consecutive entries.
83992 *
83993@@ -19,21 +21,32 @@
83994 * the prev/next entries already!
83995 */
83996
83997-void __list_add(struct list_head *new,
83998- struct list_head *prev,
83999- struct list_head *next)
84000+static bool __list_add_debug(struct list_head *new,
84001+ struct list_head *prev,
84002+ struct list_head *next)
84003 {
84004- WARN(next->prev != prev,
84005+ if (WARN(next->prev != prev,
84006 "list_add corruption. next->prev should be "
84007 "prev (%p), but was %p. (next=%p).\n",
84008- prev, next->prev, next);
84009- WARN(prev->next != next,
84010+ prev, next->prev, next) ||
84011+ WARN(prev->next != next,
84012 "list_add corruption. prev->next should be "
84013 "next (%p), but was %p. (prev=%p).\n",
84014- next, prev->next, prev);
84015- WARN(new == prev || new == next,
84016- "list_add double add: new=%p, prev=%p, next=%p.\n",
84017- new, prev, next);
84018+ next, prev->next, prev) ||
84019+ WARN(new == prev || new == next,
84020+ "list_add double add: new=%p, prev=%p, next=%p.\n",
84021+ new, prev, next))
84022+ return false;
84023+ return true;
84024+}
84025+
84026+void __list_add(struct list_head *new,
84027+ struct list_head *prev,
84028+ struct list_head *next)
84029+{
84030+ if (!__list_add_debug(new, prev, next))
84031+ return;
84032+
84033 next->prev = new;
84034 new->next = next;
84035 new->prev = prev;
84036@@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
84037 }
84038 EXPORT_SYMBOL(__list_add);
84039
84040-void __list_del_entry(struct list_head *entry)
84041+static bool __list_del_entry_debug(struct list_head *entry)
84042 {
84043 struct list_head *prev, *next;
84044
84045@@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
84046 WARN(next->prev != entry,
84047 "list_del corruption. next->prev should be %p, "
84048 "but was %p\n", entry, next->prev))
84049+ return false;
84050+ return true;
84051+}
84052+
84053+void __list_del_entry(struct list_head *entry)
84054+{
84055+ if (!__list_del_entry_debug(entry))
84056 return;
84057
84058- __list_del(prev, next);
84059+ __list_del(entry->prev, entry->next);
84060 }
84061 EXPORT_SYMBOL(__list_del_entry);
84062
84063@@ -86,15 +106,85 @@ EXPORT_SYMBOL(list_del);
84064 void __list_add_rcu(struct list_head *new,
84065 struct list_head *prev, struct list_head *next)
84066 {
84067- WARN(next->prev != prev,
84068- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
84069- prev, next->prev, next);
84070- WARN(prev->next != next,
84071- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
84072- next, prev->next, prev);
84073+ if (!__list_add_debug(new, prev, next))
84074+ return;
84075+
84076 new->next = next;
84077 new->prev = prev;
84078 rcu_assign_pointer(list_next_rcu(prev), new);
84079 next->prev = new;
84080 }
84081 EXPORT_SYMBOL(__list_add_rcu);
84082+#endif
84083+
84084+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
84085+{
84086+#ifdef CONFIG_DEBUG_LIST
84087+ if (!__list_add_debug(new, prev, next))
84088+ return;
84089+#endif
84090+
84091+ pax_open_kernel();
84092+ next->prev = new;
84093+ new->next = next;
84094+ new->prev = prev;
84095+ prev->next = new;
84096+ pax_close_kernel();
84097+}
84098+EXPORT_SYMBOL(__pax_list_add);
84099+
84100+void pax_list_del(struct list_head *entry)
84101+{
84102+#ifdef CONFIG_DEBUG_LIST
84103+ if (!__list_del_entry_debug(entry))
84104+ return;
84105+#endif
84106+
84107+ pax_open_kernel();
84108+ __list_del(entry->prev, entry->next);
84109+ entry->next = LIST_POISON1;
84110+ entry->prev = LIST_POISON2;
84111+ pax_close_kernel();
84112+}
84113+EXPORT_SYMBOL(pax_list_del);
84114+
84115+void pax_list_del_init(struct list_head *entry)
84116+{
84117+ pax_open_kernel();
84118+ __list_del(entry->prev, entry->next);
84119+ INIT_LIST_HEAD(entry);
84120+ pax_close_kernel();
84121+}
84122+EXPORT_SYMBOL(pax_list_del_init);
84123+
84124+void __pax_list_add_rcu(struct list_head *new,
84125+ struct list_head *prev, struct list_head *next)
84126+{
84127+#ifdef CONFIG_DEBUG_LIST
84128+ if (!__list_add_debug(new, prev, next))
84129+ return;
84130+#endif
84131+
84132+ pax_open_kernel();
84133+ new->next = next;
84134+ new->prev = prev;
84135+ rcu_assign_pointer(list_next_rcu(prev), new);
84136+ next->prev = new;
84137+ pax_close_kernel();
84138+}
84139+EXPORT_SYMBOL(__pax_list_add_rcu);
84140+
84141+void pax_list_del_rcu(struct list_head *entry)
84142+{
84143+#ifdef CONFIG_DEBUG_LIST
84144+ if (!__list_del_entry_debug(entry))
84145+ return;
84146+#endif
84147+
84148+ pax_open_kernel();
84149+ __list_del(entry->prev, entry->next);
84150+ entry->next = LIST_POISON1;
84151+ entry->prev = LIST_POISON2;
84152+ pax_close_kernel();
84153+}
84154+EXPORT_SYMBOL(pax_list_del_rcu);
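The pax_list_* entry points added above exist so that list nodes embedded in objects made read-only after init (via the constify plugin or __read_only) can still be linked and unlinked: each wrapper brackets the pointer updates with pax_open_kernel()/pax_close_kernel(). A hedged usage sketch; my_ops, my_handler, and the pax_list_add() convenience wrapper are assumed from elsewhere in this patch and are not shown in this hunk:

	/* An object constified at build time still needs to join a list at
	 * runtime; the pax_ wrappers open a brief writable window for the
	 * pointer updates. */
	static struct my_ops ops __read_only = {
		.handler = my_handler,
		.list    = LIST_HEAD_INIT(ops.list),
	};

	static LIST_HEAD(registered_ops);

	void register_my_ops(void)
	{
		pax_list_add(&ops.list, &registered_ops);	/* not list_add() */
	}

	void unregister_my_ops(void)
	{
		pax_list_del(&ops.list);			/* not list_del() */
	}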
84155diff --git a/lib/radix-tree.c b/lib/radix-tree.c
84156index e796429..6e38f9f 100644
84157--- a/lib/radix-tree.c
84158+++ b/lib/radix-tree.c
84159@@ -92,7 +92,7 @@ struct radix_tree_preload {
84160 int nr;
84161 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
84162 };
84163-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
84164+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
84165
84166 static inline void *ptr_to_indirect(void *ptr)
84167 {
84168diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
84169index bb2b201..46abaf9 100644
84170--- a/lib/strncpy_from_user.c
84171+++ b/lib/strncpy_from_user.c
84172@@ -21,7 +21,7 @@
84173 */
84174 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
84175 {
84176- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
84177+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
84178 long res = 0;
84179
84180 /*
84181diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
84182index a28df52..3d55877 100644
84183--- a/lib/strnlen_user.c
84184+++ b/lib/strnlen_user.c
84185@@ -26,7 +26,7 @@
84186 */
84187 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
84188 {
84189- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
84190+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
84191 long align, res = 0;
84192 unsigned long c;
84193
84194diff --git a/lib/swiotlb.c b/lib/swiotlb.c
84195index d23762e..e21eab2 100644
84196--- a/lib/swiotlb.c
84197+++ b/lib/swiotlb.c
84198@@ -664,7 +664,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
84199
84200 void
84201 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
84202- dma_addr_t dev_addr)
84203+ dma_addr_t dev_addr, struct dma_attrs *attrs)
84204 {
84205 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
84206
84207diff --git a/lib/usercopy.c b/lib/usercopy.c
84208index 4f5b1dd..7cab418 100644
84209--- a/lib/usercopy.c
84210+++ b/lib/usercopy.c
84211@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
84212 WARN(1, "Buffer overflow detected!\n");
84213 }
84214 EXPORT_SYMBOL(copy_from_user_overflow);
84215+
84216+void copy_to_user_overflow(void)
84217+{
84218+ WARN(1, "Buffer overflow detected!\n");
84219+}
84220+EXPORT_SYMBOL(copy_to_user_overflow);
84221diff --git a/lib/vsprintf.c b/lib/vsprintf.c
84222index e149c64..24aa71a 100644
84223--- a/lib/vsprintf.c
84224+++ b/lib/vsprintf.c
84225@@ -16,6 +16,9 @@
84226 * - scnprintf and vscnprintf
84227 */
84228
84229+#ifdef CONFIG_GRKERNSEC_HIDESYM
84230+#define __INCLUDED_BY_HIDESYM 1
84231+#endif
84232 #include <stdarg.h>
84233 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
84234 #include <linux/types.h>
84235@@ -981,7 +984,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
84236 return number(buf, end, *(const netdev_features_t *)addr, spec);
84237 }
84238
84239+#ifdef CONFIG_GRKERNSEC_HIDESYM
84240+int kptr_restrict __read_mostly = 2;
84241+#else
84242 int kptr_restrict __read_mostly;
84243+#endif
84244
84245 /*
84246 * Show a '%p' thing. A kernel extension is that the '%p' is followed
84247@@ -994,6 +1001,7 @@ int kptr_restrict __read_mostly;
84248 * - 'f' For simple symbolic function names without offset
84249 * - 'S' For symbolic direct pointers with offset
84250 * - 's' For symbolic direct pointers without offset
84251+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
84252 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
84253 * - 'B' For backtraced symbolic direct pointers with offset
84254 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
84255@@ -1052,12 +1060,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
84256
84257 if (!ptr && *fmt != 'K') {
84258 /*
84259- * Print (null) with the same width as a pointer so it makes
84260+ * Print (nil) with the same width as a pointer so it makes
84261 * tabular output look nice.
84262 */
84263 if (spec.field_width == -1)
84264 spec.field_width = default_width;
84265- return string(buf, end, "(null)", spec);
84266+ return string(buf, end, "(nil)", spec);
84267 }
84268
84269 switch (*fmt) {
84270@@ -1067,6 +1075,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
84271 /* Fallthrough */
84272 case 'S':
84273 case 's':
84274+#ifdef CONFIG_GRKERNSEC_HIDESYM
84275+ break;
84276+#else
84277+ return symbol_string(buf, end, ptr, spec, fmt);
84278+#endif
84279+ case 'A':
84280 case 'B':
84281 return symbol_string(buf, end, ptr, spec, fmt);
84282 case 'R':
84283@@ -1107,6 +1121,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
84284 va_end(va);
84285 return buf;
84286 }
84287+ case 'P':
84288+ break;
84289 case 'K':
84290 /*
84291 * %pK cannot be used in IRQ context because its test
84292@@ -1136,6 +1152,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
84293 return number(buf, end,
84294 (unsigned long long) *((phys_addr_t *)ptr), spec);
84295 }
84296+
84297+#ifdef CONFIG_GRKERNSEC_HIDESYM
84298+	/* 'P' = approved pointers to copy to userland,
84299+	   as in the /proc/kallsyms case, where we display nothing
84300+	   for non-root users and the real contents for root users.
84301+	   'K' pointers are ignored as well, since their NULLing for
84302+	   non-root users is already forced above.
84303+	 */
84304+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
84305+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
84306+ dump_stack();
84307+ ptr = NULL;
84308+ }
84309+#endif
84310+
84311 spec.flags |= SMALL;
84312 if (spec.field_width == -1) {
84313 spec.field_width = default_width;
84314@@ -1857,11 +1888,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
84315 typeof(type) value; \
84316 if (sizeof(type) == 8) { \
84317 args = PTR_ALIGN(args, sizeof(u32)); \
84318- *(u32 *)&value = *(u32 *)args; \
84319- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
84320+ *(u32 *)&value = *(const u32 *)args; \
84321+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
84322 } else { \
84323 args = PTR_ALIGN(args, sizeof(type)); \
84324- value = *(typeof(type) *)args; \
84325+ value = *(const typeof(type) *)args; \
84326 } \
84327 args += sizeof(type); \
84328 value; \
84329@@ -1924,7 +1955,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
84330 case FORMAT_TYPE_STR: {
84331 const char *str_arg = args;
84332 args += strlen(str_arg) + 1;
84333- str = string(str, end, (char *)str_arg, spec);
84334+ str = string(str, end, str_arg, spec);
84335 break;
84336 }
84337
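
Two independent things happen in vsprintf.c: the NULL placeholder switches from "(null)" to glibc's "(nil)" spelling, and under GRKERNSEC_HIDESYM any %p value above TASK_SIZE that is not whitelisted via 'P' or 'K' is nulled before it can reach a user-visible buffer. A user-space model of that gate, with stand-ins for TASK_SIZE and is_usercopy_object():

#include <stdint.h>
#include <stdio.h>

#define TASK_SIZE_GUESS 0x0000800000000000ULL /* illustrative user/kernel split */

static int is_usercopy_object(const char *buf)
{
    (void)buf;
    return 1; /* stand-in: pretend the buffer is a user-copy slab object */
}

static const void *hidesym_filter(const void *p, char fmt, const char *buf)
{
    if ((uintptr_t)p > TASK_SIZE_GUESS && fmt != 'P' && fmt != 'K' &&
        is_usercopy_object(buf)) {
        fprintf(stderr, "infoleak detected, nulling pointer\n");
        return NULL;
    }
    return p;
}

int main(void)
{
    char buf[32];
    const void *kptr = (const void *)(uintptr_t)0xffffffff81000000ULL;

    snprintf(buf, sizeof(buf), "%p", hidesym_filter(kptr, 'x', buf));
    puts(buf);  /* glibc prints "(nil)", matching the placeholder change */
    return 0;
}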
84338diff --git a/localversion-grsec b/localversion-grsec
84339new file mode 100644
84340index 0000000..7cd6065
84341--- /dev/null
84342+++ b/localversion-grsec
84343@@ -0,0 +1 @@
84344+-grsec
84345diff --git a/mm/Kconfig b/mm/Kconfig
84346index e742d06..c56fdd8 100644
84347--- a/mm/Kconfig
84348+++ b/mm/Kconfig
84349@@ -317,10 +317,10 @@ config KSM
84350 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
84351
84352 config DEFAULT_MMAP_MIN_ADDR
84353- int "Low address space to protect from user allocation"
84354+ int "Low address space to protect from user allocation"
84355 depends on MMU
84356- default 4096
84357- help
84358+ default 65536
84359+ help
84360 This is the portion of low virtual memory which should be protected
84361 from userspace allocation. Keeping a user from writing to low pages
84362 can help reduce the impact of kernel NULL pointer bugs.
84363@@ -351,7 +351,7 @@ config MEMORY_FAILURE
84364
84365 config HWPOISON_INJECT
84366 tristate "HWPoison pages injector"
84367- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
84368+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
84369 select PROC_PAGE_MONITOR
84370
84371 config NOMMU_INITIAL_TRIM_EXCESS
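
Raising DEFAULT_MMAP_MIN_ADDR from 4096 to 65536 widens the unmappable low range, so a kernel NULL-or-small-offset dereference faults instead of landing in attacker-controlled data. A quick probe of the setting from user space; the observed result depends on the running kernel's vm.mmap_min_addr:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

    if (p == MAP_FAILED)
        printf("low mapping refused: %s (expected with min_addr >= 64K)\n",
               strerror(errno));
    else
        puts("low mapping succeeded; vm.mmap_min_addr is below 0x1000");
    return 0;
}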
84372diff --git a/mm/backing-dev.c b/mm/backing-dev.c
84373index 5025174..9d67dcd 100644
84374--- a/mm/backing-dev.c
84375+++ b/mm/backing-dev.c
84376@@ -12,7 +12,7 @@
84377 #include <linux/device.h>
84378 #include <trace/events/writeback.h>
84379
84380-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
84381+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
84382
84383 struct backing_dev_info default_backing_dev_info = {
84384 .name = "default",
84385@@ -515,7 +515,6 @@ EXPORT_SYMBOL(bdi_destroy);
84386 int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
84387 unsigned int cap)
84388 {
84389- char tmp[32];
84390 int err;
84391
84392 bdi->name = name;
84393@@ -524,8 +523,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
84394 if (err)
84395 return err;
84396
84397- sprintf(tmp, "%.28s%s", name, "-%d");
84398- err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
84399+ err = bdi_register(bdi, NULL, "%.28s-%ld", name, atomic_long_inc_return_unchecked(&bdi_seq));
84400 if (err) {
84401 bdi_destroy(bdi);
84402 return err;
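
The backing-dev change is a format-string fix as much as an atomic-type swap: the old code spliced the caller-supplied name into the string later used as bdi_register()'s format, so any '%' in the name would be parsed as a conversion. Condensed illustration, with printf standing in for bdi_register()'s internal vsnprintf:

#include <stdio.h>

int main(void)
{
    char tmp[32];
    const char *name = "dev-%s";                /* hostile or accidental '%' */

    /* old pattern: the name ends up inside the format string */
    snprintf(tmp, sizeof(tmp), "%.28s%s", name, "-%d");
    printf("format that would be parsed: \"%s\"\n", tmp); /* "dev-%s-%d" */

    /* fixed pattern: the name is only ever a %s argument */
    printf("%.28s-%ld\n", name, 1L);            /* prints "dev-%s-1" literally */
    return 0;
}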
84403diff --git a/mm/filemap.c b/mm/filemap.c
84404index 7905fe7..e60faa8 100644
84405--- a/mm/filemap.c
84406+++ b/mm/filemap.c
84407@@ -1766,7 +1766,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
84408 struct address_space *mapping = file->f_mapping;
84409
84410 if (!mapping->a_ops->readpage)
84411- return -ENOEXEC;
84412+ return -ENODEV;
84413 file_accessed(file);
84414 vma->vm_ops = &generic_file_vm_ops;
84415 return 0;
84416@@ -2106,6 +2106,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
84417 *pos = i_size_read(inode);
84418
84419 if (limit != RLIM_INFINITY) {
84420+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
84421 if (*pos >= limit) {
84422 send_sig(SIGXFSZ, current, 0);
84423 return -EFBIG;
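
The generic_write_checks() hunk feeds the file-size limit into grsecurity's resource learning before the existing RLIMIT_FSIZE enforcement. What that limit does is easiest to see from user space; a small demo with an arbitrary path and sizes:

#include <signal.h>
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
    struct rlimit rl = { .rlim_cur = 4096, .rlim_max = 4096 };
    FILE *f;
    int i;

    signal(SIGXFSZ, SIG_IGN);        /* turn the signal into a write error */
    setrlimit(RLIMIT_FSIZE, &rl);

    f = fopen("/tmp/fsize-demo", "w");
    if (!f)
        return 1;
    for (i = 0; i < 8192; i++)
        if (fputc('x', f) == EOF) {  /* fails once the 4 KiB cap is reached */
            perror("write");
            break;
        }
    fclose(f);
    return 0;
}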
84424diff --git a/mm/fremap.c b/mm/fremap.c
84425index 87da359..3f41cb1 100644
84426--- a/mm/fremap.c
84427+++ b/mm/fremap.c
84428@@ -158,6 +158,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
84429 retry:
84430 vma = find_vma(mm, start);
84431
84432+#ifdef CONFIG_PAX_SEGMEXEC
84433+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
84434+ goto out;
84435+#endif
84436+
84437 /*
84438 * Make sure the vma is shared, that it supports prefaulting,
84439 * and that the remapped range is valid and fully within
84440diff --git a/mm/highmem.c b/mm/highmem.c
84441index b32b70c..e512eb0 100644
84442--- a/mm/highmem.c
84443+++ b/mm/highmem.c
84444@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
84445 * So no dangers, even with speculative execution.
84446 */
84447 page = pte_page(pkmap_page_table[i]);
84448+ pax_open_kernel();
84449 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
84450-
84451+ pax_close_kernel();
84452 set_page_address(page, NULL);
84453 need_flush = 1;
84454 }
84455@@ -198,9 +199,11 @@ start:
84456 }
84457 }
84458 vaddr = PKMAP_ADDR(last_pkmap_nr);
84459+
84460+ pax_open_kernel();
84461 set_pte_at(&init_mm, vaddr,
84462 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
84463-
84464+ pax_close_kernel();
84465 pkmap_count[last_pkmap_nr] = 1;
84466 set_page_address(page, (void *)vaddr);
84467
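
pax_open_kernel()/pax_close_kernel() bracket the pkmap page-table writes because PaX's KERNEXEC keeps such structures read-only outside those windows. As a loose user-space analogue of the open/write/close discipline, using mprotect() on an ordinary page; this only illustrates the pattern, not the PaX mechanism itself:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (p == MAP_FAILED)
        return 1;
    strcpy(p, "initial");
    mprotect(p, pagesz, PROT_READ);              /* steady state: read-only */

    mprotect(p, pagesz, PROT_READ | PROT_WRITE); /* "pax_open_kernel()" */
    strcpy(p, "updated");                        /* the guarded write */
    mprotect(p, pagesz, PROT_READ);              /* "pax_close_kernel()" */

    puts(p);
    return 0;
}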
84468diff --git a/mm/hugetlb.c b/mm/hugetlb.c
84469index 5cf99bf..5c01c2f 100644
84470--- a/mm/hugetlb.c
84471+++ b/mm/hugetlb.c
84472@@ -2022,15 +2022,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
84473 struct hstate *h = &default_hstate;
84474 unsigned long tmp;
84475 int ret;
84476+ ctl_table_no_const hugetlb_table;
84477
84478 tmp = h->max_huge_pages;
84479
84480 if (write && h->order >= MAX_ORDER)
84481 return -EINVAL;
84482
84483- table->data = &tmp;
84484- table->maxlen = sizeof(unsigned long);
84485- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
84486+ hugetlb_table = *table;
84487+ hugetlb_table.data = &tmp;
84488+ hugetlb_table.maxlen = sizeof(unsigned long);
84489+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
84490 if (ret)
84491 goto out;
84492
84493@@ -2087,15 +2089,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
84494 struct hstate *h = &default_hstate;
84495 unsigned long tmp;
84496 int ret;
84497+ ctl_table_no_const hugetlb_table;
84498
84499 tmp = h->nr_overcommit_huge_pages;
84500
84501 if (write && h->order >= MAX_ORDER)
84502 return -EINVAL;
84503
84504- table->data = &tmp;
84505- table->maxlen = sizeof(unsigned long);
84506- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
84507+ hugetlb_table = *table;
84508+ hugetlb_table.data = &tmp;
84509+ hugetlb_table.maxlen = sizeof(unsigned long);
84510+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
84511 if (ret)
84512 goto out;
84513
84514@@ -2490,7 +2494,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
84515
84516 mm = vma->vm_mm;
84517
84518- tlb_gather_mmu(&tlb, mm, 0);
84519+ tlb_gather_mmu(&tlb, mm, start, end);
84520 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
84521 tlb_finish_mmu(&tlb, start, end);
84522 }
84523@@ -2545,6 +2549,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
84524 return 1;
84525 }
84526
84527+#ifdef CONFIG_PAX_SEGMEXEC
84528+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
84529+{
84530+ struct mm_struct *mm = vma->vm_mm;
84531+ struct vm_area_struct *vma_m;
84532+ unsigned long address_m;
84533+ pte_t *ptep_m;
84534+
84535+ vma_m = pax_find_mirror_vma(vma);
84536+ if (!vma_m)
84537+ return;
84538+
84539+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
84540+ address_m = address + SEGMEXEC_TASK_SIZE;
84541+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
84542+ get_page(page_m);
84543+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
84544+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
84545+}
84546+#endif
84547+
84548 /*
84549 * Hugetlb_cow() should be called with page lock of the original hugepage held.
84550 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
84551@@ -2663,6 +2688,11 @@ retry_avoidcopy:
84552 make_huge_pte(vma, new_page, 1));
84553 page_remove_rmap(old_page);
84554 hugepage_add_new_anon_rmap(new_page, vma, address);
84555+
84556+#ifdef CONFIG_PAX_SEGMEXEC
84557+ pax_mirror_huge_pte(vma, address, new_page);
84558+#endif
84559+
84560 /* Make the old page be freed below */
84561 new_page = old_page;
84562 }
84563@@ -2821,6 +2851,10 @@ retry:
84564 && (vma->vm_flags & VM_SHARED)));
84565 set_huge_pte_at(mm, address, ptep, new_pte);
84566
84567+#ifdef CONFIG_PAX_SEGMEXEC
84568+ pax_mirror_huge_pte(vma, address, page);
84569+#endif
84570+
84571 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
84572 /* Optimization, do the COW without a second fault */
84573 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
84574@@ -2850,6 +2884,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
84575 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
84576 struct hstate *h = hstate_vma(vma);
84577
84578+#ifdef CONFIG_PAX_SEGMEXEC
84579+ struct vm_area_struct *vma_m;
84580+#endif
84581+
84582 address &= huge_page_mask(h);
84583
84584 ptep = huge_pte_offset(mm, address);
84585@@ -2863,6 +2901,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
84586 VM_FAULT_SET_HINDEX(hstate_index(h));
84587 }
84588
84589+#ifdef CONFIG_PAX_SEGMEXEC
84590+ vma_m = pax_find_mirror_vma(vma);
84591+ if (vma_m) {
84592+ unsigned long address_m;
84593+
84594+ if (vma->vm_start > vma_m->vm_start) {
84595+ address_m = address;
84596+ address -= SEGMEXEC_TASK_SIZE;
84597+ vma = vma_m;
84598+ h = hstate_vma(vma);
84599+ } else
84600+ address_m = address + SEGMEXEC_TASK_SIZE;
84601+
84602+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
84603+ return VM_FAULT_OOM;
84604+ address_m &= HPAGE_MASK;
84605+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
84606+ }
84607+#endif
84608+
84609 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
84610 if (!ptep)
84611 return VM_FAULT_OOM;
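
Both hugetlb sysctl handlers now stage their changes in an on-stack copy of the ctl_table, since the ctl_table_no_const scheme leaves the registered table itself const. The pattern in miniature, with an illustrative struct in place of the kernel's:

#include <stddef.h>
#include <stdio.h>

struct ctl_entry {
    const char *name;
    void *data;
    size_t maxlen;
};

static const struct ctl_entry global_entry = { "nr_hugepages", NULL, 0 };

static int handler(const struct ctl_entry *table)
{
    unsigned long tmp = 0;
    struct ctl_entry local = *table;   /* writable copy; the global stays const */

    local.data = &tmp;                 /* point at per-call scratch space */
    local.maxlen = sizeof(tmp);
    printf("%s: staged %zu bytes at %p\n", local.name, local.maxlen, local.data);
    return 0;
}

int main(void)
{
    return handler(&global_entry);
}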
84612diff --git a/mm/internal.h b/mm/internal.h
84613index 8562de0..92b2073 100644
84614--- a/mm/internal.h
84615+++ b/mm/internal.h
84616@@ -100,6 +100,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
84617 * in mm/page_alloc.c
84618 */
84619 extern void __free_pages_bootmem(struct page *page, unsigned int order);
84620+extern void free_compound_page(struct page *page);
84621 extern void prep_compound_page(struct page *page, unsigned long order);
84622 #ifdef CONFIG_MEMORY_FAILURE
84623 extern bool is_free_buddy_page(struct page *page);
84624@@ -355,7 +356,7 @@ extern u32 hwpoison_filter_enable;
84625
84626 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
84627 unsigned long, unsigned long,
84628- unsigned long, unsigned long);
84629+ unsigned long, unsigned long) __intentional_overflow(-1);
84630
84631 extern void set_pageblock_order(void);
84632 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
84633diff --git a/mm/kmemleak.c b/mm/kmemleak.c
84634index c8d7f31..2dbeffd 100644
84635--- a/mm/kmemleak.c
84636+++ b/mm/kmemleak.c
84637@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
84638
84639 for (i = 0; i < object->trace_len; i++) {
84640 void *ptr = (void *)object->trace[i];
84641- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
84642+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
84643 }
84644 }
84645
84646@@ -1851,7 +1851,7 @@ static int __init kmemleak_late_init(void)
84647 return -ENOMEM;
84648 }
84649
84650- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
84651+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
84652 &kmemleak_fops);
84653 if (!dentry)
84654 pr_warning("Failed to create the debugfs kmemleak file\n");
84655diff --git a/mm/maccess.c b/mm/maccess.c
84656index d53adf9..03a24bf 100644
84657--- a/mm/maccess.c
84658+++ b/mm/maccess.c
84659@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
84660 set_fs(KERNEL_DS);
84661 pagefault_disable();
84662 ret = __copy_from_user_inatomic(dst,
84663- (__force const void __user *)src, size);
84664+ (const void __force_user *)src, size);
84665 pagefault_enable();
84666 set_fs(old_fs);
84667
84668@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
84669
84670 set_fs(KERNEL_DS);
84671 pagefault_disable();
84672- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
84673+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
84674 pagefault_enable();
84675 set_fs(old_fs);
84676
84677diff --git a/mm/madvise.c b/mm/madvise.c
84678index 7055883..aafb1ed 100644
84679--- a/mm/madvise.c
84680+++ b/mm/madvise.c
84681@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
84682 pgoff_t pgoff;
84683 unsigned long new_flags = vma->vm_flags;
84684
84685+#ifdef CONFIG_PAX_SEGMEXEC
84686+ struct vm_area_struct *vma_m;
84687+#endif
84688+
84689 switch (behavior) {
84690 case MADV_NORMAL:
84691 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
84692@@ -126,6 +130,13 @@ success:
84693 /*
84694 * vm_flags is protected by the mmap_sem held in write mode.
84695 */
84696+
84697+#ifdef CONFIG_PAX_SEGMEXEC
84698+ vma_m = pax_find_mirror_vma(vma);
84699+ if (vma_m)
84700+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
84701+#endif
84702+
84703 vma->vm_flags = new_flags;
84704
84705 out:
84706@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
84707 struct vm_area_struct ** prev,
84708 unsigned long start, unsigned long end)
84709 {
84710+
84711+#ifdef CONFIG_PAX_SEGMEXEC
84712+ struct vm_area_struct *vma_m;
84713+#endif
84714+
84715 *prev = vma;
84716 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
84717 return -EINVAL;
84718@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
84719 zap_page_range(vma, start, end - start, &details);
84720 } else
84721 zap_page_range(vma, start, end - start, NULL);
84722+
84723+#ifdef CONFIG_PAX_SEGMEXEC
84724+ vma_m = pax_find_mirror_vma(vma);
84725+ if (vma_m) {
84726+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
84727+ struct zap_details details = {
84728+ .nonlinear_vma = vma_m,
84729+ .last_index = ULONG_MAX,
84730+ };
84731+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
84732+ } else
84733+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
84734+ }
84735+#endif
84736+
84737 return 0;
84738 }
84739
84740@@ -485,6 +516,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
84741 if (end < start)
84742 return error;
84743
84744+#ifdef CONFIG_PAX_SEGMEXEC
84745+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
84746+ if (end > SEGMEXEC_TASK_SIZE)
84747+ return error;
84748+ } else
84749+#endif
84750+
84751+ if (end > TASK_SIZE)
84752+ return error;
84753+
84754 error = 0;
84755 if (end == start)
84756 return error;
84757diff --git a/mm/memory-failure.c b/mm/memory-failure.c
84758index ceb0c7f..b2b8e94 100644
84759--- a/mm/memory-failure.c
84760+++ b/mm/memory-failure.c
84761@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
84762
84763 int sysctl_memory_failure_recovery __read_mostly = 1;
84764
84765-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
84766+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
84767
84768 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
84769
84770@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
84771 pfn, t->comm, t->pid);
84772 si.si_signo = SIGBUS;
84773 si.si_errno = 0;
84774- si.si_addr = (void *)addr;
84775+ si.si_addr = (void __user *)addr;
84776 #ifdef __ARCH_SI_TRAPNO
84777 si.si_trapno = trapno;
84778 #endif
84779@@ -760,7 +760,7 @@ static struct page_state {
84780 unsigned long res;
84781 char *msg;
84782 int (*action)(struct page *p, unsigned long pfn);
84783-} error_states[] = {
84784+} __do_const error_states[] = {
84785 { reserved, reserved, "reserved kernel", me_kernel },
84786 /*
84787 * free pages are specially detected outside this table:
84788@@ -1051,7 +1051,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
84789 nr_pages = 1 << compound_order(hpage);
84790 else /* normal page or thp */
84791 nr_pages = 1;
84792- atomic_long_add(nr_pages, &num_poisoned_pages);
84793+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
84794
84795 /*
84796 * We need/can do nothing about count=0 pages.
84797@@ -1081,7 +1081,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
84798 if (!PageHWPoison(hpage)
84799 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
84800 || (p != hpage && TestSetPageHWPoison(hpage))) {
84801- atomic_long_sub(nr_pages, &num_poisoned_pages);
84802+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
84803 return 0;
84804 }
84805 set_page_hwpoison_huge_page(hpage);
84806@@ -1148,7 +1148,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
84807 }
84808 if (hwpoison_filter(p)) {
84809 if (TestClearPageHWPoison(p))
84810- atomic_long_sub(nr_pages, &num_poisoned_pages);
84811+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
84812 unlock_page(hpage);
84813 put_page(hpage);
84814 return 0;
84815@@ -1350,7 +1350,7 @@ int unpoison_memory(unsigned long pfn)
84816 return 0;
84817 }
84818 if (TestClearPageHWPoison(p))
84819- atomic_long_sub(nr_pages, &num_poisoned_pages);
84820+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
84821 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
84822 return 0;
84823 }
84824@@ -1364,7 +1364,7 @@ int unpoison_memory(unsigned long pfn)
84825 */
84826 if (TestClearPageHWPoison(page)) {
84827 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
84828- atomic_long_sub(nr_pages, &num_poisoned_pages);
84829+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
84830 freeit = 1;
84831 if (PageHuge(page))
84832 clear_page_hwpoison_huge_page(page);
84833@@ -1491,7 +1491,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
84834 } else {
84835 set_page_hwpoison_huge_page(hpage);
84836 dequeue_hwpoisoned_huge_page(hpage);
84837- atomic_long_add(1 << compound_trans_order(hpage),
84838+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
84839 &num_poisoned_pages);
84840 }
84841 /* keep elevated page count for bad page */
84842@@ -1552,11 +1552,11 @@ int soft_offline_page(struct page *page, int flags)
84843 if (PageHuge(page)) {
84844 set_page_hwpoison_huge_page(hpage);
84845 dequeue_hwpoisoned_huge_page(hpage);
84846- atomic_long_add(1 << compound_trans_order(hpage),
84847+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
84848 &num_poisoned_pages);
84849 } else {
84850 SetPageHWPoison(page);
84851- atomic_long_inc(&num_poisoned_pages);
84852+ atomic_long_inc_unchecked(&num_poisoned_pages);
84853 }
84854 }
84855 /* keep elevated page count for bad page */
84856@@ -1596,7 +1596,7 @@ static int __soft_offline_page(struct page *page, int flags)
84857 put_page(page);
84858 pr_info("soft_offline: %#lx: invalidated\n", pfn);
84859 SetPageHWPoison(page);
84860- atomic_long_inc(&num_poisoned_pages);
84861+ atomic_long_inc_unchecked(&num_poisoned_pages);
84862 return 0;
84863 }
84864
84865@@ -1626,7 +1626,7 @@ static int __soft_offline_page(struct page *page, int flags)
84866 ret = -EIO;
84867 } else {
84868 SetPageHWPoison(page);
84869- atomic_long_inc(&num_poisoned_pages);
84870+ atomic_long_inc_unchecked(&num_poisoned_pages);
84871 }
84872 } else {
84873 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
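
Every num_poisoned_pages access above moves to the *_unchecked atomic flavor: under PaX's REFCOUNT hardening, plain atomic_long_t operations trap on overflow, so pure statistics counters have to opt out. A stand-in sketch of the unchecked type in C11 atomics; the real definitions live in the PaX patch itself:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the PaX type: a counter exempt from overflow trapping. */
typedef struct { atomic_long counter; } atomic_long_unchecked_t;

static void atomic_long_inc_unchecked(atomic_long_unchecked_t *v)
{
    atomic_fetch_add_explicit(&v->counter, 1, memory_order_relaxed);
}

static long atomic_long_read_unchecked(atomic_long_unchecked_t *v)
{
    return atomic_load_explicit(&v->counter, memory_order_relaxed);
}

int main(void)
{
    atomic_long_unchecked_t num_poisoned_pages = { 0 };

    atomic_long_inc_unchecked(&num_poisoned_pages);
    atomic_long_inc_unchecked(&num_poisoned_pages);
    printf("poisoned pages: %ld\n",
           atomic_long_read_unchecked(&num_poisoned_pages));
    return 0;
}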
84874diff --git a/mm/memory.c b/mm/memory.c
84875index 5e50800..7c0340f 100644
84876--- a/mm/memory.c
84877+++ b/mm/memory.c
84878@@ -211,14 +211,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
84879 * tear-down from @mm. The @fullmm argument is used when @mm is without
84880 * users and we're going to destroy the full address space (exit/execve).
84881 */
84882-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
84883+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
84884 {
84885 tlb->mm = mm;
84886
84887- tlb->fullmm = fullmm;
84888+ /* Is it from 0 to ~0? */
84889+ tlb->fullmm = !(start | (end+1));
84890 tlb->need_flush_all = 0;
84891- tlb->start = -1UL;
84892- tlb->end = 0;
84893+ tlb->start = start;
84894+ tlb->end = end;
84895 tlb->need_flush = 0;
84896 tlb->local.next = NULL;
84897 tlb->local.nr = 0;
84898@@ -258,8 +259,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
84899 {
84900 struct mmu_gather_batch *batch, *next;
84901
84902- tlb->start = start;
84903- tlb->end = end;
84904 tlb_flush_mmu(tlb);
84905
84906 /* keep the page table cache within bounds */
84907@@ -429,6 +428,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
84908 free_pte_range(tlb, pmd, addr);
84909 } while (pmd++, addr = next, addr != end);
84910
84911+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
84912 start &= PUD_MASK;
84913 if (start < floor)
84914 return;
84915@@ -443,6 +443,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
84916 pmd = pmd_offset(pud, start);
84917 pud_clear(pud);
84918 pmd_free_tlb(tlb, pmd, start);
84919+#endif
84920+
84921 }
84922
84923 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
84924@@ -462,6 +464,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
84925 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
84926 } while (pud++, addr = next, addr != end);
84927
84928+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
84929 start &= PGDIR_MASK;
84930 if (start < floor)
84931 return;
84932@@ -476,6 +479,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
84933 pud = pud_offset(pgd, start);
84934 pgd_clear(pgd);
84935 pud_free_tlb(tlb, pud, start);
84936+#endif
84937+
84938 }
84939
84940 /*
84941@@ -1101,7 +1106,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
84942 spinlock_t *ptl;
84943 pte_t *start_pte;
84944 pte_t *pte;
84945- unsigned long range_start = addr;
84946
84947 again:
84948 init_rss_vec(rss);
84949@@ -1204,17 +1208,25 @@ again:
84950 * and page-free while holding it.
84951 */
84952 if (force_flush) {
84953+ unsigned long old_end;
84954+
84955 force_flush = 0;
84956
84957-#ifdef HAVE_GENERIC_MMU_GATHER
84958- tlb->start = range_start;
84959+ /*
84960+ * Flush the TLB just for the previous segment,
84961+ * then update the range to be the remaining
84962+ * TLB range.
84963+ */
84964+ old_end = tlb->end;
84965 tlb->end = addr;
84966-#endif
84967+
84968 tlb_flush_mmu(tlb);
84969- if (addr != end) {
84970- range_start = addr;
84971+
84972+ tlb->start = addr;
84973+ tlb->end = old_end;
84974+
84975+ if (addr != end)
84976 goto again;
84977- }
84978 }
84979
84980 return addr;
84981@@ -1399,7 +1411,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
84982 unsigned long end = start + size;
84983
84984 lru_add_drain();
84985- tlb_gather_mmu(&tlb, mm, 0);
84986+ tlb_gather_mmu(&tlb, mm, start, end);
84987 update_hiwater_rss(mm);
84988 mmu_notifier_invalidate_range_start(mm, start, end);
84989 for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
84990@@ -1425,7 +1437,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
84991 unsigned long end = address + size;
84992
84993 lru_add_drain();
84994- tlb_gather_mmu(&tlb, mm, 0);
84995+ tlb_gather_mmu(&tlb, mm, address, end);
84996 update_hiwater_rss(mm);
84997 mmu_notifier_invalidate_range_start(mm, address, end);
84998 unmap_single_vma(&tlb, vma, address, end, details);
84999@@ -1638,12 +1650,6 @@ no_page_table:
85000 return page;
85001 }
85002
85003-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
85004-{
85005- return stack_guard_page_start(vma, addr) ||
85006- stack_guard_page_end(vma, addr+PAGE_SIZE);
85007-}
85008-
85009 /**
85010 * __get_user_pages() - pin user pages in memory
85011 * @tsk: task_struct of target task
85012@@ -1730,10 +1736,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
85013
85014 i = 0;
85015
85016- do {
85017+ while (nr_pages) {
85018 struct vm_area_struct *vma;
85019
85020- vma = find_extend_vma(mm, start);
85021+ vma = find_vma(mm, start);
85022 if (!vma && in_gate_area(mm, start)) {
85023 unsigned long pg = start & PAGE_MASK;
85024 pgd_t *pgd;
85025@@ -1782,7 +1788,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
85026 goto next_page;
85027 }
85028
85029- if (!vma ||
85030+ if (!vma || start < vma->vm_start ||
85031 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
85032 !(vm_flags & vma->vm_flags))
85033 return i ? : -EFAULT;
85034@@ -1811,11 +1817,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
85035 int ret;
85036 unsigned int fault_flags = 0;
85037
85038- /* For mlock, just skip the stack guard page. */
85039- if (foll_flags & FOLL_MLOCK) {
85040- if (stack_guard_page(vma, start))
85041- goto next_page;
85042- }
85043 if (foll_flags & FOLL_WRITE)
85044 fault_flags |= FAULT_FLAG_WRITE;
85045 if (nonblocking)
85046@@ -1895,7 +1896,7 @@ next_page:
85047 start += page_increm * PAGE_SIZE;
85048 nr_pages -= page_increm;
85049 } while (nr_pages && start < vma->vm_end);
85050- } while (nr_pages);
85051+ }
85052 return i;
85053 }
85054 EXPORT_SYMBOL(__get_user_pages);
85055@@ -2102,6 +2103,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
85056 page_add_file_rmap(page);
85057 set_pte_at(mm, addr, pte, mk_pte(page, prot));
85058
85059+#ifdef CONFIG_PAX_SEGMEXEC
85060+ pax_mirror_file_pte(vma, addr, page, ptl);
85061+#endif
85062+
85063 retval = 0;
85064 pte_unmap_unlock(pte, ptl);
85065 return retval;
85066@@ -2146,9 +2151,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
85067 if (!page_count(page))
85068 return -EINVAL;
85069 if (!(vma->vm_flags & VM_MIXEDMAP)) {
85070+
85071+#ifdef CONFIG_PAX_SEGMEXEC
85072+ struct vm_area_struct *vma_m;
85073+#endif
85074+
85075 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
85076 BUG_ON(vma->vm_flags & VM_PFNMAP);
85077 vma->vm_flags |= VM_MIXEDMAP;
85078+
85079+#ifdef CONFIG_PAX_SEGMEXEC
85080+ vma_m = pax_find_mirror_vma(vma);
85081+ if (vma_m)
85082+ vma_m->vm_flags |= VM_MIXEDMAP;
85083+#endif
85084+
85085 }
85086 return insert_page(vma, addr, page, vma->vm_page_prot);
85087 }
85088@@ -2231,6 +2248,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
85089 unsigned long pfn)
85090 {
85091 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
85092+ BUG_ON(vma->vm_mirror);
85093
85094 if (addr < vma->vm_start || addr >= vma->vm_end)
85095 return -EFAULT;
85096@@ -2478,7 +2496,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
85097
85098 BUG_ON(pud_huge(*pud));
85099
85100- pmd = pmd_alloc(mm, pud, addr);
85101+ pmd = (mm == &init_mm) ?
85102+ pmd_alloc_kernel(mm, pud, addr) :
85103+ pmd_alloc(mm, pud, addr);
85104 if (!pmd)
85105 return -ENOMEM;
85106 do {
85107@@ -2498,7 +2518,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
85108 unsigned long next;
85109 int err;
85110
85111- pud = pud_alloc(mm, pgd, addr);
85112+ pud = (mm == &init_mm) ?
85113+ pud_alloc_kernel(mm, pgd, addr) :
85114+ pud_alloc(mm, pgd, addr);
85115 if (!pud)
85116 return -ENOMEM;
85117 do {
85118@@ -2586,6 +2608,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
85119 copy_user_highpage(dst, src, va, vma);
85120 }
85121
85122+#ifdef CONFIG_PAX_SEGMEXEC
85123+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
85124+{
85125+ struct mm_struct *mm = vma->vm_mm;
85126+ spinlock_t *ptl;
85127+ pte_t *pte, entry;
85128+
85129+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
85130+ entry = *pte;
85131+ if (!pte_present(entry)) {
85132+ if (!pte_none(entry)) {
85133+ BUG_ON(pte_file(entry));
85134+ free_swap_and_cache(pte_to_swp_entry(entry));
85135+ pte_clear_not_present_full(mm, address, pte, 0);
85136+ }
85137+ } else {
85138+ struct page *page;
85139+
85140+ flush_cache_page(vma, address, pte_pfn(entry));
85141+ entry = ptep_clear_flush(vma, address, pte);
85142+ BUG_ON(pte_dirty(entry));
85143+ page = vm_normal_page(vma, address, entry);
85144+ if (page) {
85145+ update_hiwater_rss(mm);
85146+ if (PageAnon(page))
85147+ dec_mm_counter_fast(mm, MM_ANONPAGES);
85148+ else
85149+ dec_mm_counter_fast(mm, MM_FILEPAGES);
85150+ page_remove_rmap(page);
85151+ page_cache_release(page);
85152+ }
85153+ }
85154+ pte_unmap_unlock(pte, ptl);
85155+}
85156+
85157+/* PaX: if the vma is mirrored, synchronize the mirror's PTE.
85158+ *
85159+ * The ptl of the lower mapped page is held on entry and is not released on
85160+ * exit or inside, to ensure atomic changes to the PTE state (swapout, mremap, munmap, etc).
85161+ */
85162+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
85163+{
85164+ struct mm_struct *mm = vma->vm_mm;
85165+ unsigned long address_m;
85166+ spinlock_t *ptl_m;
85167+ struct vm_area_struct *vma_m;
85168+ pmd_t *pmd_m;
85169+ pte_t *pte_m, entry_m;
85170+
85171+ BUG_ON(!page_m || !PageAnon(page_m));
85172+
85173+ vma_m = pax_find_mirror_vma(vma);
85174+ if (!vma_m)
85175+ return;
85176+
85177+ BUG_ON(!PageLocked(page_m));
85178+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
85179+ address_m = address + SEGMEXEC_TASK_SIZE;
85180+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
85181+ pte_m = pte_offset_map(pmd_m, address_m);
85182+ ptl_m = pte_lockptr(mm, pmd_m);
85183+ if (ptl != ptl_m) {
85184+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
85185+ if (!pte_none(*pte_m))
85186+ goto out;
85187+ }
85188+
85189+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
85190+ page_cache_get(page_m);
85191+ page_add_anon_rmap(page_m, vma_m, address_m);
85192+ inc_mm_counter_fast(mm, MM_ANONPAGES);
85193+ set_pte_at(mm, address_m, pte_m, entry_m);
85194+ update_mmu_cache(vma_m, address_m, pte_m);
85195+out:
85196+ if (ptl != ptl_m)
85197+ spin_unlock(ptl_m);
85198+ pte_unmap(pte_m);
85199+ unlock_page(page_m);
85200+}
85201+
85202+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
85203+{
85204+ struct mm_struct *mm = vma->vm_mm;
85205+ unsigned long address_m;
85206+ spinlock_t *ptl_m;
85207+ struct vm_area_struct *vma_m;
85208+ pmd_t *pmd_m;
85209+ pte_t *pte_m, entry_m;
85210+
85211+ BUG_ON(!page_m || PageAnon(page_m));
85212+
85213+ vma_m = pax_find_mirror_vma(vma);
85214+ if (!vma_m)
85215+ return;
85216+
85217+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
85218+ address_m = address + SEGMEXEC_TASK_SIZE;
85219+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
85220+ pte_m = pte_offset_map(pmd_m, address_m);
85221+ ptl_m = pte_lockptr(mm, pmd_m);
85222+ if (ptl != ptl_m) {
85223+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
85224+ if (!pte_none(*pte_m))
85225+ goto out;
85226+ }
85227+
85228+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
85229+ page_cache_get(page_m);
85230+ page_add_file_rmap(page_m);
85231+ inc_mm_counter_fast(mm, MM_FILEPAGES);
85232+ set_pte_at(mm, address_m, pte_m, entry_m);
85233+ update_mmu_cache(vma_m, address_m, pte_m);
85234+out:
85235+ if (ptl != ptl_m)
85236+ spin_unlock(ptl_m);
85237+ pte_unmap(pte_m);
85238+}
85239+
85240+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
85241+{
85242+ struct mm_struct *mm = vma->vm_mm;
85243+ unsigned long address_m;
85244+ spinlock_t *ptl_m;
85245+ struct vm_area_struct *vma_m;
85246+ pmd_t *pmd_m;
85247+ pte_t *pte_m, entry_m;
85248+
85249+ vma_m = pax_find_mirror_vma(vma);
85250+ if (!vma_m)
85251+ return;
85252+
85253+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
85254+ address_m = address + SEGMEXEC_TASK_SIZE;
85255+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
85256+ pte_m = pte_offset_map(pmd_m, address_m);
85257+ ptl_m = pte_lockptr(mm, pmd_m);
85258+ if (ptl != ptl_m) {
85259+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
85260+ if (!pte_none(*pte_m))
85261+ goto out;
85262+ }
85263+
85264+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
85265+ set_pte_at(mm, address_m, pte_m, entry_m);
85266+out:
85267+ if (ptl != ptl_m)
85268+ spin_unlock(ptl_m);
85269+ pte_unmap(pte_m);
85270+}
85271+
85272+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
85273+{
85274+ struct page *page_m;
85275+ pte_t entry;
85276+
85277+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
85278+ goto out;
85279+
85280+ entry = *pte;
85281+ page_m = vm_normal_page(vma, address, entry);
85282+ if (!page_m)
85283+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
85284+ else if (PageAnon(page_m)) {
85285+ if (pax_find_mirror_vma(vma)) {
85286+ pte_unmap_unlock(pte, ptl);
85287+ lock_page(page_m);
85288+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
85289+ if (pte_same(entry, *pte))
85290+ pax_mirror_anon_pte(vma, address, page_m, ptl);
85291+ else
85292+ unlock_page(page_m);
85293+ }
85294+ } else
85295+ pax_mirror_file_pte(vma, address, page_m, ptl);
85296+
85297+out:
85298+ pte_unmap_unlock(pte, ptl);
85299+}
85300+#endif
85301+
85302 /*
85303 * This routine handles present pages, when users try to write
85304 * to a shared page. It is done by copying the page to a new address
85305@@ -2802,6 +3004,12 @@ gotten:
85306 */
85307 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
85308 if (likely(pte_same(*page_table, orig_pte))) {
85309+
85310+#ifdef CONFIG_PAX_SEGMEXEC
85311+ if (pax_find_mirror_vma(vma))
85312+ BUG_ON(!trylock_page(new_page));
85313+#endif
85314+
85315 if (old_page) {
85316 if (!PageAnon(old_page)) {
85317 dec_mm_counter_fast(mm, MM_FILEPAGES);
85318@@ -2853,6 +3061,10 @@ gotten:
85319 page_remove_rmap(old_page);
85320 }
85321
85322+#ifdef CONFIG_PAX_SEGMEXEC
85323+ pax_mirror_anon_pte(vma, address, new_page, ptl);
85324+#endif
85325+
85326 /* Free the old page.. */
85327 new_page = old_page;
85328 ret |= VM_FAULT_WRITE;
85329@@ -3128,6 +3340,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
85330 swap_free(entry);
85331 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
85332 try_to_free_swap(page);
85333+
85334+#ifdef CONFIG_PAX_SEGMEXEC
85335+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
85336+#endif
85337+
85338 unlock_page(page);
85339 if (page != swapcache) {
85340 /*
85341@@ -3151,6 +3368,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
85342
85343 /* No need to invalidate - it was non-present before */
85344 update_mmu_cache(vma, address, page_table);
85345+
85346+#ifdef CONFIG_PAX_SEGMEXEC
85347+ pax_mirror_anon_pte(vma, address, page, ptl);
85348+#endif
85349+
85350 unlock:
85351 pte_unmap_unlock(page_table, ptl);
85352 out:
85353@@ -3170,40 +3392,6 @@ out_release:
85354 }
85355
85356 /*
85357- * This is like a special single-page "expand_{down|up}wards()",
85358- * except we must first make sure that 'address{-|+}PAGE_SIZE'
85359- * doesn't hit another vma.
85360- */
85361-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
85362-{
85363- address &= PAGE_MASK;
85364- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
85365- struct vm_area_struct *prev = vma->vm_prev;
85366-
85367- /*
85368- * Is there a mapping abutting this one below?
85369- *
85370- * That's only ok if it's the same stack mapping
85371- * that has gotten split..
85372- */
85373- if (prev && prev->vm_end == address)
85374- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
85375-
85376- expand_downwards(vma, address - PAGE_SIZE);
85377- }
85378- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
85379- struct vm_area_struct *next = vma->vm_next;
85380-
85381- /* As VM_GROWSDOWN but s/below/above/ */
85382- if (next && next->vm_start == address + PAGE_SIZE)
85383- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
85384-
85385- expand_upwards(vma, address + PAGE_SIZE);
85386- }
85387- return 0;
85388-}
85389-
85390-/*
85391 * We enter with non-exclusive mmap_sem (to exclude vma changes,
85392 * but allow concurrent faults), and pte mapped but not yet locked.
85393 * We return with mmap_sem still held, but pte unmapped and unlocked.
85394@@ -3212,27 +3400,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
85395 unsigned long address, pte_t *page_table, pmd_t *pmd,
85396 unsigned int flags)
85397 {
85398- struct page *page;
85399+ struct page *page = NULL;
85400 spinlock_t *ptl;
85401 pte_t entry;
85402
85403- pte_unmap(page_table);
85404-
85405- /* Check if we need to add a guard page to the stack */
85406- if (check_stack_guard_page(vma, address) < 0)
85407- return VM_FAULT_SIGBUS;
85408-
85409- /* Use the zero-page for reads */
85410 if (!(flags & FAULT_FLAG_WRITE)) {
85411 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
85412 vma->vm_page_prot));
85413- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
85414+ ptl = pte_lockptr(mm, pmd);
85415+ spin_lock(ptl);
85416 if (!pte_none(*page_table))
85417 goto unlock;
85418 goto setpte;
85419 }
85420
85421 /* Allocate our own private page. */
85422+ pte_unmap(page_table);
85423+
85424 if (unlikely(anon_vma_prepare(vma)))
85425 goto oom;
85426 page = alloc_zeroed_user_highpage_movable(vma, address);
85427@@ -3256,6 +3440,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
85428 if (!pte_none(*page_table))
85429 goto release;
85430
85431+#ifdef CONFIG_PAX_SEGMEXEC
85432+ if (pax_find_mirror_vma(vma))
85433+ BUG_ON(!trylock_page(page));
85434+#endif
85435+
85436 inc_mm_counter_fast(mm, MM_ANONPAGES);
85437 page_add_new_anon_rmap(page, vma, address);
85438 setpte:
85439@@ -3263,6 +3452,12 @@ setpte:
85440
85441 /* No need to invalidate - it was non-present before */
85442 update_mmu_cache(vma, address, page_table);
85443+
85444+#ifdef CONFIG_PAX_SEGMEXEC
85445+ if (page)
85446+ pax_mirror_anon_pte(vma, address, page, ptl);
85447+#endif
85448+
85449 unlock:
85450 pte_unmap_unlock(page_table, ptl);
85451 return 0;
85452@@ -3406,6 +3601,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
85453 */
85454 /* Only go through if we didn't race with anybody else... */
85455 if (likely(pte_same(*page_table, orig_pte))) {
85456+
85457+#ifdef CONFIG_PAX_SEGMEXEC
85458+ if (anon && pax_find_mirror_vma(vma))
85459+ BUG_ON(!trylock_page(page));
85460+#endif
85461+
85462 flush_icache_page(vma, page);
85463 entry = mk_pte(page, vma->vm_page_prot);
85464 if (flags & FAULT_FLAG_WRITE)
85465@@ -3425,6 +3626,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
85466
85467 /* no need to invalidate: a not-present page won't be cached */
85468 update_mmu_cache(vma, address, page_table);
85469+
85470+#ifdef CONFIG_PAX_SEGMEXEC
85471+ if (anon)
85472+ pax_mirror_anon_pte(vma, address, page, ptl);
85473+ else
85474+ pax_mirror_file_pte(vma, address, page, ptl);
85475+#endif
85476+
85477 } else {
85478 if (cow_page)
85479 mem_cgroup_uncharge_page(cow_page);
85480@@ -3746,6 +3955,12 @@ int handle_pte_fault(struct mm_struct *mm,
85481 if (flags & FAULT_FLAG_WRITE)
85482 flush_tlb_fix_spurious_fault(vma, address);
85483 }
85484+
85485+#ifdef CONFIG_PAX_SEGMEXEC
85486+ pax_mirror_pte(vma, address, pte, pmd, ptl);
85487+ return 0;
85488+#endif
85489+
85490 unlock:
85491 pte_unmap_unlock(pte, ptl);
85492 return 0;
85493@@ -3762,6 +3977,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
85494 pmd_t *pmd;
85495 pte_t *pte;
85496
85497+#ifdef CONFIG_PAX_SEGMEXEC
85498+ struct vm_area_struct *vma_m;
85499+#endif
85500+
85501 __set_current_state(TASK_RUNNING);
85502
85503 count_vm_event(PGFAULT);
85504@@ -3773,6 +3992,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
85505 if (unlikely(is_vm_hugetlb_page(vma)))
85506 return hugetlb_fault(mm, vma, address, flags);
85507
85508+#ifdef CONFIG_PAX_SEGMEXEC
85509+ vma_m = pax_find_mirror_vma(vma);
85510+ if (vma_m) {
85511+ unsigned long address_m;
85512+ pgd_t *pgd_m;
85513+ pud_t *pud_m;
85514+ pmd_t *pmd_m;
85515+
85516+ if (vma->vm_start > vma_m->vm_start) {
85517+ address_m = address;
85518+ address -= SEGMEXEC_TASK_SIZE;
85519+ vma = vma_m;
85520+ } else
85521+ address_m = address + SEGMEXEC_TASK_SIZE;
85522+
85523+ pgd_m = pgd_offset(mm, address_m);
85524+ pud_m = pud_alloc(mm, pgd_m, address_m);
85525+ if (!pud_m)
85526+ return VM_FAULT_OOM;
85527+ pmd_m = pmd_alloc(mm, pud_m, address_m);
85528+ if (!pmd_m)
85529+ return VM_FAULT_OOM;
85530+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
85531+ return VM_FAULT_OOM;
85532+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
85533+ }
85534+#endif
85535+
85536 retry:
85537 pgd = pgd_offset(mm, address);
85538 pud = pud_alloc(mm, pgd, address);
85539@@ -3871,6 +4118,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
85540 spin_unlock(&mm->page_table_lock);
85541 return 0;
85542 }
85543+
85544+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
85545+{
85546+ pud_t *new = pud_alloc_one(mm, address);
85547+ if (!new)
85548+ return -ENOMEM;
85549+
85550+ smp_wmb(); /* See comment in __pte_alloc */
85551+
85552+ spin_lock(&mm->page_table_lock);
85553+ if (pgd_present(*pgd)) /* Another has populated it */
85554+ pud_free(mm, new);
85555+ else
85556+ pgd_populate_kernel(mm, pgd, new);
85557+ spin_unlock(&mm->page_table_lock);
85558+ return 0;
85559+}
85560 #endif /* __PAGETABLE_PUD_FOLDED */
85561
85562 #ifndef __PAGETABLE_PMD_FOLDED
85563@@ -3901,6 +4165,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
85564 spin_unlock(&mm->page_table_lock);
85565 return 0;
85566 }
85567+
85568+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
85569+{
85570+ pmd_t *new = pmd_alloc_one(mm, address);
85571+ if (!new)
85572+ return -ENOMEM;
85573+
85574+ smp_wmb(); /* See comment in __pte_alloc */
85575+
85576+ spin_lock(&mm->page_table_lock);
85577+#ifndef __ARCH_HAS_4LEVEL_HACK
85578+ if (pud_present(*pud)) /* Another has populated it */
85579+ pmd_free(mm, new);
85580+ else
85581+ pud_populate_kernel(mm, pud, new);
85582+#else
85583+ if (pgd_present(*pud)) /* Another has populated it */
85584+ pmd_free(mm, new);
85585+ else
85586+ pgd_populate_kernel(mm, pud, new);
85587+#endif /* __ARCH_HAS_4LEVEL_HACK */
85588+ spin_unlock(&mm->page_table_lock);
85589+ return 0;
85590+}
85591 #endif /* __PAGETABLE_PMD_FOLDED */
85592
85593 #if !defined(__HAVE_ARCH_GATE_AREA)
85594@@ -3914,7 +4202,7 @@ static int __init gate_vma_init(void)
85595 gate_vma.vm_start = FIXADDR_USER_START;
85596 gate_vma.vm_end = FIXADDR_USER_END;
85597 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
85598- gate_vma.vm_page_prot = __P101;
85599+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
85600
85601 return 0;
85602 }
85603@@ -4048,8 +4336,8 @@ out:
85604 return ret;
85605 }
85606
85607-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
85608- void *buf, int len, int write)
85609+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
85610+ void *buf, size_t len, int write)
85611 {
85612 resource_size_t phys_addr;
85613 unsigned long prot = 0;
85614@@ -4074,8 +4362,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
85615 * Access another process' address space as given in mm. If non-NULL, use the
85616 * given task for page fault accounting.
85617 */
85618-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
85619- unsigned long addr, void *buf, int len, int write)
85620+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
85621+ unsigned long addr, void *buf, size_t len, int write)
85622 {
85623 struct vm_area_struct *vma;
85624 void *old_buf = buf;
85625@@ -4083,7 +4371,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
85626 down_read(&mm->mmap_sem);
85627 /* ignore errors, just check how much was successfully transferred */
85628 while (len) {
85629- int bytes, ret, offset;
85630+ ssize_t bytes, ret, offset;
85631 void *maddr;
85632 struct page *page = NULL;
85633
85634@@ -4142,8 +4430,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
85635 *
85636 * The caller must hold a reference on @mm.
85637 */
85638-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
85639- void *buf, int len, int write)
85640+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
85641+ void *buf, size_t len, int write)
85642 {
85643 return __access_remote_vm(NULL, mm, addr, buf, len, write);
85644 }
85645@@ -4153,11 +4441,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
85646 * Source/target buffer must be kernel space,
85647 * Do not walk the page table directly, use get_user_pages
85648 */
85649-int access_process_vm(struct task_struct *tsk, unsigned long addr,
85650- void *buf, int len, int write)
85651+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
85652+ void *buf, size_t len, int write)
85653 {
85654 struct mm_struct *mm;
85655- int ret;
85656+ ssize_t ret;
85657
85658 mm = get_task_mm(tsk);
85659 if (!mm)
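
tlb_gather_mmu() now receives the address range being torn down instead of a fullmm flag, matching the upstream 3.10-stable fix; full-mm teardown is derived from the 0..~0UL range itself. A reduced sketch of the convention:

#include <stdbool.h>
#include <stdio.h>

struct mmu_gather { bool fullmm; unsigned long start, end; };

static void tlb_gather_mmu(struct mmu_gather *tlb,
                           unsigned long start, unsigned long end)
{
    tlb->fullmm = !(start | (end + 1));  /* 0..~0UL means the whole space */
    tlb->start = start;
    tlb->end = end;
}

int main(void)
{
    struct mmu_gather tlb;

    tlb_gather_mmu(&tlb, 0, ~0UL);            /* exit/execve: full teardown */
    printf("fullmm=%d\n", tlb.fullmm);

    tlb_gather_mmu(&tlb, 0x400000, 0x600000); /* munmap of a single range */
    printf("fullmm=%d range=[%#lx,%#lx)\n", tlb.fullmm, tlb.start, tlb.end);
    return 0;
}

Keeping the real range around lets zap_pte_range() flush just the segment processed so far and then resume, rather than tracking the range in tlb_finish_mmu().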
85660diff --git a/mm/mempolicy.c b/mm/mempolicy.c
85661index 4baf12e..5497066 100644
85662--- a/mm/mempolicy.c
85663+++ b/mm/mempolicy.c
85664@@ -708,6 +708,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
85665 unsigned long vmstart;
85666 unsigned long vmend;
85667
85668+#ifdef CONFIG_PAX_SEGMEXEC
85669+ struct vm_area_struct *vma_m;
85670+#endif
85671+
85672 vma = find_vma(mm, start);
85673 if (!vma || vma->vm_start > start)
85674 return -EFAULT;
85675@@ -751,6 +755,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
85676 err = vma_replace_policy(vma, new_pol);
85677 if (err)
85678 goto out;
85679+
85680+#ifdef CONFIG_PAX_SEGMEXEC
85681+ vma_m = pax_find_mirror_vma(vma);
85682+ if (vma_m) {
85683+ err = vma_replace_policy(vma_m, new_pol);
85684+ if (err)
85685+ goto out;
85686+ }
85687+#endif
85688+
85689 }
85690
85691 out:
85692@@ -1206,6 +1220,17 @@ static long do_mbind(unsigned long start, unsigned long len,
85693
85694 if (end < start)
85695 return -EINVAL;
85696+
85697+#ifdef CONFIG_PAX_SEGMEXEC
85698+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
85699+ if (end > SEGMEXEC_TASK_SIZE)
85700+ return -EINVAL;
85701+ } else
85702+#endif
85703+
85704+ if (end > TASK_SIZE)
85705+ return -EINVAL;
85706+
85707 if (end == start)
85708 return 0;
85709
85710@@ -1434,8 +1459,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
85711 */
85712 tcred = __task_cred(task);
85713 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
85714- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
85715- !capable(CAP_SYS_NICE)) {
85716+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
85717 rcu_read_unlock();
85718 err = -EPERM;
85719 goto out_put;
85720@@ -1466,6 +1490,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
85721 goto out;
85722 }
85723
85724+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
85725+ if (mm != current->mm &&
85726+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
85727+ mmput(mm);
85728+ err = -EPERM;
85729+ goto out;
85730+ }
85731+#endif
85732+
85733 err = do_migrate_pages(mm, old, new,
85734 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
85735
85736diff --git a/mm/migrate.c b/mm/migrate.c
85737index 6f0c244..6d1ae32 100644
85738--- a/mm/migrate.c
85739+++ b/mm/migrate.c
85740@@ -1399,8 +1399,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
85741 */
85742 tcred = __task_cred(task);
85743 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
85744- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
85745- !capable(CAP_SYS_NICE)) {
85746+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
85747 rcu_read_unlock();
85748 err = -EPERM;
85749 goto out;
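
migrate_pages() and move_pages() above both drop the clause that let a caller act on a target merely because the two real uids matched. The resulting predicate, pulled out into a standalone check; uid_eq() and struct cred are simplified stand-ins for the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct cred { unsigned uid, euid, suid; };

static bool uid_eq(unsigned a, unsigned b) { return a == b; }

static bool may_act_on(const struct cred *c, const struct cred *t,
                       bool has_sys_nice)
{
    /* allowed if the caller's euid matches the target's saved or real
     * uid, the caller's real uid matches the target's saved uid, or
     * the caller holds CAP_SYS_NICE; real-uid == real-uid alone is no
     * longer enough */
    return uid_eq(c->euid, t->suid) || uid_eq(c->euid, t->uid) ||
           uid_eq(c->uid,  t->suid) || has_sys_nice;
}

int main(void)
{
    struct cred caller = { .uid = 1000, .euid = 2000, .suid = 2000 };
    struct cred target = { .uid = 1000, .euid = 0,    .suid = 0    };

    /* previously allowed via the matching real uids; now denied */
    printf("allowed: %d\n", may_act_on(&caller, &target, false));
    return 0;
}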
85750diff --git a/mm/mlock.c b/mm/mlock.c
85751index 79b7cf7..9944291 100644
85752--- a/mm/mlock.c
85753+++ b/mm/mlock.c
85754@@ -13,6 +13,7 @@
85755 #include <linux/pagemap.h>
85756 #include <linux/mempolicy.h>
85757 #include <linux/syscalls.h>
85758+#include <linux/security.h>
85759 #include <linux/sched.h>
85760 #include <linux/export.h>
85761 #include <linux/rmap.h>
85762@@ -334,7 +335,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
85763 {
85764 unsigned long nstart, end, tmp;
85765 struct vm_area_struct * vma, * prev;
85766- int error;
85767+ int error = 0;
85768
85769 VM_BUG_ON(start & ~PAGE_MASK);
85770 VM_BUG_ON(len != PAGE_ALIGN(len));
85771@@ -343,6 +344,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
85772 return -EINVAL;
85773 if (end == start)
85774 return 0;
85775+ if (end > TASK_SIZE)
85776+ return -EINVAL;
85777+
85778 vma = find_vma(current->mm, start);
85779 if (!vma || vma->vm_start > start)
85780 return -ENOMEM;
85781@@ -354,6 +358,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
85782 for (nstart = start ; ; ) {
85783 vm_flags_t newflags;
85784
85785+#ifdef CONFIG_PAX_SEGMEXEC
85786+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
85787+ break;
85788+#endif
85789+
85790 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
85791
85792 newflags = vma->vm_flags & ~VM_LOCKED;
85793@@ -466,6 +475,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
85794 lock_limit >>= PAGE_SHIFT;
85795
85796 /* check against resource limits */
85797+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
85798 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
85799 error = do_mlock(start, len, 1);
85800 up_write(&current->mm->mmap_sem);
85801@@ -500,6 +510,11 @@ static int do_mlockall(int flags)
85802 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
85803 vm_flags_t newflags;
85804
85805+#ifdef CONFIG_PAX_SEGMEXEC
85806+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
85807+ break;
85808+#endif
85809+
85810 newflags = vma->vm_flags & ~VM_LOCKED;
85811 if (flags & MCL_CURRENT)
85812 newflags |= VM_LOCKED;
85813@@ -532,6 +547,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
85814 lock_limit >>= PAGE_SHIFT;
85815
85816 ret = -ENOMEM;
85817+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
85818 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
85819 capable(CAP_IPC_LOCK))
85820 ret = do_mlockall(flags);
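
The mlock hunks add RLIMIT_MEMLOCK resource learning and reject ranges beyond TASK_SIZE; enforcement of the limit itself is unchanged. Seen from user space, where the exact errno can be ENOMEM or EPERM depending on capabilities:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
    struct rlimit rl = { .rlim_cur = 4096, .rlim_max = 4096 };
    char *buf = malloc(1 << 20);

    if (!buf)
        return 1;
    setrlimit(RLIMIT_MEMLOCK, &rl);   /* cap locked memory at one page */

    if (mlock(buf, 1 << 20) != 0)     /* over the cap without CAP_IPC_LOCK */
        printf("mlock refused: %s\n", strerror(errno));
    else
        munlock(buf, 1 << 20);
    free(buf);
    return 0;
}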
85821diff --git a/mm/mmap.c b/mm/mmap.c
85822index 7dbe397..bfb7626 100644
85823--- a/mm/mmap.c
85824+++ b/mm/mmap.c
85825@@ -36,6 +36,7 @@
85826 #include <linux/sched/sysctl.h>
85827 #include <linux/notifier.h>
85828 #include <linux/memory.h>
85829+#include <linux/random.h>
85830
85831 #include <asm/uaccess.h>
85832 #include <asm/cacheflush.h>
85833@@ -52,6 +53,16 @@
85834 #define arch_rebalance_pgtables(addr, len) (addr)
85835 #endif
85836
85837+static inline void verify_mm_writelocked(struct mm_struct *mm)
85838+{
85839+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
85840+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
85841+ up_read(&mm->mmap_sem);
85842+ BUG();
85843+ }
85844+#endif
85845+}
85846+
85847 static void unmap_region(struct mm_struct *mm,
85848 struct vm_area_struct *vma, struct vm_area_struct *prev,
85849 unsigned long start, unsigned long end);
85850@@ -71,16 +82,25 @@ static void unmap_region(struct mm_struct *mm,
85851 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
85852 *
85853 */
85854-pgprot_t protection_map[16] = {
85855+pgprot_t protection_map[16] __read_only = {
85856 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
85857 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
85858 };
85859
85860-pgprot_t vm_get_page_prot(unsigned long vm_flags)
85861+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
85862 {
85863- return __pgprot(pgprot_val(protection_map[vm_flags &
85864+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
85865 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
85866 pgprot_val(arch_vm_get_page_prot(vm_flags)));
85867+
85868+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
85869+ if (!(__supported_pte_mask & _PAGE_NX) &&
85870+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
85871+ (vm_flags & (VM_READ | VM_WRITE)))
85872+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
85873+#endif
85874+
85875+ return prot;
85876 }
85877 EXPORT_SYMBOL(vm_get_page_prot);
85878
85879@@ -89,6 +109,7 @@ int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
85880 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
85881 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
85882 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
85883+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
85884 /*
85885 * Make sure vm_committed_as in one cacheline and not cacheline shared with
85886 * other variables. It can be updated by several CPUs frequently.
85887@@ -247,6 +268,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
85888 struct vm_area_struct *next = vma->vm_next;
85889
85890 might_sleep();
85891+ BUG_ON(vma->vm_mirror);
85892 if (vma->vm_ops && vma->vm_ops->close)
85893 vma->vm_ops->close(vma);
85894 if (vma->vm_file)
85895@@ -291,6 +313,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
85896 * not page aligned -Ram Gupta
85897 */
85898 rlim = rlimit(RLIMIT_DATA);
85899+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
85900 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
85901 (mm->end_data - mm->start_data) > rlim)
85902 goto out;
85903@@ -933,6 +956,12 @@ static int
85904 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
85905 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
85906 {
85907+
85908+#ifdef CONFIG_PAX_SEGMEXEC
85909+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
85910+ return 0;
85911+#endif
85912+
85913 if (is_mergeable_vma(vma, file, vm_flags) &&
85914 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
85915 if (vma->vm_pgoff == vm_pgoff)
85916@@ -952,6 +981,12 @@ static int
85917 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
85918 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
85919 {
85920+
85921+#ifdef CONFIG_PAX_SEGMEXEC
85922+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
85923+ return 0;
85924+#endif
85925+
85926 if (is_mergeable_vma(vma, file, vm_flags) &&
85927 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
85928 pgoff_t vm_pglen;
85929@@ -994,13 +1029,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
85930 struct vm_area_struct *vma_merge(struct mm_struct *mm,
85931 struct vm_area_struct *prev, unsigned long addr,
85932 unsigned long end, unsigned long vm_flags,
85933- struct anon_vma *anon_vma, struct file *file,
85934+ struct anon_vma *anon_vma, struct file *file,
85935 pgoff_t pgoff, struct mempolicy *policy)
85936 {
85937 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
85938 struct vm_area_struct *area, *next;
85939 int err;
85940
85941+#ifdef CONFIG_PAX_SEGMEXEC
85942+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
85943+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
85944+
85945+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
85946+#endif
85947+
85948 /*
85949 * We later require that vma->vm_flags == vm_flags,
85950 * so this tests vma->vm_flags & VM_SPECIAL, too.
85951@@ -1016,6 +1058,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
85952 if (next && next->vm_end == end) /* cases 6, 7, 8 */
85953 next = next->vm_next;
85954
85955+#ifdef CONFIG_PAX_SEGMEXEC
85956+ if (prev)
85957+ prev_m = pax_find_mirror_vma(prev);
85958+ if (area)
85959+ area_m = pax_find_mirror_vma(area);
85960+ if (next)
85961+ next_m = pax_find_mirror_vma(next);
85962+#endif
85963+
85964 /*
85965 * Can it merge with the predecessor?
85966 */
85967@@ -1035,9 +1086,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
85968 /* cases 1, 6 */
85969 err = vma_adjust(prev, prev->vm_start,
85970 next->vm_end, prev->vm_pgoff, NULL);
85971- } else /* cases 2, 5, 7 */
85972+
85973+#ifdef CONFIG_PAX_SEGMEXEC
85974+ if (!err && prev_m)
85975+ err = vma_adjust(prev_m, prev_m->vm_start,
85976+ next_m->vm_end, prev_m->vm_pgoff, NULL);
85977+#endif
85978+
85979+ } else { /* cases 2, 5, 7 */
85980 err = vma_adjust(prev, prev->vm_start,
85981 end, prev->vm_pgoff, NULL);
85982+
85983+#ifdef CONFIG_PAX_SEGMEXEC
85984+ if (!err && prev_m)
85985+ err = vma_adjust(prev_m, prev_m->vm_start,
85986+ end_m, prev_m->vm_pgoff, NULL);
85987+#endif
85988+
85989+ }
85990 if (err)
85991 return NULL;
85992 khugepaged_enter_vma_merge(prev);
85993@@ -1051,12 +1117,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
85994 mpol_equal(policy, vma_policy(next)) &&
85995 can_vma_merge_before(next, vm_flags,
85996 anon_vma, file, pgoff+pglen)) {
85997- if (prev && addr < prev->vm_end) /* case 4 */
85998+ if (prev && addr < prev->vm_end) { /* case 4 */
85999 err = vma_adjust(prev, prev->vm_start,
86000 addr, prev->vm_pgoff, NULL);
86001- else /* cases 3, 8 */
86002+
86003+#ifdef CONFIG_PAX_SEGMEXEC
86004+ if (!err && prev_m)
86005+ err = vma_adjust(prev_m, prev_m->vm_start,
86006+ addr_m, prev_m->vm_pgoff, NULL);
86007+#endif
86008+
86009+ } else { /* cases 3, 8 */
86010 err = vma_adjust(area, addr, next->vm_end,
86011 next->vm_pgoff - pglen, NULL);
86012+
86013+#ifdef CONFIG_PAX_SEGMEXEC
86014+ if (!err && area_m)
86015+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
86016+ next_m->vm_pgoff - pglen, NULL);
86017+#endif
86018+
86019+ }
86020 if (err)
86021 return NULL;
86022 khugepaged_enter_vma_merge(area);
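
The vma_merge() changes follow a single rule: under PAX_SEGMEXEC every executable mapping below SEGMEXEC_TASK_SIZE has a shadow ("mirror") vma at the same offset above it, so each vma_adjust() on the primary range must be replayed on the mirror with both endpoints shifted by SEGMEXEC_TASK_SIZE — hence the paired vma_adjust(prev_m/area_m, ...) calls. The address arithmetic in isolation (0x60000000 is the 1.5 GiB i386 split PaX uses; treat it as an example constant):

#include <assert.h>
#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL /* 1.5 GiB i386 split */

static unsigned long mirror_of(unsigned long addr)
{
        assert(addr < SEGMEXEC_TASK_SIZE);
        return addr + SEGMEXEC_TASK_SIZE;
}

int main(void)
{
        unsigned long addr = 0x08048000; /* classic i386 ELF text base */
        printf("primary %#lx -> mirror %#lx\n", addr, mirror_of(addr));
        return 0;
}
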
86023@@ -1165,8 +1246,10 @@ none:
86024 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
86025 struct file *file, long pages)
86026 {
86027- const unsigned long stack_flags
86028- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
86029+
86030+#ifdef CONFIG_PAX_RANDMMAP
86031+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
86032+#endif
86033
86034 mm->total_vm += pages;
86035
86036@@ -1174,7 +1257,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
86037 mm->shared_vm += pages;
86038 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
86039 mm->exec_vm += pages;
86040- } else if (flags & stack_flags)
86041+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
86042 mm->stack_vm += pages;
86043 }
86044 #endif /* CONFIG_PROC_FS */
86045@@ -1213,7 +1296,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
86046 * (the exception is when the underlying filesystem is noexec
86047 * mounted, in which case we don't add PROT_EXEC.)
86048 */
86049- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
86050+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
86051 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
86052 prot |= PROT_EXEC;
86053
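Stock kernels apply READ_IMPLIES_EXEC only when PROT_READ is requested; the hunk above extends the check to PROT_WRITE, so writable requests under a legacy personality are upgraded the same way. The resulting prot fixup as a pure function (the flag values match the usual Linux ones, but the function itself is only a model):

#include <assert.h>
#include <stdbool.h>

#define PROT_READ  0x1
#define PROT_WRITE 0x2
#define PROT_EXEC  0x4

/* Patched rule: readable *or* writable requests gain PROT_EXEC
 * under READ_IMPLIES_EXEC, unless the file lives on a noexec mount. */
static int fixup_prot(int prot, bool read_implies_exec, bool noexec_mount)
{
        if ((prot & (PROT_READ | PROT_WRITE)) && read_implies_exec &&
            !noexec_mount)
                prot |= PROT_EXEC;
        return prot;
}

int main(void)
{
        assert(fixup_prot(PROT_WRITE, true, false) & PROT_EXEC);
        assert(!(fixup_prot(PROT_WRITE, false, false) & PROT_EXEC));
        return 0;
}
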
86054@@ -1239,7 +1322,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
86055 /* Obtain the address to map to. we verify (or select) it and ensure
86056 * that it represents a valid section of the address space.
86057 */
86058- addr = get_unmapped_area(file, addr, len, pgoff, flags);
86059+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
86060 if (addr & ~PAGE_MASK)
86061 return addr;
86062
86063@@ -1250,6 +1333,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
86064 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
86065 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
86066
86067+#ifdef CONFIG_PAX_MPROTECT
86068+ if (mm->pax_flags & MF_PAX_MPROTECT) {
86069+
86070+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
86071+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
86072+ mm->binfmt->handle_mmap)
86073+ mm->binfmt->handle_mmap(file);
86074+#endif
86075+
86076+#ifndef CONFIG_PAX_MPROTECT_COMPAT
86077+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
86078+ gr_log_rwxmmap(file);
86079+
86080+#ifdef CONFIG_PAX_EMUPLT
86081+ vm_flags &= ~VM_EXEC;
86082+#else
86083+ return -EPERM;
86084+#endif
86085+
86086+ }
86087+
86088+ if (!(vm_flags & VM_EXEC))
86089+ vm_flags &= ~VM_MAYEXEC;
86090+#else
86091+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
86092+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
86093+#endif
86094+ else
86095+ vm_flags &= ~VM_MAYWRITE;
86096+ }
86097+#endif
86098+
86099+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
86100+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
86101+ vm_flags &= ~VM_PAGEEXEC;
86102+#endif
86103+
86104 if (flags & MAP_LOCKED)
86105 if (!can_do_mlock())
86106 return -EPERM;
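
The PAX_MPROTECT block above enforces W^X at map time: a mapping requesting write and exec together is logged and either refused or (with EMUPLT) stripped of exec, and whichever of write/exec the mapping lacks, it also loses the matching VM_MAY* bit so mprotect() can never grant it later. That flag transformation, modeled on the non-COMPAT branch as a pure function (VM_* values match Linux's; this is a sketch):

#include <assert.h>

#define VM_WRITE    0x2UL
#define VM_EXEC     0x4UL
#define VM_MAYWRITE 0x20UL
#define VM_MAYEXEC  0x40UL

/* Non-COMPAT PAX_MPROTECT policy: W+X together is refused;
 * otherwise the unused right is locked out permanently. */
static long mprotect_mask(unsigned long vm_flags, unsigned long *out)
{
        if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
                return -1;                /* -EPERM in the kernel */
        if (!(vm_flags & VM_EXEC))
                vm_flags &= ~VM_MAYEXEC;  /* may never become executable */
        else
                vm_flags &= ~VM_MAYWRITE; /* may never become writable */
        *out = vm_flags;
        return 0;
}

int main(void)
{
        unsigned long f;
        assert(mprotect_mask(VM_WRITE | VM_EXEC | VM_MAYWRITE | VM_MAYEXEC,
                             &f) < 0);
        assert(mprotect_mask(VM_WRITE | VM_MAYWRITE | VM_MAYEXEC, &f) == 0 &&
               !(f & VM_MAYEXEC));
        return 0;
}
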
86107@@ -1261,6 +1381,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
86108 locked += mm->locked_vm;
86109 lock_limit = rlimit(RLIMIT_MEMLOCK);
86110 lock_limit >>= PAGE_SHIFT;
86111+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
86112 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
86113 return -EAGAIN;
86114 }
86115@@ -1341,6 +1462,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
86116 vm_flags |= VM_NORESERVE;
86117 }
86118
86119+ if (!gr_acl_handle_mmap(file, prot))
86120+ return -EACCES;
86121+
86122 addr = mmap_region(file, addr, len, vm_flags, pgoff);
86123 if (!IS_ERR_VALUE(addr) &&
86124 ((vm_flags & VM_LOCKED) ||
86125@@ -1432,7 +1556,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
86126 vm_flags_t vm_flags = vma->vm_flags;
86127
86128 /* If it was private or non-writable, the write bit is already clear */
86129- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
86130+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
86131 return 0;
86132
86133 /* The backer wishes to know when pages are first written to? */
86134@@ -1480,7 +1604,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
86135 unsigned long charged = 0;
86136 struct inode *inode = file ? file_inode(file) : NULL;
86137
86138+#ifdef CONFIG_PAX_SEGMEXEC
86139+ struct vm_area_struct *vma_m = NULL;
86140+#endif
86141+
86142+ /*
86143+ * mm->mmap_sem is required to protect against another thread
86144+ * changing the mappings in case we sleep.
86145+ */
86146+ verify_mm_writelocked(mm);
86147+
86148 /* Check against address space limit. */
86149+
86150+#ifdef CONFIG_PAX_RANDMMAP
86151+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
86152+#endif
86153+
86154 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
86155 unsigned long nr_pages;
86156
86157@@ -1499,11 +1638,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
86158
86159 /* Clear old maps */
86160 error = -ENOMEM;
86161-munmap_back:
86162 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
86163 if (do_munmap(mm, addr, len))
86164 return -ENOMEM;
86165- goto munmap_back;
86166+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
86167 }
86168
86169 /*
86170@@ -1534,6 +1672,16 @@ munmap_back:
86171 goto unacct_error;
86172 }
86173
86174+#ifdef CONFIG_PAX_SEGMEXEC
86175+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
86176+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
86177+ if (!vma_m) {
86178+ error = -ENOMEM;
86179+ goto free_vma;
86180+ }
86181+ }
86182+#endif
86183+
86184 vma->vm_mm = mm;
86185 vma->vm_start = addr;
86186 vma->vm_end = addr + len;
86187@@ -1558,6 +1706,13 @@ munmap_back:
86188 if (error)
86189 goto unmap_and_free_vma;
86190
86191+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
86192+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
86193+ vma->vm_flags |= VM_PAGEEXEC;
86194+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
86195+ }
86196+#endif
86197+
86198 /* Can addr have changed??
86199 *
86200 * Answer: Yes, several device drivers can do it in their
86201@@ -1596,6 +1751,11 @@ munmap_back:
86202 vma_link(mm, vma, prev, rb_link, rb_parent);
86203 file = vma->vm_file;
86204
86205+#ifdef CONFIG_PAX_SEGMEXEC
86206+ if (vma_m)
86207+ BUG_ON(pax_mirror_vma(vma_m, vma));
86208+#endif
86209+
86210 /* Once vma denies write, undo our temporary denial count */
86211 if (correct_wcount)
86212 atomic_inc(&inode->i_writecount);
86213@@ -1603,6 +1763,7 @@ out:
86214 perf_event_mmap(vma);
86215
86216 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
86217+ track_exec_limit(mm, addr, addr + len, vm_flags);
86218 if (vm_flags & VM_LOCKED) {
86219 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
86220 vma == get_gate_vma(current->mm)))
86221@@ -1626,6 +1787,12 @@ unmap_and_free_vma:
86222 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
86223 charged = 0;
86224 free_vma:
86225+
86226+#ifdef CONFIG_PAX_SEGMEXEC
86227+ if (vma_m)
86228+ kmem_cache_free(vm_area_cachep, vma_m);
86229+#endif
86230+
86231 kmem_cache_free(vm_area_cachep, vma);
86232 unacct_error:
86233 if (charged)
86234@@ -1633,7 +1800,63 @@ unacct_error:
86235 return error;
86236 }
86237
86238-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
86239+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
86240+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
86241+{
86242+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
86243+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
86244+
86245+ return 0;
86246+}
86247+#endif
86248+
86249+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
86250+{
86251+ if (!vma) {
86252+#ifdef CONFIG_STACK_GROWSUP
86253+ if (addr > sysctl_heap_stack_gap)
86254+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
86255+ else
86256+ vma = find_vma(current->mm, 0);
86257+ if (vma && (vma->vm_flags & VM_GROWSUP))
86258+ return false;
86259+#endif
86260+ return true;
86261+ }
86262+
86263+ if (addr + len > vma->vm_start)
86264+ return false;
86265+
86266+ if (vma->vm_flags & VM_GROWSDOWN)
86267+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
86268+#ifdef CONFIG_STACK_GROWSUP
86269+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
86270+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
86271+#endif
86272+ else if (offset)
86273+ return offset <= vma->vm_start - addr - len;
86274+
86275+ return true;
86276+}
86277+
86278+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
86279+{
86280+ if (vma->vm_start < len)
86281+ return -ENOMEM;
86282+
86283+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
86284+ if (offset <= vma->vm_start - len)
86285+ return vma->vm_start - len - offset;
86286+ else
86287+ return -ENOMEM;
86288+ }
86289+
86290+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
86291+ return vma->vm_start - len - sysctl_heap_stack_gap;
86292+ return -ENOMEM;
86293+}
86294+
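check_heap_stack_gap() above is the placement predicate behind all the arch_get_unmapped_area() changes: a candidate [addr, addr+len) must not collide with the following vma and, when that vma is a GROWSDOWN stack, must leave at least sysctl_heap_stack_gap bytes below it. The core arithmetic in isolation (GROWSDOWN-only, as on x86; a sketch, not the kernel function):

#include <assert.h>
#include <stdbool.h>

static unsigned long heap_stack_gap = 64 * 1024;

/* Candidate [addr, addr+len) against the next mapping starting at
 * vm_start; grows_down marks a stack vma that needs the gap. */
static bool gap_ok(unsigned long addr, unsigned long len,
                   unsigned long vm_start, bool grows_down)
{
        if (addr + len > vm_start)
                return false; /* plain overlap */
        if (grows_down)
                return heap_stack_gap <= vm_start - addr - len;
        return true;
}

int main(void)
{
        /* exactly 64 KiB below a stack: accepted; one byte less: not */
        assert(gap_ok(0x100000, 0x10000, 0x120000, true));
        assert(!gap_ok(0x100001, 0x10000, 0x120000, true));
        return 0;
}
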
86295+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
86296 {
86297 /*
86298 * We implement the search by looking for an rbtree node that
86299@@ -1681,11 +1904,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
86300 }
86301 }
86302
86303- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
86304+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
86305 check_current:
86306 /* Check if current node has a suitable gap */
86307 if (gap_start > high_limit)
86308 return -ENOMEM;
86309+
86310+ if (gap_end - gap_start > info->threadstack_offset)
86311+ gap_start += info->threadstack_offset;
86312+ else
86313+ gap_start = gap_end;
86314+
86315+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
86316+ if (gap_end - gap_start > sysctl_heap_stack_gap)
86317+ gap_start += sysctl_heap_stack_gap;
86318+ else
86319+ gap_start = gap_end;
86320+ }
86321+ if (vma->vm_flags & VM_GROWSDOWN) {
86322+ if (gap_end - gap_start > sysctl_heap_stack_gap)
86323+ gap_end -= sysctl_heap_stack_gap;
86324+ else
86325+ gap_end = gap_start;
86326+ }
86327 if (gap_end >= low_limit && gap_end - gap_start >= length)
86328 goto found;
86329
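The lines added to unmapped_area() shrink each candidate gap before the size test: first by the per-mm thread-stack randomization offset, then by sysctl_heap_stack_gap on whichever side borders a growing stack, so the search can never return a slot that check_heap_stack_gap() would veto. The trimming step factored out (a sketch; the rbtree walk itself is untouched):

#include <assert.h>
#include <stdbool.h>

static unsigned long heap_stack_gap = 64 * 1024;

/* Trim [*start, *end) as the patched unmapped_area() does: reserve
 * 'offset' at the bottom, plus the stack gap on any guarded side.
 * Returns the usable gap size. */
static unsigned long trim_gap(unsigned long *start, unsigned long *end,
                              unsigned long offset,
                              bool prev_grows_up, bool next_grows_down)
{
        if (*end - *start > offset)
                *start += offset;
        else
                *start = *end;
        if (prev_grows_up) {
                if (*end - *start > heap_stack_gap)
                        *start += heap_stack_gap;
                else
                        *start = *end;
        }
        if (next_grows_down) {
                if (*end - *start > heap_stack_gap)
                        *end -= heap_stack_gap;
                else
                        *end = *start;
        }
        return *end - *start;
}

int main(void)
{
        unsigned long s = 0x100000, e = 0x140000; /* 256 KiB gap */
        /* stack above the gap: 256K - 64K = 192K remain usable */
        assert(trim_gap(&s, &e, 0, false, true) == 192 * 1024);
        return 0;
}
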
86330@@ -1735,7 +1976,7 @@ found:
86331 return gap_start;
86332 }
86333
86334-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
86335+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
86336 {
86337 struct mm_struct *mm = current->mm;
86338 struct vm_area_struct *vma;
86339@@ -1789,6 +2030,24 @@ check_current:
86340 gap_end = vma->vm_start;
86341 if (gap_end < low_limit)
86342 return -ENOMEM;
86343+
86344+ if (gap_end - gap_start > info->threadstack_offset)
86345+ gap_end -= info->threadstack_offset;
86346+ else
86347+ gap_end = gap_start;
86348+
86349+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
86350+ if (gap_end - gap_start > sysctl_heap_stack_gap)
86351+ gap_start += sysctl_heap_stack_gap;
86352+ else
86353+ gap_start = gap_end;
86354+ }
86355+ if (vma->vm_flags & VM_GROWSDOWN) {
86356+ if (gap_end - gap_start > sysctl_heap_stack_gap)
86357+ gap_end -= sysctl_heap_stack_gap;
86358+ else
86359+ gap_end = gap_start;
86360+ }
86361 if (gap_start <= high_limit && gap_end - gap_start >= length)
86362 goto found;
86363
86364@@ -1852,6 +2111,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
86365 struct mm_struct *mm = current->mm;
86366 struct vm_area_struct *vma;
86367 struct vm_unmapped_area_info info;
86368+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
86369
86370 if (len > TASK_SIZE)
86371 return -ENOMEM;
86372@@ -1859,29 +2119,45 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
86373 if (flags & MAP_FIXED)
86374 return addr;
86375
86376+#ifdef CONFIG_PAX_RANDMMAP
86377+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
86378+#endif
86379+
86380 if (addr) {
86381 addr = PAGE_ALIGN(addr);
86382 vma = find_vma(mm, addr);
86383- if (TASK_SIZE - len >= addr &&
86384- (!vma || addr + len <= vma->vm_start))
86385+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
86386 return addr;
86387 }
86388
86389 info.flags = 0;
86390 info.length = len;
86391 info.low_limit = TASK_UNMAPPED_BASE;
86392+
86393+#ifdef CONFIG_PAX_RANDMMAP
86394+ if (mm->pax_flags & MF_PAX_RANDMMAP)
86395+ info.low_limit += mm->delta_mmap;
86396+#endif
86397+
86398 info.high_limit = TASK_SIZE;
86399 info.align_mask = 0;
86400+ info.threadstack_offset = offset;
86401 return vm_unmapped_area(&info);
86402 }
86403 #endif
86404
86405 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
86406 {
86407+
86408+#ifdef CONFIG_PAX_SEGMEXEC
86409+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
86410+ return;
86411+#endif
86412+
86413 /*
86414 * Is this a new hole at the lowest possible address?
86415 */
86416- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
86417+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
86418 mm->free_area_cache = addr;
86419 }
86420
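With PAX_RANDMMAP set, the caller's address hint is ignored (the `if (addr)` block is skipped) and the search floor is raised by mm->delta_mmap, a random page-aligned delta chosen once per exec, which randomizes the whole mmap base. How such a delta shifts the floor, sketched with example values (the real mask width is per-architecture; 16 bits of page-granular randomization and the i386 TASK_UNMAPPED_BASE are assumptions here):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT 12
#define TASK_UNMAPPED_BASE 0x40000000UL /* example i386 value */

int main(void)
{
        srand((unsigned)time(NULL));
        /* per-exec delta: a random page count, 16 bits wide here */
        unsigned long delta_mmap =
                ((unsigned long)rand() & 0xFFFFUL) << PAGE_SHIFT;
        unsigned long low_limit = TASK_UNMAPPED_BASE + delta_mmap;
        printf("mmap search floor: %#lx (delta %#lx)\n",
               low_limit, delta_mmap);
        return 0;
}
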
86421@@ -1899,6 +2175,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
86422 struct mm_struct *mm = current->mm;
86423 unsigned long addr = addr0;
86424 struct vm_unmapped_area_info info;
86425+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
86426
86427 /* requested length too big for entire address space */
86428 if (len > TASK_SIZE)
86429@@ -1907,12 +2184,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
86430 if (flags & MAP_FIXED)
86431 return addr;
86432
86433+#ifdef CONFIG_PAX_RANDMMAP
86434+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
86435+#endif
86436+
86437 /* requesting a specific address */
86438 if (addr) {
86439 addr = PAGE_ALIGN(addr);
86440 vma = find_vma(mm, addr);
86441- if (TASK_SIZE - len >= addr &&
86442- (!vma || addr + len <= vma->vm_start))
86443+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
86444 return addr;
86445 }
86446
86447@@ -1921,6 +2201,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
86448 info.low_limit = PAGE_SIZE;
86449 info.high_limit = mm->mmap_base;
86450 info.align_mask = 0;
86451+ info.threadstack_offset = offset;
86452 addr = vm_unmapped_area(&info);
86453
86454 /*
86455@@ -1933,6 +2214,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
86456 VM_BUG_ON(addr != -ENOMEM);
86457 info.flags = 0;
86458 info.low_limit = TASK_UNMAPPED_BASE;
86459+
86460+#ifdef CONFIG_PAX_RANDMMAP
86461+ if (mm->pax_flags & MF_PAX_RANDMMAP)
86462+ info.low_limit += mm->delta_mmap;
86463+#endif
86464+
86465 info.high_limit = TASK_SIZE;
86466 addr = vm_unmapped_area(&info);
86467 }
86468@@ -1943,6 +2230,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
86469
86470 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
86471 {
86472+
86473+#ifdef CONFIG_PAX_SEGMEXEC
86474+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
86475+ return;
86476+#endif
86477+
86478 /*
86479 * Is this a new hole at the highest possible address?
86480 */
86481@@ -1950,8 +2243,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
86482 mm->free_area_cache = addr;
86483
86484 /* don't allow allocations above current base */
86485- if (mm->free_area_cache > mm->mmap_base)
86486+ if (mm->free_area_cache > mm->mmap_base) {
86487 mm->free_area_cache = mm->mmap_base;
86488+ mm->cached_hole_size = ~0UL;
86489+ }
86490 }
86491
86492 unsigned long
86493@@ -2047,6 +2342,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
86494 return vma;
86495 }
86496
86497+#ifdef CONFIG_PAX_SEGMEXEC
86498+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
86499+{
86500+ struct vm_area_struct *vma_m;
86501+
86502+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
86503+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
86504+ BUG_ON(vma->vm_mirror);
86505+ return NULL;
86506+ }
86507+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
86508+ vma_m = vma->vm_mirror;
86509+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
86510+ BUG_ON(vma->vm_file != vma_m->vm_file);
86511+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
86512+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
86513+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
86514+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
86515+ return vma_m;
86516+}
86517+#endif
86518+
86519 /*
86520 * Verify that the stack growth is acceptable and
86521 * update accounting. This is shared with both the
86522@@ -2063,6 +2380,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
86523 return -ENOMEM;
86524
86525 /* Stack limit test */
86526+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
86527 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
86528 return -ENOMEM;
86529
86530@@ -2073,6 +2391,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
86531 locked = mm->locked_vm + grow;
86532 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
86533 limit >>= PAGE_SHIFT;
86534+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
86535 if (locked > limit && !capable(CAP_IPC_LOCK))
86536 return -ENOMEM;
86537 }
86538@@ -2102,37 +2421,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
86539 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
86540 * vma is the last one with address > vma->vm_end. Have to extend vma.
86541 */
86542+#ifndef CONFIG_IA64
86543+static
86544+#endif
86545 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
86546 {
86547 int error;
86548+ bool locknext;
86549
86550 if (!(vma->vm_flags & VM_GROWSUP))
86551 return -EFAULT;
86552
86553+ /* Also guard against wrapping around to address 0. */
86554+ if (address < PAGE_ALIGN(address+1))
86555+ address = PAGE_ALIGN(address+1);
86556+ else
86557+ return -ENOMEM;
86558+
86559 /*
86560 * We must make sure the anon_vma is allocated
86561 * so that the anon_vma locking is not a noop.
86562 */
86563 if (unlikely(anon_vma_prepare(vma)))
86564 return -ENOMEM;
86565+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
86566+ if (locknext && anon_vma_prepare(vma->vm_next))
86567+ return -ENOMEM;
86568 vma_lock_anon_vma(vma);
86569+ if (locknext)
86570+ vma_lock_anon_vma(vma->vm_next);
86571
86572 /*
86573 * vma->vm_start/vm_end cannot change under us because the caller
86574 * is required to hold the mmap_sem in read mode. We need the
86575- * anon_vma lock to serialize against concurrent expand_stacks.
86576- * Also guard against wrapping around to address 0.
86577+ * anon_vma locks to serialize against concurrent expand_stacks
86578+ * and expand_upwards.
86579 */
86580- if (address < PAGE_ALIGN(address+4))
86581- address = PAGE_ALIGN(address+4);
86582- else {
86583- vma_unlock_anon_vma(vma);
86584- return -ENOMEM;
86585- }
86586 error = 0;
86587
86588 /* Somebody else might have raced and expanded it already */
86589- if (address > vma->vm_end) {
86590+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
86591+ error = -ENOMEM;
86592+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
86593 unsigned long size, grow;
86594
86595 size = address - vma->vm_start;
86596@@ -2167,6 +2497,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
86597 }
86598 }
86599 }
86600+ if (locknext)
86601+ vma_unlock_anon_vma(vma->vm_next);
86602 vma_unlock_anon_vma(vma);
86603 khugepaged_enter_vma_merge(vma);
86604 validate_mm(vma->vm_mm);
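
expand_upwards() gets the complementary check at growth time: a GROWSUP stack may not expand to within sysctl_heap_stack_gap of the next accessible mapping, and when that next vma is itself a GROWSDOWN stack its anon_vma is locked too, so two stacks cannot race-grow into each other. The growth-limit test alone (a sketch):

#include <assert.h>
#include <stdbool.h>

static unsigned long heap_stack_gap = 64 * 1024;

/* May a GROWSUP stack grow so its new end is 'address', given the
 * next accessible mapping starts at next_start? */
static bool may_expand_up(unsigned long address, bool have_next,
                          unsigned long next_start)
{
        if (have_next && next_start - address < heap_stack_gap)
                return false; /* -ENOMEM in the kernel */
        return true;
}

int main(void)
{
        assert(!may_expand_up(0x210000, true, 0x218000)); /* 32K < gap */
        assert(may_expand_up(0x200000, true, 0x218000));  /* 96K: fine */
        return 0;
}
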
86605@@ -2181,6 +2513,8 @@ int expand_downwards(struct vm_area_struct *vma,
86606 unsigned long address)
86607 {
86608 int error;
86609+ bool lockprev = false;
86610+ struct vm_area_struct *prev;
86611
86612 /*
86613 * We must make sure the anon_vma is allocated
86614@@ -2194,6 +2528,15 @@ int expand_downwards(struct vm_area_struct *vma,
86615 if (error)
86616 return error;
86617
86618+ prev = vma->vm_prev;
86619+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
86620+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
86621+#endif
86622+ if (lockprev && anon_vma_prepare(prev))
86623+ return -ENOMEM;
86624+ if (lockprev)
86625+ vma_lock_anon_vma(prev);
86626+
86627 vma_lock_anon_vma(vma);
86628
86629 /*
86630@@ -2203,9 +2546,17 @@ int expand_downwards(struct vm_area_struct *vma,
86631 */
86632
86633 /* Somebody else might have raced and expanded it already */
86634- if (address < vma->vm_start) {
86635+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
86636+ error = -ENOMEM;
86637+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
86638 unsigned long size, grow;
86639
86640+#ifdef CONFIG_PAX_SEGMEXEC
86641+ struct vm_area_struct *vma_m;
86642+
86643+ vma_m = pax_find_mirror_vma(vma);
86644+#endif
86645+
86646 size = vma->vm_end - address;
86647 grow = (vma->vm_start - address) >> PAGE_SHIFT;
86648
86649@@ -2230,13 +2581,27 @@ int expand_downwards(struct vm_area_struct *vma,
86650 vma->vm_pgoff -= grow;
86651 anon_vma_interval_tree_post_update_vma(vma);
86652 vma_gap_update(vma);
86653+
86654+#ifdef CONFIG_PAX_SEGMEXEC
86655+ if (vma_m) {
86656+ anon_vma_interval_tree_pre_update_vma(vma_m);
86657+ vma_m->vm_start -= grow << PAGE_SHIFT;
86658+ vma_m->vm_pgoff -= grow;
86659+ anon_vma_interval_tree_post_update_vma(vma_m);
86660+ vma_gap_update(vma_m);
86661+ }
86662+#endif
86663+
86664 spin_unlock(&vma->vm_mm->page_table_lock);
86665
86666+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
86667 perf_event_mmap(vma);
86668 }
86669 }
86670 }
86671 vma_unlock_anon_vma(vma);
86672+ if (lockprev)
86673+ vma_unlock_anon_vma(prev);
86674 khugepaged_enter_vma_merge(vma);
86675 validate_mm(vma->vm_mm);
86676 return error;
86677@@ -2334,6 +2699,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
86678 do {
86679 long nrpages = vma_pages(vma);
86680
86681+#ifdef CONFIG_PAX_SEGMEXEC
86682+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
86683+ vma = remove_vma(vma);
86684+ continue;
86685+ }
86686+#endif
86687+
86688 if (vma->vm_flags & VM_ACCOUNT)
86689 nr_accounted += nrpages;
86690 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
86691@@ -2356,7 +2728,7 @@ static void unmap_region(struct mm_struct *mm,
86692 struct mmu_gather tlb;
86693
86694 lru_add_drain();
86695- tlb_gather_mmu(&tlb, mm, 0);
86696+ tlb_gather_mmu(&tlb, mm, start, end);
86697 update_hiwater_rss(mm);
86698 unmap_vmas(&tlb, vma, start, end);
86699 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
86700@@ -2379,6 +2751,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
86701 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
86702 vma->vm_prev = NULL;
86703 do {
86704+
86705+#ifdef CONFIG_PAX_SEGMEXEC
86706+ if (vma->vm_mirror) {
86707+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
86708+ vma->vm_mirror->vm_mirror = NULL;
86709+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
86710+ vma->vm_mirror = NULL;
86711+ }
86712+#endif
86713+
86714 vma_rb_erase(vma, &mm->mm_rb);
86715 mm->map_count--;
86716 tail_vma = vma;
86717@@ -2410,14 +2792,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
86718 struct vm_area_struct *new;
86719 int err = -ENOMEM;
86720
86721+#ifdef CONFIG_PAX_SEGMEXEC
86722+ struct vm_area_struct *vma_m, *new_m = NULL;
86723+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
86724+#endif
86725+
86726 if (is_vm_hugetlb_page(vma) && (addr &
86727 ~(huge_page_mask(hstate_vma(vma)))))
86728 return -EINVAL;
86729
86730+#ifdef CONFIG_PAX_SEGMEXEC
86731+ vma_m = pax_find_mirror_vma(vma);
86732+#endif
86733+
86734 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
86735 if (!new)
86736 goto out_err;
86737
86738+#ifdef CONFIG_PAX_SEGMEXEC
86739+ if (vma_m) {
86740+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
86741+ if (!new_m) {
86742+ kmem_cache_free(vm_area_cachep, new);
86743+ goto out_err;
86744+ }
86745+ }
86746+#endif
86747+
86748 /* most fields are the same, copy all, and then fixup */
86749 *new = *vma;
86750
86751@@ -2430,6 +2831,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
86752 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
86753 }
86754
86755+#ifdef CONFIG_PAX_SEGMEXEC
86756+ if (vma_m) {
86757+ *new_m = *vma_m;
86758+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
86759+ new_m->vm_mirror = new;
86760+ new->vm_mirror = new_m;
86761+
86762+ if (new_below)
86763+ new_m->vm_end = addr_m;
86764+ else {
86765+ new_m->vm_start = addr_m;
86766+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
86767+ }
86768+ }
86769+#endif
86770+
86771 pol = mpol_dup(vma_policy(vma));
86772 if (IS_ERR(pol)) {
86773 err = PTR_ERR(pol);
86774@@ -2452,6 +2869,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
86775 else
86776 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
86777
86778+#ifdef CONFIG_PAX_SEGMEXEC
86779+ if (!err && vma_m) {
86780+ if (anon_vma_clone(new_m, vma_m))
86781+ goto out_free_mpol;
86782+
86783+ mpol_get(pol);
86784+ vma_set_policy(new_m, pol);
86785+
86786+ if (new_m->vm_file)
86787+ get_file(new_m->vm_file);
86788+
86789+ if (new_m->vm_ops && new_m->vm_ops->open)
86790+ new_m->vm_ops->open(new_m);
86791+
86792+ if (new_below)
86793+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
86794+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
86795+ else
86796+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
86797+
86798+ if (err) {
86799+ if (new_m->vm_ops && new_m->vm_ops->close)
86800+ new_m->vm_ops->close(new_m);
86801+ if (new_m->vm_file)
86802+ fput(new_m->vm_file);
86803+ mpol_put(pol);
86804+ }
86805+ }
86806+#endif
86807+
86808 /* Success. */
86809 if (!err)
86810 return 0;
86811@@ -2461,10 +2908,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
86812 new->vm_ops->close(new);
86813 if (new->vm_file)
86814 fput(new->vm_file);
86815- unlink_anon_vmas(new);
86816 out_free_mpol:
86817 mpol_put(pol);
86818 out_free_vma:
86819+
86820+#ifdef CONFIG_PAX_SEGMEXEC
86821+ if (new_m) {
86822+ unlink_anon_vmas(new_m);
86823+ kmem_cache_free(vm_area_cachep, new_m);
86824+ }
86825+#endif
86826+
86827+ unlink_anon_vmas(new);
86828 kmem_cache_free(vm_area_cachep, new);
86829 out_err:
86830 return err;
86831@@ -2477,6 +2932,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
86832 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
86833 unsigned long addr, int new_below)
86834 {
86835+
86836+#ifdef CONFIG_PAX_SEGMEXEC
86837+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
86838+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
86839+ if (mm->map_count >= sysctl_max_map_count-1)
86840+ return -ENOMEM;
86841+ } else
86842+#endif
86843+
86844 if (mm->map_count >= sysctl_max_map_count)
86845 return -ENOMEM;
86846
86847@@ -2488,11 +2952,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
86848 * work. This now handles partial unmappings.
86849 * Jeremy Fitzhardinge <jeremy@goop.org>
86850 */
86851+#ifdef CONFIG_PAX_SEGMEXEC
86852 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
86853 {
86854+ int ret = __do_munmap(mm, start, len);
86855+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
86856+ return ret;
86857+
86858+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
86859+}
86860+
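Under SEGMEXEC, do_munmap() becomes a thin wrapper: once the real unmap succeeds, the same range shifted up by SEGMEXEC_TASK_SIZE is unmapped as well, so a mirror never outlives its primary. The wrapper pattern, modeled (model_unmap() is a stand-in for __do_munmap()):

#include <assert.h>
#include <stdbool.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL

static bool segmexec_enabled = true;

static int model_unmap(unsigned long start, unsigned long len)
{
        (void)start; (void)len; /* stand-in; always succeeds here */
        return 0;
}

/* Patched do_munmap(): also drop the mirror of the range. */
static int do_munmap_model(unsigned long start, unsigned long len)
{
        int ret = model_unmap(start, len);
        if (ret || !segmexec_enabled)
                return ret;
        return model_unmap(start + SEGMEXEC_TASK_SIZE, len);
}

int main(void)
{
        assert(do_munmap_model(0x08048000UL, 0x1000) == 0);
        return 0;
}
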
86861+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
86862+#else
86863+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
86864+#endif
86865+{
86866 unsigned long end;
86867 struct vm_area_struct *vma, *prev, *last;
86868
86869+ /*
86870+ * mm->mmap_sem is required to protect against another thread
86871+ * changing the mappings in case we sleep.
86872+ */
86873+ verify_mm_writelocked(mm);
86874+
86875 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
86876 return -EINVAL;
86877
86878@@ -2567,6 +3050,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
86879 /* Fix up all other VM information */
86880 remove_vma_list(mm, vma);
86881
86882+ track_exec_limit(mm, start, end, 0UL);
86883+
86884 return 0;
86885 }
86886
86887@@ -2575,6 +3060,13 @@ int vm_munmap(unsigned long start, size_t len)
86888 int ret;
86889 struct mm_struct *mm = current->mm;
86890
86891+
86892+#ifdef CONFIG_PAX_SEGMEXEC
86893+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
86894+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
86895+ return -EINVAL;
86896+#endif
86897+
86898 down_write(&mm->mmap_sem);
86899 ret = do_munmap(mm, start, len);
86900 up_write(&mm->mmap_sem);
86901@@ -2588,16 +3080,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
86902 return vm_munmap(addr, len);
86903 }
86904
86905-static inline void verify_mm_writelocked(struct mm_struct *mm)
86906-{
86907-#ifdef CONFIG_DEBUG_VM
86908- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
86909- WARN_ON(1);
86910- up_read(&mm->mmap_sem);
86911- }
86912-#endif
86913-}
86914-
86915 /*
86916 * this is really a simplified "do_mmap". it only handles
86917 * anonymous maps. eventually we may be able to do some
86918@@ -2611,6 +3093,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
86919 struct rb_node ** rb_link, * rb_parent;
86920 pgoff_t pgoff = addr >> PAGE_SHIFT;
86921 int error;
86922+ unsigned long charged;
86923
86924 len = PAGE_ALIGN(len);
86925 if (!len)
86926@@ -2618,16 +3101,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
86927
86928 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
86929
86930+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
86931+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
86932+ flags &= ~VM_EXEC;
86933+
86934+#ifdef CONFIG_PAX_MPROTECT
86935+ if (mm->pax_flags & MF_PAX_MPROTECT)
86936+ flags &= ~VM_MAYEXEC;
86937+#endif
86938+
86939+ }
86940+#endif
86941+
86942 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
86943 if (error & ~PAGE_MASK)
86944 return error;
86945
86946+ charged = len >> PAGE_SHIFT;
86947+
86948 /*
86949 * mlock MCL_FUTURE?
86950 */
86951 if (mm->def_flags & VM_LOCKED) {
86952 unsigned long locked, lock_limit;
86953- locked = len >> PAGE_SHIFT;
86954+ locked = charged;
86955 locked += mm->locked_vm;
86956 lock_limit = rlimit(RLIMIT_MEMLOCK);
86957 lock_limit >>= PAGE_SHIFT;
86958@@ -2644,21 +3141,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
86959 /*
86960 * Clear old maps. this also does some error checking for us
86961 */
86962- munmap_back:
86963 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
86964 if (do_munmap(mm, addr, len))
86965 return -ENOMEM;
86966- goto munmap_back;
86967+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
86968 }
86969
86970 /* Check against address space limits *after* clearing old maps... */
86971- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
86972+ if (!may_expand_vm(mm, charged))
86973 return -ENOMEM;
86974
86975 if (mm->map_count > sysctl_max_map_count)
86976 return -ENOMEM;
86977
86978- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
86979+ if (security_vm_enough_memory_mm(mm, charged))
86980 return -ENOMEM;
86981
86982 /* Can we just expand an old private anonymous mapping? */
86983@@ -2672,7 +3168,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
86984 */
86985 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
86986 if (!vma) {
86987- vm_unacct_memory(len >> PAGE_SHIFT);
86988+ vm_unacct_memory(charged);
86989 return -ENOMEM;
86990 }
86991
86992@@ -2686,9 +3182,10 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
86993 vma_link(mm, vma, prev, rb_link, rb_parent);
86994 out:
86995 perf_event_mmap(vma);
86996- mm->total_vm += len >> PAGE_SHIFT;
86997+ mm->total_vm += charged;
86998 if (flags & VM_LOCKED)
86999- mm->locked_vm += (len >> PAGE_SHIFT);
87000+ mm->locked_vm += charged;
87001+ track_exec_limit(mm, addr, addr + len, flags);
87002 return addr;
87003 }
87004
87005@@ -2735,7 +3232,7 @@ void exit_mmap(struct mm_struct *mm)
87006
87007 lru_add_drain();
87008 flush_cache_mm(mm);
87009- tlb_gather_mmu(&tlb, mm, 1);
87010+ tlb_gather_mmu(&tlb, mm, 0, -1);
87011 /* update_hiwater_rss(mm) here? but nobody should be looking */
87012 /* Use -1 here to ensure all VMAs in the mm are unmapped */
87013 unmap_vmas(&tlb, vma, 0, -1);
87014@@ -2750,6 +3247,7 @@ void exit_mmap(struct mm_struct *mm)
87015 while (vma) {
87016 if (vma->vm_flags & VM_ACCOUNT)
87017 nr_accounted += vma_pages(vma);
87018+ vma->vm_mirror = NULL;
87019 vma = remove_vma(vma);
87020 }
87021 vm_unacct_memory(nr_accounted);
87022@@ -2766,6 +3264,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
87023 struct vm_area_struct *prev;
87024 struct rb_node **rb_link, *rb_parent;
87025
87026+#ifdef CONFIG_PAX_SEGMEXEC
87027+ struct vm_area_struct *vma_m = NULL;
87028+#endif
87029+
87030+ if (security_mmap_addr(vma->vm_start))
87031+ return -EPERM;
87032+
87033 /*
87034 * The vm_pgoff of a purely anonymous vma should be irrelevant
87035 * until its first write fault, when page's anon_vma and index
87036@@ -2789,7 +3294,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
87037 security_vm_enough_memory_mm(mm, vma_pages(vma)))
87038 return -ENOMEM;
87039
87040+#ifdef CONFIG_PAX_SEGMEXEC
87041+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
87042+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
87043+ if (!vma_m)
87044+ return -ENOMEM;
87045+ }
87046+#endif
87047+
87048 vma_link(mm, vma, prev, rb_link, rb_parent);
87049+
87050+#ifdef CONFIG_PAX_SEGMEXEC
87051+ if (vma_m)
87052+ BUG_ON(pax_mirror_vma(vma_m, vma));
87053+#endif
87054+
87055 return 0;
87056 }
87057
87058@@ -2809,6 +3328,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
87059 struct mempolicy *pol;
87060 bool faulted_in_anon_vma = true;
87061
87062+ BUG_ON(vma->vm_mirror);
87063+
87064 /*
87065 * If anonymous vma has not yet been faulted, update new pgoff
87066 * to match new location, to increase its chance of merging.
87067@@ -2875,6 +3396,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
87068 return NULL;
87069 }
87070
87071+#ifdef CONFIG_PAX_SEGMEXEC
87072+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
87073+{
87074+ struct vm_area_struct *prev_m;
87075+ struct rb_node **rb_link_m, *rb_parent_m;
87076+ struct mempolicy *pol_m;
87077+
87078+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
87079+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
87080+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
87081+ *vma_m = *vma;
87082+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
87083+ if (anon_vma_clone(vma_m, vma))
87084+ return -ENOMEM;
87085+ pol_m = vma_policy(vma_m);
87086+ mpol_get(pol_m);
87087+ vma_set_policy(vma_m, pol_m);
87088+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
87089+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
87090+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
87091+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
87092+ if (vma_m->vm_file)
87093+ get_file(vma_m->vm_file);
87094+ if (vma_m->vm_ops && vma_m->vm_ops->open)
87095+ vma_m->vm_ops->open(vma_m);
87096+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
87097+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
87098+ vma_m->vm_mirror = vma;
87099+ vma->vm_mirror = vma_m;
87100+ return 0;
87101+}
87102+#endif
87103+
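pax_mirror_vma() above is where a mirror is created: it copies the primary vma, shifts both endpoints up by SEGMEXEC_TASK_SIZE, strips the write and accounting bits (the mirror is the execute-only view), takes its own file and ops references, links it into the tree, and cross-wires vm_mirror in both directions. The field transformation in miniature (a sketch with a toy struct, not the kernel's vm_area_struct):

#include <assert.h>
#include <stddef.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL
#define VM_WRITE    0x2UL
#define VM_MAYWRITE 0x20UL

struct mini_vma {
        unsigned long start, end, flags;
        struct mini_vma *mirror;
};

/* Sketch of pax_mirror_vma(): *m becomes the execute-only shadow. */
static void mirror_vma(struct mini_vma *m, struct mini_vma *v)
{
        *m = *v;
        m->start += SEGMEXEC_TASK_SIZE;
        m->end += SEGMEXEC_TASK_SIZE;
        m->flags &= ~(VM_WRITE | VM_MAYWRITE); /* never writable */
        m->mirror = v;
        v->mirror = m;
}

int main(void)
{
        struct mini_vma v = { 0x08048000, 0x08049000,
                              VM_WRITE | VM_MAYWRITE, NULL };
        struct mini_vma m;
        mirror_vma(&m, &v);
        assert(m.start == v.start + SEGMEXEC_TASK_SIZE);
        assert(!(m.flags & VM_WRITE) && v.mirror == &m && m.mirror == &v);
        return 0;
}
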
87104 /*
87105 * Return true if the calling process may expand its vm space by the passed
87106 * number of pages
87107@@ -2886,6 +3440,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
87108
87109 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
87110
87111+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
87112 if (cur + npages > lim)
87113 return 0;
87114 return 1;
87115@@ -2956,6 +3511,22 @@ int install_special_mapping(struct mm_struct *mm,
87116 vma->vm_start = addr;
87117 vma->vm_end = addr + len;
87118
87119+#ifdef CONFIG_PAX_MPROTECT
87120+ if (mm->pax_flags & MF_PAX_MPROTECT) {
87121+#ifndef CONFIG_PAX_MPROTECT_COMPAT
87122+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
87123+ return -EPERM;
87124+ if (!(vm_flags & VM_EXEC))
87125+ vm_flags &= ~VM_MAYEXEC;
87126+#else
87127+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
87128+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
87129+#endif
87130+ else
87131+ vm_flags &= ~VM_MAYWRITE;
87132+ }
87133+#endif
87134+
87135 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
87136 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
87137
87138diff --git a/mm/mprotect.c b/mm/mprotect.c
87139index 94722a4..e661e29 100644
87140--- a/mm/mprotect.c
87141+++ b/mm/mprotect.c
87142@@ -23,10 +23,18 @@
87143 #include <linux/mmu_notifier.h>
87144 #include <linux/migrate.h>
87145 #include <linux/perf_event.h>
87146+#include <linux/sched/sysctl.h>
87147+
87148+#ifdef CONFIG_PAX_MPROTECT
87149+#include <linux/elf.h>
87150+#include <linux/binfmts.h>
87151+#endif
87152+
87153 #include <asm/uaccess.h>
87154 #include <asm/pgtable.h>
87155 #include <asm/cacheflush.h>
87156 #include <asm/tlbflush.h>
87157+#include <asm/mmu_context.h>
87158
87159 #ifndef pgprot_modify
87160 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
87161@@ -233,6 +241,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
87162 return pages;
87163 }
87164
87165+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
87166+/* called while holding the mmap semaphore for writing, except during stack expansion */
87167+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
87168+{
87169+ unsigned long oldlimit, newlimit = 0UL;
87170+
87171+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
87172+ return;
87173+
87174+ spin_lock(&mm->page_table_lock);
87175+ oldlimit = mm->context.user_cs_limit;
87176+ if ((prot & VM_EXEC) && oldlimit < end)
87177+ /* USER_CS limit moved up */
87178+ newlimit = end;
87179+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
87180+ /* USER_CS limit moved down */
87181+ newlimit = start;
87182+
87183+ if (newlimit) {
87184+ mm->context.user_cs_limit = newlimit;
87185+
87186+#ifdef CONFIG_SMP
87187+ wmb();
87188+ cpus_clear(mm->context.cpu_user_cs_mask);
87189+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
87190+#endif
87191+
87192+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
87193+ }
87194+ spin_unlock(&mm->page_table_lock);
87195+ if (newlimit == end) {
87196+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
87197+
87198+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
87199+ if (is_vm_hugetlb_page(vma))
87200+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
87201+ else
87202+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
87203+ }
87204+}
87205+#endif
87206+
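track_exec_limit() above drives PAGEEXEC's segment-based NX emulation on x86-32 CPUs without hardware NX: the user code segment's limit is kept just above the highest executable mapping, so instruction fetches beyond it fault even though the page tables cannot express non-executability. The limit-update rule in isolation (the CS reload and cross-CPU mask handling are arch code; this is a sketch):

#include <assert.h>
#include <stdbool.h>

/* New user CS limit after [start, end) changes executability;
 * returns 0 when the limit need not move. */
static unsigned long new_cs_limit(unsigned long oldlimit,
                                  unsigned long start, unsigned long end,
                                  bool exec)
{
        if (exec && oldlimit < end)
                return end;   /* exec range above the limit: raise it */
        if (!exec && start < oldlimit && oldlimit <= end)
                return start; /* exec stripped at the top: lower it */
        return 0;
}

int main(void)
{
        assert(new_cs_limit(0x08050000, 0x40000000, 0x40001000, true) ==
               0x40001000);
        assert(new_cs_limit(0x40001000, 0x40000000, 0x40001000, false) ==
               0x40000000);
        assert(new_cs_limit(0x40001000, 0x08048000, 0x08049000, true) == 0);
        return 0;
}
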
87207 int
87208 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
87209 unsigned long start, unsigned long end, unsigned long newflags)
87210@@ -245,11 +295,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
87211 int error;
87212 int dirty_accountable = 0;
87213
87214+#ifdef CONFIG_PAX_SEGMEXEC
87215+ struct vm_area_struct *vma_m = NULL;
87216+ unsigned long start_m, end_m;
87217+
87218+ start_m = start + SEGMEXEC_TASK_SIZE;
87219+ end_m = end + SEGMEXEC_TASK_SIZE;
87220+#endif
87221+
87222 if (newflags == oldflags) {
87223 *pprev = vma;
87224 return 0;
87225 }
87226
87227+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
87228+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
87229+
87230+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
87231+ return -ENOMEM;
87232+
87233+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
87234+ return -ENOMEM;
87235+ }
87236+
87237 /*
87238 * If we make a private mapping writable we increase our commit;
87239 * but (without finer accounting) cannot reduce our commit if we
87240@@ -266,6 +334,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
87241 }
87242 }
87243
87244+#ifdef CONFIG_PAX_SEGMEXEC
87245+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
87246+ if (start != vma->vm_start) {
87247+ error = split_vma(mm, vma, start, 1);
87248+ if (error)
87249+ goto fail;
87250+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
87251+ *pprev = (*pprev)->vm_next;
87252+ }
87253+
87254+ if (end != vma->vm_end) {
87255+ error = split_vma(mm, vma, end, 0);
87256+ if (error)
87257+ goto fail;
87258+ }
87259+
87260+ if (pax_find_mirror_vma(vma)) {
87261+ error = __do_munmap(mm, start_m, end_m - start_m);
87262+ if (error)
87263+ goto fail;
87264+ } else {
87265+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
87266+ if (!vma_m) {
87267+ error = -ENOMEM;
87268+ goto fail;
87269+ }
87270+ vma->vm_flags = newflags;
87271+ error = pax_mirror_vma(vma_m, vma);
87272+ if (error) {
87273+ vma->vm_flags = oldflags;
87274+ goto fail;
87275+ }
87276+ }
87277+ }
87278+#endif
87279+
87280 /*
87281 * First try to merge with previous and/or next vma.
87282 */
87283@@ -296,9 +400,21 @@ success:
87284 * vm_flags and vm_page_prot are protected by the mmap_sem
87285 * held in write mode.
87286 */
87287+
87288+#ifdef CONFIG_PAX_SEGMEXEC
87289+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
87290+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
87291+#endif
87292+
87293 vma->vm_flags = newflags;
87294+
87295+#ifdef CONFIG_PAX_MPROTECT
87296+ if (mm->binfmt && mm->binfmt->handle_mprotect)
87297+ mm->binfmt->handle_mprotect(vma, newflags);
87298+#endif
87299+
87300 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
87301- vm_get_page_prot(newflags));
87302+ vm_get_page_prot(vma->vm_flags));
87303
87304 if (vma_wants_writenotify(vma)) {
87305 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
87306@@ -337,6 +453,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
87307 end = start + len;
87308 if (end <= start)
87309 return -ENOMEM;
87310+
87311+#ifdef CONFIG_PAX_SEGMEXEC
87312+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
87313+ if (end > SEGMEXEC_TASK_SIZE)
87314+ return -EINVAL;
87315+ } else
87316+#endif
87317+
87318+ if (end > TASK_SIZE)
87319+ return -EINVAL;
87320+
87321 if (!arch_validate_prot(prot))
87322 return -EINVAL;
87323
87324@@ -344,7 +471,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
87325 /*
87326 * Does the application expect PROT_READ to imply PROT_EXEC:
87327 */
87328- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
87329+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
87330 prot |= PROT_EXEC;
87331
87332 vm_flags = calc_vm_prot_bits(prot);
87333@@ -376,6 +503,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
87334 if (start > vma->vm_start)
87335 prev = vma;
87336
87337+#ifdef CONFIG_PAX_MPROTECT
87338+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
87339+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
87340+#endif
87341+
87342 for (nstart = start ; ; ) {
87343 unsigned long newflags;
87344
87345@@ -386,6 +518,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
87346
87347 /* newflags >> 4 shift VM_MAY% in place of VM_% */
87348 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
87349+ if (prot & (PROT_WRITE | PROT_EXEC))
87350+ gr_log_rwxmprotect(vma);
87351+
87352+ error = -EACCES;
87353+ goto out;
87354+ }
87355+
87356+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
87357 error = -EACCES;
87358 goto out;
87359 }
87360@@ -400,6 +540,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
87361 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
87362 if (error)
87363 goto out;
87364+
87365+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
87366+
87367 nstart = tmp;
87368
87369 if (nstart < prev->vm_end)
87370diff --git a/mm/mremap.c b/mm/mremap.c
87371index 463a257..c0c7a92 100644
87372--- a/mm/mremap.c
87373+++ b/mm/mremap.c
87374@@ -126,6 +126,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
87375 continue;
87376 pte = ptep_get_and_clear(mm, old_addr, old_pte);
87377 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
87378+
87379+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
87380+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
87381+ pte = pte_exprotect(pte);
87382+#endif
87383+
87384 set_pte_at(mm, new_addr, new_pte, pte);
87385 }
87386
87387@@ -318,6 +324,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
87388 if (is_vm_hugetlb_page(vma))
87389 goto Einval;
87390
87391+#ifdef CONFIG_PAX_SEGMEXEC
87392+ if (pax_find_mirror_vma(vma))
87393+ goto Einval;
87394+#endif
87395+
87396 /* We can't remap across vm area boundaries */
87397 if (old_len > vma->vm_end - addr)
87398 goto Efault;
87399@@ -373,20 +384,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
87400 unsigned long ret = -EINVAL;
87401 unsigned long charged = 0;
87402 unsigned long map_flags;
87403+ unsigned long pax_task_size = TASK_SIZE;
87404
87405 if (new_addr & ~PAGE_MASK)
87406 goto out;
87407
87408- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
87409+#ifdef CONFIG_PAX_SEGMEXEC
87410+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
87411+ pax_task_size = SEGMEXEC_TASK_SIZE;
87412+#endif
87413+
87414+ pax_task_size -= PAGE_SIZE;
87415+
87416+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
87417 goto out;
87418
87419 /* Check if the location we're moving into overlaps the
87420 * old location at all, and fail if it does.
87421 */
87422- if ((new_addr <= addr) && (new_addr+new_len) > addr)
87423- goto out;
87424-
87425- if ((addr <= new_addr) && (addr+old_len) > new_addr)
87426+ if (addr + old_len > new_addr && new_addr + new_len > addr)
87427 goto out;
87428
87429 ret = do_munmap(mm, new_addr, new_len);
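
The two one-sided checks are folded into the standard half-open interval-overlap test: [addr, addr+old_len) and [new_addr, new_addr+new_len) intersect exactly when each starts below the other's end. The idiom, verified on the boundary case:

#include <assert.h>
#include <stdbool.h>

/* Half-open [a0, a1) and [b0, b1) overlap iff each begins
 * before the other ends. */
static bool overlaps(unsigned long a0, unsigned long a1,
                     unsigned long b0, unsigned long b1)
{
        return a1 > b0 && b1 > a0;
}

int main(void)
{
        assert(overlaps(0x1000, 0x3000, 0x2000, 0x4000));  /* partial */
        assert(!overlaps(0x1000, 0x2000, 0x2000, 0x3000)); /* adjacent */
        return 0;
}
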
87430@@ -455,6 +471,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
87431 unsigned long ret = -EINVAL;
87432 unsigned long charged = 0;
87433 bool locked = false;
87434+ unsigned long pax_task_size = TASK_SIZE;
87435
87436 down_write(&current->mm->mmap_sem);
87437
87438@@ -475,6 +492,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
87439 if (!new_len)
87440 goto out;
87441
87442+#ifdef CONFIG_PAX_SEGMEXEC
87443+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
87444+ pax_task_size = SEGMEXEC_TASK_SIZE;
87445+#endif
87446+
87447+ pax_task_size -= PAGE_SIZE;
87448+
87449+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
87450+ old_len > pax_task_size || addr > pax_task_size-old_len)
87451+ goto out;
87452+
87453 if (flags & MREMAP_FIXED) {
87454 if (flags & MREMAP_MAYMOVE)
87455 ret = mremap_to(addr, old_len, new_addr, new_len,
87456@@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
87457 new_addr = addr;
87458 }
87459 ret = addr;
87460+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
87461 goto out;
87462 }
87463 }
87464@@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
87465 goto out;
87466 }
87467
87468+ map_flags = vma->vm_flags;
87469 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
87470+ if (!(ret & ~PAGE_MASK)) {
87471+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
87472+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
87473+ }
87474 }
87475 out:
87476 if (ret & ~PAGE_MASK)
87477diff --git a/mm/nommu.c b/mm/nommu.c
87478index 298884d..5f74980 100644
87479--- a/mm/nommu.c
87480+++ b/mm/nommu.c
87481@@ -65,7 +65,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
87482 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
87483 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
87484 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
87485-int heap_stack_gap = 0;
87486
87487 atomic_long_t mmap_pages_allocated;
87488
87489@@ -842,15 +841,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
87490 EXPORT_SYMBOL(find_vma);
87491
87492 /*
87493- * find a VMA
87494- * - we don't extend stack VMAs under NOMMU conditions
87495- */
87496-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
87497-{
87498- return find_vma(mm, addr);
87499-}
87500-
87501-/*
87502 * expand a stack to a given address
87503 * - not supported under NOMMU conditions
87504 */
87505@@ -1561,6 +1551,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
87506
87507 /* most fields are the same, copy all, and then fixup */
87508 *new = *vma;
87509+ INIT_LIST_HEAD(&new->anon_vma_chain);
87510 *region = *vma->vm_region;
87511 new->vm_region = region;
87512
87513@@ -1995,8 +1986,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
87514 }
87515 EXPORT_SYMBOL(generic_file_remap_pages);
87516
87517-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
87518- unsigned long addr, void *buf, int len, int write)
87519+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
87520+ unsigned long addr, void *buf, size_t len, int write)
87521 {
87522 struct vm_area_struct *vma;
87523
87524@@ -2037,8 +2028,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
87525 *
87526 * The caller must hold a reference on @mm.
87527 */
87528-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
87529- void *buf, int len, int write)
87530+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
87531+ void *buf, size_t len, int write)
87532 {
87533 return __access_remote_vm(NULL, mm, addr, buf, len, write);
87534 }
87535@@ -2047,7 +2038,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
87536 * Access another process' address space.
87537 * - source/target buffer must be kernel space
87538 */
87539-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
87540+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
87541 {
87542 struct mm_struct *mm;
87543
87544diff --git a/mm/page-writeback.c b/mm/page-writeback.c
87545index 4514ad7..92eaa1c 100644
87546--- a/mm/page-writeback.c
87547+++ b/mm/page-writeback.c
87548@@ -659,7 +659,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
87549 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
87550 * - the bdi dirty thresh drops quickly due to change of JBOD workload
87551 */
87552-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
87553+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
87554 unsigned long thresh,
87555 unsigned long bg_thresh,
87556 unsigned long dirty,
87557@@ -1634,7 +1634,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
87558 }
87559 }
87560
87561-static struct notifier_block __cpuinitdata ratelimit_nb = {
87562+static struct notifier_block ratelimit_nb = {
87563 .notifier_call = ratelimit_handler,
87564 .next = NULL,
87565 };
87566diff --git a/mm/page_alloc.c b/mm/page_alloc.c
87567index 2ee0fd3..6e2edfb 100644
87568--- a/mm/page_alloc.c
87569+++ b/mm/page_alloc.c
87570@@ -60,6 +60,7 @@
87571 #include <linux/page-debug-flags.h>
87572 #include <linux/hugetlb.h>
87573 #include <linux/sched/rt.h>
87574+#include <linux/random.h>
87575
87576 #include <asm/tlbflush.h>
87577 #include <asm/div64.h>
87578@@ -345,7 +346,7 @@ out:
87579 * This usage means that zero-order pages may not be compound.
87580 */
87581
87582-static void free_compound_page(struct page *page)
87583+void free_compound_page(struct page *page)
87584 {
87585 __free_pages_ok(page, compound_order(page));
87586 }
87587@@ -702,6 +703,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
87588 int i;
87589 int bad = 0;
87590
87591+#ifdef CONFIG_PAX_MEMORY_SANITIZE
87592+ unsigned long index = 1UL << order;
87593+#endif
87594+
87595 trace_mm_page_free(page, order);
87596 kmemcheck_free_shadow(page, order);
87597
87598@@ -717,6 +722,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
87599 debug_check_no_obj_freed(page_address(page),
87600 PAGE_SIZE << order);
87601 }
87602+
87603+#ifdef CONFIG_PAX_MEMORY_SANITIZE
87604+ for (; index; --index)
87605+ sanitize_highpage(page + index - 1);
87606+#endif
87607+
87608 arch_free_page(page, order);
87609 kernel_map_pages(page, 1 << order, 0);
87610
87611@@ -739,6 +750,19 @@ static void __free_pages_ok(struct page *page, unsigned int order)
87612 local_irq_restore(flags);
87613 }
87614
87615+#ifdef CONFIG_PAX_LATENT_ENTROPY
87616+bool __meminitdata extra_latent_entropy;
87617+
87618+static int __init setup_pax_extra_latent_entropy(char *str)
87619+{
87620+ extra_latent_entropy = true;
87621+ return 0;
87622+}
87623+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
87624+
87625+volatile u64 latent_entropy;
87626+#endif
87627+
87628 /*
87629 * Read access to zone->managed_pages is safe because it's unsigned long,
87630 * but we still need to serialize writers. Currently all callers of
87631@@ -761,6 +785,19 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
87632 set_page_count(p, 0);
87633 }
87634
87635+#ifdef CONFIG_PAX_LATENT_ENTROPY
87636+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
87637+ u64 hash = 0;
87638+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
87639+ const u64 *data = lowmem_page_address(page);
87640+
87641+ for (index = 0; index < end; index++)
87642+ hash ^= hash + data[index];
87643+ latent_entropy ^= hash;
87644+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
87645+ }
87646+#endif
87647+
87648 page_zone(page)->managed_pages += 1 << order;
87649 set_page_refcounted(page);
87650 __free_pages(page, order);
87651@@ -870,8 +907,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
87652 arch_alloc_page(page, order);
87653 kernel_map_pages(page, 1 << order, 1);
87654
87655+#ifndef CONFIG_PAX_MEMORY_SANITIZE
87656 if (gfp_flags & __GFP_ZERO)
87657 prep_zero_page(page, order, gfp_flags);
87658+#endif
87659
87660 if (order && (gfp_flags & __GFP_COMP))
87661 prep_compound_page(page, order);
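Taken together, the page_alloc.c hunks above make CONFIG_PAX_MEMORY_SANITIZE scrub every page of an order-N block at free time (the sanitize_highpage() loop in free_pages_prepare()) and then skip the now-redundant __GFP_ZERO clear in prep_new_page(), since a sanitized page is already wiped; the latent-entropy hunks separately fold the contents of early boot pages into the randomness pool. A toy userspace allocator showing the same scrub-on-free idea — the 0xFE poison mirrors the PAX_MEMORY_SANITIZE_VALUE defined later in mm/slab.h, everything else is illustrative:

/* Toy scrub-on-free allocator (illustrative only, not kernel code). */
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#define POISON 0xFE   /* mirrors the x86_64 PAX_MEMORY_SANITIZE_VALUE */

struct toy_block { size_t size; unsigned char data[]; };

static void *toy_alloc(size_t n)
{
    struct toy_block *b = malloc(sizeof *b + n);
    if (!b)
        return NULL;
    b->size = n;
    return b->data;
}

static void toy_free(void *p)
{
    struct toy_block *b =
        (struct toy_block *)((char *)p - offsetof(struct toy_block, data));
    memset(p, POISON, b->size);   /* scrub before the block can be reused */
    free(b);
}

int main(void)
{
    char *s = toy_alloc(16);
    strcpy(s, "secret");
    toy_free(s);                  /* contents are wiped, not just unlinked */
    return 0;
}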
87662diff --git a/mm/page_io.c b/mm/page_io.c
87663index a8a3ef4..7260a60 100644
87664--- a/mm/page_io.c
87665+++ b/mm/page_io.c
87666@@ -214,7 +214,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
87667 struct file *swap_file = sis->swap_file;
87668 struct address_space *mapping = swap_file->f_mapping;
87669 struct iovec iov = {
87670- .iov_base = kmap(page),
87671+ .iov_base = (void __force_user *)kmap(page),
87672 .iov_len = PAGE_SIZE,
87673 };
87674
87675diff --git a/mm/percpu.c b/mm/percpu.c
87676index 8c8e08f..73a5cda 100644
87677--- a/mm/percpu.c
87678+++ b/mm/percpu.c
87679@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
87680 static unsigned int pcpu_high_unit_cpu __read_mostly;
87681
87682 /* the address of the first chunk which starts with the kernel static area */
87683-void *pcpu_base_addr __read_mostly;
87684+void *pcpu_base_addr __read_only;
87685 EXPORT_SYMBOL_GPL(pcpu_base_addr);
87686
87687 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
87688diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
87689index fd26d04..0cea1b0 100644
87690--- a/mm/process_vm_access.c
87691+++ b/mm/process_vm_access.c
87692@@ -13,6 +13,7 @@
87693 #include <linux/uio.h>
87694 #include <linux/sched.h>
87695 #include <linux/highmem.h>
87696+#include <linux/security.h>
87697 #include <linux/ptrace.h>
87698 #include <linux/slab.h>
87699 #include <linux/syscalls.h>
87700@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
87701 size_t iov_l_curr_offset = 0;
87702 ssize_t iov_len;
87703
87704+ return -ENOSYS; // PaX: until properly audited
87705+
87706 /*
87707 * Work out how many pages of struct pages we're going to need
87708 * when eventually calling get_user_pages
87709 */
87710 for (i = 0; i < riovcnt; i++) {
87711 iov_len = rvec[i].iov_len;
87712- if (iov_len > 0) {
87713- nr_pages_iov = ((unsigned long)rvec[i].iov_base
87714- + iov_len)
87715- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
87716- / PAGE_SIZE + 1;
87717- nr_pages = max(nr_pages, nr_pages_iov);
87718- }
87719+ if (iov_len <= 0)
87720+ continue;
87721+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
87722+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
87723+ nr_pages = max(nr_pages, nr_pages_iov);
87724 }
87725
87726 if (nr_pages == 0)
87727@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
87728 goto free_proc_pages;
87729 }
87730
87731+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
87732+ rc = -EPERM;
87733+ goto put_task_struct;
87734+ }
87735+
87736 mm = mm_access(task, PTRACE_MODE_ATTACH);
87737 if (!mm || IS_ERR(mm)) {
87738 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
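The rewritten loop above is a restructuring (early continue instead of nesting) plus the hard -ENOSYS disable at the top and the gr_handle_ptrace() gate; the page-count formula itself is unchanged: the pages an iovec straddles are last page index minus first page index plus one. A small standalone check of that formula:

/* Sketch of the nr_pages_iov computation from process_vm_rw_core(). */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
    return (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;
}

int main(void)
{
    /* 100 bytes entirely inside one page */
    printf("%lu\n", pages_spanned(4096 + 100, 100));   /* 1 */
    /* 100 bytes crossing a page boundary */
    printf("%lu\n", pages_spanned(8192 - 50, 100));    /* 2 */
    /* one full page, page-aligned: counted as 2 (the formula is conservative) */
    printf("%lu\n", pages_spanned(4096, 4096));        /* 2 */
    return 0;
}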
87739diff --git a/mm/rmap.c b/mm/rmap.c
87740index 6280da8..b5c090e 100644
87741--- a/mm/rmap.c
87742+++ b/mm/rmap.c
87743@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
87744 struct anon_vma *anon_vma = vma->anon_vma;
87745 struct anon_vma_chain *avc;
87746
87747+#ifdef CONFIG_PAX_SEGMEXEC
87748+ struct anon_vma_chain *avc_m = NULL;
87749+#endif
87750+
87751 might_sleep();
87752 if (unlikely(!anon_vma)) {
87753 struct mm_struct *mm = vma->vm_mm;
87754@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
87755 if (!avc)
87756 goto out_enomem;
87757
87758+#ifdef CONFIG_PAX_SEGMEXEC
87759+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
87760+ if (!avc_m)
87761+ goto out_enomem_free_avc;
87762+#endif
87763+
87764 anon_vma = find_mergeable_anon_vma(vma);
87765 allocated = NULL;
87766 if (!anon_vma) {
87767@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
87768 /* page_table_lock to protect against threads */
87769 spin_lock(&mm->page_table_lock);
87770 if (likely(!vma->anon_vma)) {
87771+
87772+#ifdef CONFIG_PAX_SEGMEXEC
87773+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
87774+
87775+ if (vma_m) {
87776+ BUG_ON(vma_m->anon_vma);
87777+ vma_m->anon_vma = anon_vma;
87778+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
87779+ avc_m = NULL;
87780+ }
87781+#endif
87782+
87783 vma->anon_vma = anon_vma;
87784 anon_vma_chain_link(vma, avc, anon_vma);
87785 allocated = NULL;
87786@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
87787
87788 if (unlikely(allocated))
87789 put_anon_vma(allocated);
87790+
87791+#ifdef CONFIG_PAX_SEGMEXEC
87792+ if (unlikely(avc_m))
87793+ anon_vma_chain_free(avc_m);
87794+#endif
87795+
87796 if (unlikely(avc))
87797 anon_vma_chain_free(avc);
87798 }
87799 return 0;
87800
87801 out_enomem_free_avc:
87802+
87803+#ifdef CONFIG_PAX_SEGMEXEC
87804+ if (avc_m)
87805+ anon_vma_chain_free(avc_m);
87806+#endif
87807+
87808 anon_vma_chain_free(avc);
87809 out_enomem:
87810 return -ENOMEM;
87811@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
87812 * Attach the anon_vmas from src to dst.
87813 * Returns 0 on success, -ENOMEM on failure.
87814 */
87815-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
87816+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
87817 {
87818 struct anon_vma_chain *avc, *pavc;
87819 struct anon_vma *root = NULL;
87820@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
87821 * the corresponding VMA in the parent process is attached to.
87822 * Returns 0 on success, non-zero on failure.
87823 */
87824-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
87825+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
87826 {
87827 struct anon_vma_chain *avc;
87828 struct anon_vma *anon_vma;
87829@@ -373,8 +407,10 @@ static void anon_vma_ctor(void *data)
87830 void __init anon_vma_init(void)
87831 {
87832 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
87833- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
87834- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
87835+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
87836+ anon_vma_ctor);
87837+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
87838+ SLAB_PANIC|SLAB_NO_SANITIZE);
87839 }
87840
87841 /*
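The SEGMEXEC hunks above follow the classic allocate-outside-the-lock pattern: avc_m is allocated with GFP_KERNEL before page_table_lock is taken, consumed under the lock only if a mirror VMA exists, and freed afterwards if it went unused, so no sleeping allocation ever happens inside the spinlock. A hedged userspace sketch of the same pattern with a pthread mutex (all names illustrative):

/* Pre-allocate outside the lock, consume or discard under/after it. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static int insert_once(int val)
{
    struct node *n = malloc(sizeof *n);   /* may "sleep": done unlocked */
    if (!n)
        return -1;
    n->val = val;

    pthread_mutex_lock(&lock);
    if (!head) {                          /* slot still unclaimed: consume */
        n->next = NULL;
        head = n;
        n = NULL;
    }
    pthread_mutex_unlock(&lock);

    free(n);                              /* lost the race: discard unused */
    return 0;
}

int main(void)
{
    insert_once(1);
    insert_once(2);                       /* second call frees its node */
    printf("head->val = %d\n", head->val);
    return 0;
}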
87842diff --git a/mm/shmem.c b/mm/shmem.c
87843index 5e6a842..b41916e 100644
87844--- a/mm/shmem.c
87845+++ b/mm/shmem.c
87846@@ -33,7 +33,7 @@
87847 #include <linux/swap.h>
87848 #include <linux/aio.h>
87849
87850-static struct vfsmount *shm_mnt;
87851+struct vfsmount *shm_mnt;
87852
87853 #ifdef CONFIG_SHMEM
87854 /*
87855@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
87856 #define BOGO_DIRENT_SIZE 20
87857
87858 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
87859-#define SHORT_SYMLINK_LEN 128
87860+#define SHORT_SYMLINK_LEN 64
87861
87862 /*
87863 * shmem_fallocate and shmem_writepage communicate via inode->i_private
87864@@ -2203,6 +2203,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
87865 static int shmem_xattr_validate(const char *name)
87866 {
87867 struct { const char *prefix; size_t len; } arr[] = {
87868+
87869+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
87870+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
87871+#endif
87872+
87873 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
87874 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
87875 };
87876@@ -2258,6 +2263,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
87877 if (err)
87878 return err;
87879
87880+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
87881+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
87882+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
87883+ return -EOPNOTSUPP;
87884+ if (size > 8)
87885+ return -EINVAL;
87886+ }
87887+#endif
87888+
87889 return simple_xattr_set(&info->xattrs, name, value, size, flags);
87890 }
87891
87892@@ -2570,8 +2584,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
87893 int err = -ENOMEM;
87894
87895 /* Round up to L1_CACHE_BYTES to resist false sharing */
87896- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
87897- L1_CACHE_BYTES), GFP_KERNEL);
87898+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
87899 if (!sbinfo)
87900 return -ENOMEM;
87901
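With CONFIG_PAX_XATTR_PAX_FLAGS, shmem_xattr_validate() above whitelists the user. namespace, and shmem_setxattr() then narrows it to exactly one attribute and caps its value at 8 bytes. A hedged sketch of that two-step prefix-then-exact-name filter; XATTR_NAME_PAX_FLAGS is assumed here to expand to "user.pax.flags", consistent with the prefix macros used in the hunk:

/* Sketch of the two-step xattr name filter used above. */
#include <stdio.h>
#include <string.h>

#define XATTR_USER_PREFIX      "user."
#define XATTR_USER_PREFIX_LEN  (sizeof(XATTR_USER_PREFIX) - 1)
#define XATTR_NAME_PAX_FLAGS   "user.pax.flags"   /* assumption, see above */

static int validate(const char *name, size_t size)
{
    if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
        if (strcmp(name, XATTR_NAME_PAX_FLAGS))
            return -1;          /* any other user.* attr: -EOPNOTSUPP */
        if (size > 8)
            return -2;          /* oversized value: -EINVAL */
    }
    return 0;
}

int main(void)
{
    printf("%d\n", validate("user.pax.flags", 4));   /*  0 */
    printf("%d\n", validate("user.mime_type", 4));   /* -1 */
    printf("%d\n", validate("user.pax.flags", 32));  /* -2 */
    return 0;
}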
87902diff --git a/mm/slab.c b/mm/slab.c
87903index bd88411..2d46fd6 100644
87904--- a/mm/slab.c
87905+++ b/mm/slab.c
87906@@ -366,10 +366,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
87907 if ((x)->max_freeable < i) \
87908 (x)->max_freeable = i; \
87909 } while (0)
87910-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
87911-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
87912-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
87913-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
87914+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
87915+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
87916+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
87917+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
87918+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
87919+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
87920 #else
87921 #define STATS_INC_ACTIVE(x) do { } while (0)
87922 #define STATS_DEC_ACTIVE(x) do { } while (0)
87923@@ -386,6 +388,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
87924 #define STATS_INC_ALLOCMISS(x) do { } while (0)
87925 #define STATS_INC_FREEHIT(x) do { } while (0)
87926 #define STATS_INC_FREEMISS(x) do { } while (0)
87927+#define STATS_INC_SANITIZED(x) do { } while (0)
87928+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
87929 #endif
87930
87931 #if DEBUG
87932@@ -477,7 +481,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
87933 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
87934 */
87935 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
87936- const struct slab *slab, void *obj)
87937+ const struct slab *slab, const void *obj)
87938 {
87939 u32 offset = (obj - slab->s_mem);
87940 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
87941@@ -1384,7 +1388,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
87942 return notifier_from_errno(err);
87943 }
87944
87945-static struct notifier_block __cpuinitdata cpucache_notifier = {
87946+static struct notifier_block cpucache_notifier = {
87947 &cpuup_callback, NULL, 0
87948 };
87949
87950@@ -1565,12 +1569,12 @@ void __init kmem_cache_init(void)
87951 */
87952
87953 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
87954- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
87955+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
87956
87957 if (INDEX_AC != INDEX_NODE)
87958 kmalloc_caches[INDEX_NODE] =
87959 create_kmalloc_cache("kmalloc-node",
87960- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
87961+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
87962
87963 slab_early_init = 0;
87964
87965@@ -3583,6 +3587,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
87966 struct array_cache *ac = cpu_cache_get(cachep);
87967
87968 check_irq_off();
87969+
87970+#ifdef CONFIG_PAX_MEMORY_SANITIZE
87971+ if (pax_sanitize_slab) {
87972+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
87973+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
87974+
87975+ if (cachep->ctor)
87976+ cachep->ctor(objp);
87977+
87978+ STATS_INC_SANITIZED(cachep);
87979+ } else
87980+ STATS_INC_NOT_SANITIZED(cachep);
87981+ }
87982+#endif
87983+
87984 kmemleak_free_recursive(objp, cachep->flags);
87985 objp = cache_free_debugcheck(cachep, objp, caller);
87986
87987@@ -3800,6 +3819,7 @@ void kfree(const void *objp)
87988
87989 if (unlikely(ZERO_OR_NULL_PTR(objp)))
87990 return;
87991+ VM_BUG_ON(!virt_addr_valid(objp));
87992 local_irq_save(flags);
87993 kfree_debugcheck(objp);
87994 c = virt_to_cache(objp);
87995@@ -4241,14 +4261,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
87996 }
87997 /* cpu stats */
87998 {
87999- unsigned long allochit = atomic_read(&cachep->allochit);
88000- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
88001- unsigned long freehit = atomic_read(&cachep->freehit);
88002- unsigned long freemiss = atomic_read(&cachep->freemiss);
88003+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
88004+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
88005+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
88006+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
88007
88008 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
88009 allochit, allocmiss, freehit, freemiss);
88010 }
88011+#ifdef CONFIG_PAX_MEMORY_SANITIZE
88012+ {
88013+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
88014+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
88015+
88016+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
88017+ }
88018+#endif
88019 #endif
88020 }
88021
88022@@ -4476,13 +4504,71 @@ static const struct file_operations proc_slabstats_operations = {
88023 static int __init slab_proc_init(void)
88024 {
88025 #ifdef CONFIG_DEBUG_SLAB_LEAK
88026- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
88027+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
88028 #endif
88029 return 0;
88030 }
88031 module_init(slab_proc_init);
88032 #endif
88033
88034+bool is_usercopy_object(const void *ptr)
88035+{
88036+ struct page *page;
88037+ struct kmem_cache *cachep;
88038+
88039+ if (ZERO_OR_NULL_PTR(ptr))
88040+ return false;
88041+
88042+ if (!slab_is_available())
88043+ return false;
88044+
88045+ if (!virt_addr_valid(ptr))
88046+ return false;
88047+
88048+ page = virt_to_head_page(ptr);
88049+
88050+ if (!PageSlab(page))
88051+ return false;
88052+
88053+ cachep = page->slab_cache;
88054+ return cachep->flags & SLAB_USERCOPY;
88055+}
88056+
88057+#ifdef CONFIG_PAX_USERCOPY
88058+const char *check_heap_object(const void *ptr, unsigned long n)
88059+{
88060+ struct page *page;
88061+ struct kmem_cache *cachep;
88062+ struct slab *slabp;
88063+ unsigned int objnr;
88064+ unsigned long offset;
88065+
88066+ if (ZERO_OR_NULL_PTR(ptr))
88067+ return "<null>";
88068+
88069+ if (!virt_addr_valid(ptr))
88070+ return NULL;
88071+
88072+ page = virt_to_head_page(ptr);
88073+
88074+ if (!PageSlab(page))
88075+ return NULL;
88076+
88077+ cachep = page->slab_cache;
88078+ if (!(cachep->flags & SLAB_USERCOPY))
88079+ return cachep->name;
88080+
88081+ slabp = page->slab_page;
88082+ objnr = obj_to_index(cachep, slabp, ptr);
88083+ BUG_ON(objnr >= cachep->num);
88084+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
88085+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
88086+ return NULL;
88087+
88088+ return cachep->name;
88089+}
88090+#endif
88091+
88092 /**
88093 * ksize - get the actual amount of memory allocated for a given object
88094 * @objp: Pointer to the object
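The SLAB check_heap_object() above returns NULL (copy allowed) only when the copy fits inside a single SLAB_USERCOPY object: the offset of ptr within its object plus the length n must stay within object_size, and the test is ordered so the unsigned subtraction cannot wrap. A standalone rendering of just that bounds test:

/* The overflow-safe "fits inside one object" test from check_heap_object(). */
#include <stdio.h>

static int fits(unsigned long offset, unsigned long n, unsigned long object_size)
{
    /* `offset <= object_size` first, so `object_size - offset` cannot wrap */
    return offset <= object_size && n <= object_size - offset;
}

int main(void)
{
    printf("%d\n", fits(0,  64, 64));   /* 1: whole object */
    printf("%d\n", fits(8,  56, 64));   /* 1: tail of object */
    printf("%d\n", fits(8,  60, 64));   /* 0: runs past the object */
    printf("%d\n", fits(80, 16, 64));   /* 0: starts past the object */
    return 0;
}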
88095diff --git a/mm/slab.h b/mm/slab.h
88096index f96b49e..db1d204 100644
88097--- a/mm/slab.h
88098+++ b/mm/slab.h
88099@@ -32,6 +32,15 @@ extern struct list_head slab_caches;
88100 /* The slab cache that manages slab cache information */
88101 extern struct kmem_cache *kmem_cache;
88102
88103+#ifdef CONFIG_PAX_MEMORY_SANITIZE
88104+#ifdef CONFIG_X86_64
88105+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
88106+#else
88107+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
88108+#endif
88109+extern bool pax_sanitize_slab;
88110+#endif
88111+
88112 unsigned long calculate_alignment(unsigned long flags,
88113 unsigned long align, unsigned long size);
88114
88115@@ -67,7 +76,8 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
88116
88117 /* Legal flag mask for kmem_cache_create(), for various configurations */
88118 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
88119- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
88120+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
88121+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
88122
88123 #if defined(CONFIG_DEBUG_SLAB)
88124 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
88125@@ -229,6 +239,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
88126 return s;
88127
88128 page = virt_to_head_page(x);
88129+
88130+ BUG_ON(!PageSlab(page));
88131+
88132 cachep = page->slab_cache;
88133 if (slab_equal_or_root(cachep, s))
88134 return cachep;
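The split poison value above appears deliberate: on x86_64, a freed object filled with 0xFE yields pointer patterns like 0xfefefefefefefefe, which fall in the non-canonical hole of the 48-bit virtual address space and fault immediately if dereferenced. A small hedged check of that canonicality argument (48-bit virtual addresses assumed):

/* Why 0xFE makes a good x86_64 poison: the resulting pointer is non-canonical. */
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

static int canonical48(uint64_t va)
{
    uint64_t top = va >> 47;            /* bits 63..47 */
    return top == 0 || top == 0x1ffff;  /* must be all-zero or all-one */
}

int main(void)
{
    uint64_t p;
    memset(&p, 0xFE, sizeof p);
    printf("0x%016" PRIx64 " canonical? %d\n", p, canonical48(p));             /* 0 */
    printf("0x%016" PRIx64 " canonical? %d\n", (uint64_t)0, canonical48(0));   /* 1 */
    return 0;
}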
88135diff --git a/mm/slab_common.c b/mm/slab_common.c
88136index 2d41450..4efe6ee 100644
88137--- a/mm/slab_common.c
88138+++ b/mm/slab_common.c
88139@@ -22,11 +22,22 @@
88140
88141 #include "slab.h"
88142
88143-enum slab_state slab_state;
88144+enum slab_state slab_state __read_only;
88145 LIST_HEAD(slab_caches);
88146 DEFINE_MUTEX(slab_mutex);
88147 struct kmem_cache *kmem_cache;
88148
88149+#ifdef CONFIG_PAX_MEMORY_SANITIZE
88150+bool pax_sanitize_slab __read_only = true;
88151+static int __init pax_sanitize_slab_setup(char *str)
88152+{
88153+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
88154+ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
88155+ return 1;
88156+}
88157+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
88158+#endif
88159+
88160 #ifdef CONFIG_DEBUG_VM
88161 static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
88162 size_t size)
88163@@ -209,7 +220,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
88164
88165 err = __kmem_cache_create(s, flags);
88166 if (!err) {
88167- s->refcount = 1;
88168+ atomic_set(&s->refcount, 1);
88169 list_add(&s->list, &slab_caches);
88170 memcg_cache_list_add(memcg, s);
88171 } else {
88172@@ -255,8 +266,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
88173
88174 get_online_cpus();
88175 mutex_lock(&slab_mutex);
88176- s->refcount--;
88177- if (!s->refcount) {
88178+ if (atomic_dec_and_test(&s->refcount)) {
88179 list_del(&s->list);
88180
88181 if (!__kmem_cache_shutdown(s)) {
88182@@ -302,7 +312,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
88183 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
88184 name, size, err);
88185
88186- s->refcount = -1; /* Exempt from merging for now */
88187+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
88188 }
88189
88190 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
88191@@ -315,7 +325,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
88192
88193 create_boot_cache(s, name, size, flags);
88194 list_add(&s->list, &slab_caches);
88195- s->refcount = 1;
88196+ atomic_set(&s->refcount, 1);
88197 return s;
88198 }
88199
88200@@ -327,6 +337,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
88201 EXPORT_SYMBOL(kmalloc_dma_caches);
88202 #endif
88203
88204+#ifdef CONFIG_PAX_USERCOPY_SLABS
88205+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
88206+EXPORT_SYMBOL(kmalloc_usercopy_caches);
88207+#endif
88208+
88209 /*
88210 * Conversion table for small slabs sizes / 8 to the index in the
88211 * kmalloc array. This is necessary for slabs < 192 since we have non power
88212@@ -391,6 +406,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
88213 return kmalloc_dma_caches[index];
88214
88215 #endif
88216+
88217+#ifdef CONFIG_PAX_USERCOPY_SLABS
88218+ if (unlikely((flags & GFP_USERCOPY)))
88219+ return kmalloc_usercopy_caches[index];
88220+
88221+#endif
88222+
88223 return kmalloc_caches[index];
88224 }
88225
88226@@ -447,7 +469,7 @@ void __init create_kmalloc_caches(unsigned long flags)
88227 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
88228 if (!kmalloc_caches[i]) {
88229 kmalloc_caches[i] = create_kmalloc_cache(NULL,
88230- 1 << i, flags);
88231+ 1 << i, SLAB_USERCOPY | flags);
88232 }
88233
88234 /*
88235@@ -456,10 +478,10 @@ void __init create_kmalloc_caches(unsigned long flags)
88236 * earlier power of two caches
88237 */
88238 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
88239- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
88240+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
88241
88242 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
88243- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
88244+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
88245 }
88246
88247 /* Kmalloc array is now usable */
88248@@ -492,6 +514,23 @@ void __init create_kmalloc_caches(unsigned long flags)
88249 }
88250 }
88251 #endif
88252+
88253+#ifdef CONFIG_PAX_USERCOPY_SLABS
88254+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
88255+ struct kmem_cache *s = kmalloc_caches[i];
88256+
88257+ if (s) {
88258+ int size = kmalloc_size(i);
88259+ char *n = kasprintf(GFP_NOWAIT,
88260+ "usercopy-kmalloc-%d", size);
88261+
88262+ BUG_ON(!n);
88263+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
88264+ size, SLAB_USERCOPY | flags);
88265+ }
88266+ }
88267+#endif
88268+
88269 }
88270 #endif /* !CONFIG_SLOB */
88271
88272@@ -516,6 +555,9 @@ void print_slabinfo_header(struct seq_file *m)
88273 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
88274 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
88275 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
88276+#ifdef CONFIG_PAX_MEMORY_SANITIZE
88277+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
88278+#endif
88279 #endif
88280 seq_putc(m, '\n');
88281 }
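The slab_common.c hunks above convert kmem_cache->refcount from a plain int to atomic_t: the racy `s->refcount--; if (!s->refcount)` pair becomes atomic_dec_and_test(), making the decrement and the zero check one indivisible step. A hedged C11 sketch of the same pattern (stdatomic rather than the kernel's atomic_t):

/* dec-and-test refcounting with C11 atomics (illustrative). */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cache {
    atomic_int refcount;
    /* ... */
};

static struct cache *cache_get(struct cache *c)
{
    atomic_fetch_add(&c->refcount, 1);
    return c;
}

static void cache_put(struct cache *c)
{
    /* fetch_sub returns the old value: old == 1 means we dropped the last ref */
    if (atomic_fetch_sub(&c->refcount, 1) == 1) {
        printf("last reference dropped, destroying\n");
        free(c);
    }
}

int main(void)
{
    struct cache *c = calloc(1, sizeof *c);
    atomic_store(&c->refcount, 1);
    cache_get(c);
    cache_put(c);
    cache_put(c);     /* destroys here */
    return 0;
}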
88282diff --git a/mm/slob.c b/mm/slob.c
88283index eeed4a0..bb0e9ab 100644
88284--- a/mm/slob.c
88285+++ b/mm/slob.c
88286@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
88287 /*
88288 * Return the size of a slob block.
88289 */
88290-static slobidx_t slob_units(slob_t *s)
88291+static slobidx_t slob_units(const slob_t *s)
88292 {
88293 if (s->units > 0)
88294 return s->units;
88295@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
88296 /*
88297 * Return the next free slob block pointer after this one.
88298 */
88299-static slob_t *slob_next(slob_t *s)
88300+static slob_t *slob_next(const slob_t *s)
88301 {
88302 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
88303 slobidx_t next;
88304@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
88305 /*
88306 * Returns true if s is the last free block in its page.
88307 */
88308-static int slob_last(slob_t *s)
88309+static int slob_last(const slob_t *s)
88310 {
88311 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
88312 }
88313
88314-static void *slob_new_pages(gfp_t gfp, int order, int node)
88315+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
88316 {
88317- void *page;
88318+ struct page *page;
88319
88320 #ifdef CONFIG_NUMA
88321 if (node != NUMA_NO_NODE)
88322@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
88323 if (!page)
88324 return NULL;
88325
88326- return page_address(page);
88327+ __SetPageSlab(page);
88328+ return page;
88329 }
88330
88331-static void slob_free_pages(void *b, int order)
88332+static void slob_free_pages(struct page *sp, int order)
88333 {
88334 if (current->reclaim_state)
88335 current->reclaim_state->reclaimed_slab += 1 << order;
88336- free_pages((unsigned long)b, order);
88337+ __ClearPageSlab(sp);
88338+ page_mapcount_reset(sp);
88339+ sp->private = 0;
88340+ __free_pages(sp, order);
88341 }
88342
88343 /*
88344@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
88345
88346 /* Not enough space: must allocate a new page */
88347 if (!b) {
88348- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
88349- if (!b)
88350+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
88351+ if (!sp)
88352 return NULL;
88353- sp = virt_to_page(b);
88354- __SetPageSlab(sp);
88355+ b = page_address(sp);
88356
88357 spin_lock_irqsave(&slob_lock, flags);
88358 sp->units = SLOB_UNITS(PAGE_SIZE);
88359 sp->freelist = b;
88360+ sp->private = 0;
88361 INIT_LIST_HEAD(&sp->list);
88362 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
88363 set_slob_page_free(sp, slob_list);
88364@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
88365 if (slob_page_free(sp))
88366 clear_slob_page_free(sp);
88367 spin_unlock_irqrestore(&slob_lock, flags);
88368- __ClearPageSlab(sp);
88369- page_mapcount_reset(sp);
88370- slob_free_pages(b, 0);
88371+ slob_free_pages(sp, 0);
88372 return;
88373 }
88374
88375+#ifdef CONFIG_PAX_MEMORY_SANITIZE
88376+ if (pax_sanitize_slab)
88377+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
88378+#endif
88379+
88380 if (!slob_page_free(sp)) {
88381 /* This slob page is about to become partially free. Easy! */
88382 sp->units = units;
88383@@ -424,11 +431,10 @@ out:
88384 */
88385
88386 static __always_inline void *
88387-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
88388+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
88389 {
88390- unsigned int *m;
88391- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
88392- void *ret;
88393+ slob_t *m;
88394+ void *ret = NULL;
88395
88396 gfp &= gfp_allowed_mask;
88397
88398@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
88399
88400 if (!m)
88401 return NULL;
88402- *m = size;
88403+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
88404+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
88405+ m[0].units = size;
88406+ m[1].units = align;
88407 ret = (void *)m + align;
88408
88409 trace_kmalloc_node(caller, ret,
88410 size, size + align, gfp, node);
88411 } else {
88412 unsigned int order = get_order(size);
88413+ struct page *page;
88414
88415 if (likely(order))
88416 gfp |= __GFP_COMP;
88417- ret = slob_new_pages(gfp, order, node);
88418+ page = slob_new_pages(gfp, order, node);
88419+ if (page) {
88420+ ret = page_address(page);
88421+ page->private = size;
88422+ }
88423
88424 trace_kmalloc_node(caller, ret,
88425 size, PAGE_SIZE << order, gfp, node);
88426 }
88427
88428- kmemleak_alloc(ret, size, 1, gfp);
88429+ return ret;
88430+}
88431+
88432+static __always_inline void *
88433+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
88434+{
88435+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
88436+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
88437+
88438+ if (!ZERO_OR_NULL_PTR(ret))
88439+ kmemleak_alloc(ret, size, 1, gfp);
88440 return ret;
88441 }
88442
88443@@ -493,34 +517,112 @@ void kfree(const void *block)
88444 return;
88445 kmemleak_free(block);
88446
88447+ VM_BUG_ON(!virt_addr_valid(block));
88448 sp = virt_to_page(block);
88449- if (PageSlab(sp)) {
88450+ VM_BUG_ON(!PageSlab(sp));
88451+ if (!sp->private) {
88452 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
88453- unsigned int *m = (unsigned int *)(block - align);
88454- slob_free(m, *m + align);
88455- } else
88456+ slob_t *m = (slob_t *)(block - align);
88457+ slob_free(m, m[0].units + align);
88458+ } else {
88459+ __ClearPageSlab(sp);
88460+ page_mapcount_reset(sp);
88461+ sp->private = 0;
88462 __free_pages(sp, compound_order(sp));
88463+ }
88464 }
88465 EXPORT_SYMBOL(kfree);
88466
88467+bool is_usercopy_object(const void *ptr)
88468+{
88469+ if (!slab_is_available())
88470+ return false;
88471+
88472+ // PAX: TODO
88473+
88474+ return false;
88475+}
88476+
88477+#ifdef CONFIG_PAX_USERCOPY
88478+const char *check_heap_object(const void *ptr, unsigned long n)
88479+{
88480+ struct page *page;
88481+ const slob_t *free;
88482+ const void *base;
88483+ unsigned long flags;
88484+
88485+ if (ZERO_OR_NULL_PTR(ptr))
88486+ return "<null>";
88487+
88488+ if (!virt_addr_valid(ptr))
88489+ return NULL;
88490+
88491+ page = virt_to_head_page(ptr);
88492+ if (!PageSlab(page))
88493+ return NULL;
88494+
88495+ if (page->private) {
88496+ base = page;
88497+ if (base <= ptr && n <= page->private - (ptr - base))
88498+ return NULL;
88499+ return "<slob>";
88500+ }
88501+
88502+ /* some tricky double walking to find the chunk */
88503+ spin_lock_irqsave(&slob_lock, flags);
88504+ base = (void *)((unsigned long)ptr & PAGE_MASK);
88505+ free = page->freelist;
88506+
88507+ while (!slob_last(free) && (void *)free <= ptr) {
88508+ base = free + slob_units(free);
88509+ free = slob_next(free);
88510+ }
88511+
88512+ while (base < (void *)free) {
88513+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
88514+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
88515+ int offset;
88516+
88517+ if (ptr < base + align)
88518+ break;
88519+
88520+ offset = ptr - base - align;
88521+ if (offset >= m) {
88522+ base += size;
88523+ continue;
88524+ }
88525+
88526+ if (n > m - offset)
88527+ break;
88528+
88529+ spin_unlock_irqrestore(&slob_lock, flags);
88530+ return NULL;
88531+ }
88532+
88533+ spin_unlock_irqrestore(&slob_lock, flags);
88534+ return "<slob>";
88535+}
88536+#endif
88537+
88538 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
88539 size_t ksize(const void *block)
88540 {
88541 struct page *sp;
88542 int align;
88543- unsigned int *m;
88544+ slob_t *m;
88545
88546 BUG_ON(!block);
88547 if (unlikely(block == ZERO_SIZE_PTR))
88548 return 0;
88549
88550 sp = virt_to_page(block);
88551- if (unlikely(!PageSlab(sp)))
88552- return PAGE_SIZE << compound_order(sp);
88553+ VM_BUG_ON(!PageSlab(sp));
88554+ if (sp->private)
88555+ return sp->private;
88556
88557 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
88558- m = (unsigned int *)(block - align);
88559- return SLOB_UNITS(*m) * SLOB_UNIT;
88560+ m = (slob_t *)(block - align);
88561+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
88562 }
88563 EXPORT_SYMBOL(ksize);
88564
88565@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
88566
88567 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
88568 {
88569- void *b;
88570+ void *b = NULL;
88571
88572 flags &= gfp_allowed_mask;
88573
88574 lockdep_trace_alloc(flags);
88575
88576+#ifdef CONFIG_PAX_USERCOPY_SLABS
88577+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
88578+#else
88579 if (c->size < PAGE_SIZE) {
88580 b = slob_alloc(c->size, flags, c->align, node);
88581 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
88582 SLOB_UNITS(c->size) * SLOB_UNIT,
88583 flags, node);
88584 } else {
88585- b = slob_new_pages(flags, get_order(c->size), node);
88586+ struct page *sp;
88587+
88588+ sp = slob_new_pages(flags, get_order(c->size), node);
88589+ if (sp) {
88590+ b = page_address(sp);
88591+ sp->private = c->size;
88592+ }
88593 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
88594 PAGE_SIZE << get_order(c->size),
88595 flags, node);
88596 }
88597+#endif
88598
88599 if (c->ctor)
88600 c->ctor(b);
88601@@ -564,10 +676,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
88602
88603 static void __kmem_cache_free(void *b, int size)
88604 {
88605- if (size < PAGE_SIZE)
88606+ struct page *sp;
88607+
88608+ sp = virt_to_page(b);
88609+ BUG_ON(!PageSlab(sp));
88610+ if (!sp->private)
88611 slob_free(b, size);
88612 else
88613- slob_free_pages(b, get_order(size));
88614+ slob_free_pages(sp, get_order(size));
88615 }
88616
88617 static void kmem_rcu_free(struct rcu_head *head)
88618@@ -580,17 +696,31 @@ static void kmem_rcu_free(struct rcu_head *head)
88619
88620 void kmem_cache_free(struct kmem_cache *c, void *b)
88621 {
88622+ int size = c->size;
88623+
88624+#ifdef CONFIG_PAX_USERCOPY_SLABS
88625+ if (size + c->align < PAGE_SIZE) {
88626+ size += c->align;
88627+ b -= c->align;
88628+ }
88629+#endif
88630+
88631 kmemleak_free_recursive(b, c->flags);
88632 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
88633 struct slob_rcu *slob_rcu;
88634- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
88635- slob_rcu->size = c->size;
88636+ slob_rcu = b + (size - sizeof(struct slob_rcu));
88637+ slob_rcu->size = size;
88638 call_rcu(&slob_rcu->head, kmem_rcu_free);
88639 } else {
88640- __kmem_cache_free(b, c->size);
88641+ __kmem_cache_free(b, size);
88642 }
88643
88644+#ifdef CONFIG_PAX_USERCOPY_SLABS
88645+ trace_kfree(_RET_IP_, b);
88646+#else
88647 trace_kmem_cache_free(_RET_IP_, b);
88648+#endif
88649+
88650 }
88651 EXPORT_SYMBOL(kmem_cache_free);
88652
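After this rewrite a SLOB kmalloc() chunk carries two slob_t header units in front of the pointer returned to the caller — m[0].units holds the size and m[1].units the alignment — and kfree()/ksize() read them back from `block - align`. The same header-before-payload trick in a hedged userspace form, with the layout simplified to two size_t words:

/* Metadata header stored just below the pointer handed to the caller. */
#include <stdio.h>
#include <stdlib.h>

#define HDR (2 * sizeof(size_t))   /* [0] = size, [1] = align, as in the patch */

static void *hdr_alloc(size_t size, size_t align)
{
    size_t *m = malloc(HDR + size);
    if (!m)
        return NULL;
    m[0] = size;
    m[1] = align;
    return (char *)m + HDR;
}

static size_t hdr_size(const void *p)
{
    const size_t *m = (const size_t *)((const char *)p - HDR);
    return m[0];
}

static void hdr_free(void *p)
{
    free((char *)p - HDR);
}

int main(void)
{
    void *p = hdr_alloc(100, 16);
    printf("ksize-alike: %zu\n", hdr_size(p));   /* 100 */
    hdr_free(p);
    return 0;
}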
88653diff --git a/mm/slub.c b/mm/slub.c
88654index 57707f0..7857bd3 100644
88655--- a/mm/slub.c
88656+++ b/mm/slub.c
88657@@ -198,7 +198,7 @@ struct track {
88658
88659 enum track_item { TRACK_ALLOC, TRACK_FREE };
88660
88661-#ifdef CONFIG_SYSFS
88662+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
88663 static int sysfs_slab_add(struct kmem_cache *);
88664 static int sysfs_slab_alias(struct kmem_cache *, const char *);
88665 static void sysfs_slab_remove(struct kmem_cache *);
88666@@ -519,7 +519,7 @@ static void print_track(const char *s, struct track *t)
88667 if (!t->addr)
88668 return;
88669
88670- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
88671+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
88672 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
88673 #ifdef CONFIG_STACKTRACE
88674 {
88675@@ -2594,6 +2594,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
88676
88677 slab_free_hook(s, x);
88678
88679+#ifdef CONFIG_PAX_MEMORY_SANITIZE
88680+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
88681+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
88682+ if (s->ctor)
88683+ s->ctor(x);
88684+ }
88685+#endif
88686+
88687 redo:
88688 /*
88689 * Determine the currently cpus per cpu slab.
88690@@ -2661,7 +2669,7 @@ static int slub_min_objects;
88691 * Merge control. If this is set then no merging of slab caches will occur.
88692 * (Could be removed. This was introduced to pacify the merge skeptics.)
88693 */
88694-static int slub_nomerge;
88695+static int slub_nomerge = 1;
88696
88697 /*
88698 * Calculate the order of allocation given an slab object size.
88699@@ -2938,6 +2946,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
88700 s->inuse = size;
88701
88702 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
88703+#ifdef CONFIG_PAX_MEMORY_SANITIZE
88704+ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
88705+#endif
88706 s->ctor)) {
88707 /*
88708 * Relocate free pointer after the object if it is not
88709@@ -3283,6 +3294,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
88710 EXPORT_SYMBOL(__kmalloc_node);
88711 #endif
88712
88713+bool is_usercopy_object(const void *ptr)
88714+{
88715+ struct page *page;
88716+ struct kmem_cache *s;
88717+
88718+ if (ZERO_OR_NULL_PTR(ptr))
88719+ return false;
88720+
88721+ if (!slab_is_available())
88722+ return false;
88723+
88724+ if (!virt_addr_valid(ptr))
88725+ return false;
88726+
88727+ page = virt_to_head_page(ptr);
88728+
88729+ if (!PageSlab(page))
88730+ return false;
88731+
88732+ s = page->slab_cache;
88733+ return s->flags & SLAB_USERCOPY;
88734+}
88735+
88736+#ifdef CONFIG_PAX_USERCOPY
88737+const char *check_heap_object(const void *ptr, unsigned long n)
88738+{
88739+ struct page *page;
88740+ struct kmem_cache *s;
88741+ unsigned long offset;
88742+
88743+ if (ZERO_OR_NULL_PTR(ptr))
88744+ return "<null>";
88745+
88746+ if (!virt_addr_valid(ptr))
88747+ return NULL;
88748+
88749+ page = virt_to_head_page(ptr);
88750+
88751+ if (!PageSlab(page))
88752+ return NULL;
88753+
88754+ s = page->slab_cache;
88755+ if (!(s->flags & SLAB_USERCOPY))
88756+ return s->name;
88757+
88758+ offset = (ptr - page_address(page)) % s->size;
88759+ if (offset <= s->object_size && n <= s->object_size - offset)
88760+ return NULL;
88761+
88762+ return s->name;
88763+}
88764+#endif
88765+
88766 size_t ksize(const void *object)
88767 {
88768 struct page *page;
88769@@ -3347,6 +3411,7 @@ void kfree(const void *x)
88770 if (unlikely(ZERO_OR_NULL_PTR(x)))
88771 return;
88772
88773+ VM_BUG_ON(!virt_addr_valid(x));
88774 page = virt_to_head_page(x);
88775 if (unlikely(!PageSlab(page))) {
88776 BUG_ON(!PageCompound(page));
88777@@ -3652,7 +3717,7 @@ static int slab_unmergeable(struct kmem_cache *s)
88778 /*
88779 * We may have set a slab to be unmergeable during bootstrap.
88780 */
88781- if (s->refcount < 0)
88782+ if (atomic_read(&s->refcount) < 0)
88783 return 1;
88784
88785 return 0;
88786@@ -3710,7 +3775,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
88787
88788 s = find_mergeable(memcg, size, align, flags, name, ctor);
88789 if (s) {
88790- s->refcount++;
88791+ atomic_inc(&s->refcount);
88792 /*
88793 * Adjust the object sizes so that we clear
88794 * the complete object on kzalloc.
88795@@ -3719,7 +3784,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
88796 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
88797
88798 if (sysfs_slab_alias(s, name)) {
88799- s->refcount--;
88800+ atomic_dec(&s->refcount);
88801 s = NULL;
88802 }
88803 }
88804@@ -3781,7 +3846,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
88805 return NOTIFY_OK;
88806 }
88807
88808-static struct notifier_block __cpuinitdata slab_notifier = {
88809+static struct notifier_block slab_notifier = {
88810 .notifier_call = slab_cpuup_callback
88811 };
88812
88813@@ -3839,7 +3904,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
88814 }
88815 #endif
88816
88817-#ifdef CONFIG_SYSFS
88818+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
88819 static int count_inuse(struct page *page)
88820 {
88821 return page->inuse;
88822@@ -4226,12 +4291,12 @@ static void resiliency_test(void)
88823 validate_slab_cache(kmalloc_caches[9]);
88824 }
88825 #else
88826-#ifdef CONFIG_SYSFS
88827+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
88828 static void resiliency_test(void) {};
88829 #endif
88830 #endif
88831
88832-#ifdef CONFIG_SYSFS
88833+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
88834 enum slab_stat_type {
88835 SL_ALL, /* All slabs */
88836 SL_PARTIAL, /* Only partially allocated slabs */
88837@@ -4475,7 +4540,7 @@ SLAB_ATTR_RO(ctor);
88838
88839 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
88840 {
88841- return sprintf(buf, "%d\n", s->refcount - 1);
88842+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
88843 }
88844 SLAB_ATTR_RO(aliases);
88845
88846@@ -4563,6 +4628,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
88847 SLAB_ATTR_RO(cache_dma);
88848 #endif
88849
88850+#ifdef CONFIG_PAX_USERCOPY_SLABS
88851+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
88852+{
88853+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
88854+}
88855+SLAB_ATTR_RO(usercopy);
88856+#endif
88857+
88858 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
88859 {
88860 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
88861@@ -4897,6 +4970,9 @@ static struct attribute *slab_attrs[] = {
88862 #ifdef CONFIG_ZONE_DMA
88863 &cache_dma_attr.attr,
88864 #endif
88865+#ifdef CONFIG_PAX_USERCOPY_SLABS
88866+ &usercopy_attr.attr,
88867+#endif
88868 #ifdef CONFIG_NUMA
88869 &remote_node_defrag_ratio_attr.attr,
88870 #endif
88871@@ -5128,6 +5204,7 @@ static char *create_unique_id(struct kmem_cache *s)
88872 return name;
88873 }
88874
88875+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
88876 static int sysfs_slab_add(struct kmem_cache *s)
88877 {
88878 int err;
88879@@ -5151,7 +5228,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
88880 }
88881
88882 s->kobj.kset = slab_kset;
88883- err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
88884+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
88885 if (err) {
88886 kobject_put(&s->kobj);
88887 return err;
88888@@ -5185,6 +5262,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
88889 kobject_del(&s->kobj);
88890 kobject_put(&s->kobj);
88891 }
88892+#endif
88893
88894 /*
88895 * Need to buffer aliases during bootup until sysfs becomes
88896@@ -5198,6 +5276,7 @@ struct saved_alias {
88897
88898 static struct saved_alias *alias_list;
88899
88900+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
88901 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
88902 {
88903 struct saved_alias *al;
88904@@ -5220,6 +5299,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
88905 alias_list = al;
88906 return 0;
88907 }
88908+#endif
88909
88910 static int __init slab_sysfs_init(void)
88911 {
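SLUB's check_heap_object() above recovers the in-object offset differently from SLAB: objects sit at a fixed stride of s->size from page_address(page), so `(ptr - page_address(page)) % s->size` gives the offset into whichever object ptr lands in, and the same fits-in-one-object test follows. A hedged sketch of the modulo step with made-up numbers:

/* Locating an object offset in a fixed-stride, SLUB-style layout. */
#include <stdio.h>

int main(void)
{
    unsigned long base = 0x10000;   /* page_address(page), illustrative */
    unsigned long size = 192;       /* s->size: object stride */
    unsigned long ptr  = base + 2 * size + 40;   /* 40 bytes into object #2 */

    unsigned long offset = (ptr - base) % size;
    unsigned long index  = (ptr - base) / size;

    printf("object %lu, offset %lu\n", index, offset);   /* object 2, offset 40 */
    return 0;
}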
88912diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
88913index 27eeab3..7c3f7f2 100644
88914--- a/mm/sparse-vmemmap.c
88915+++ b/mm/sparse-vmemmap.c
88916@@ -130,7 +130,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
88917 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
88918 if (!p)
88919 return NULL;
88920- pud_populate(&init_mm, pud, p);
88921+ pud_populate_kernel(&init_mm, pud, p);
88922 }
88923 return pud;
88924 }
88925@@ -142,7 +142,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
88926 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
88927 if (!p)
88928 return NULL;
88929- pgd_populate(&init_mm, pgd, p);
88930+ pgd_populate_kernel(&init_mm, pgd, p);
88931 }
88932 return pgd;
88933 }
88934diff --git a/mm/sparse.c b/mm/sparse.c
88935index 1c91f0d3..485470a 100644
88936--- a/mm/sparse.c
88937+++ b/mm/sparse.c
88938@@ -761,7 +761,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
88939
88940 for (i = 0; i < PAGES_PER_SECTION; i++) {
88941 if (PageHWPoison(&memmap[i])) {
88942- atomic_long_sub(1, &num_poisoned_pages);
88943+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
88944 ClearPageHWPoison(&memmap[i]);
88945 }
88946 }
88947diff --git a/mm/swap.c b/mm/swap.c
88948index dfd7d71..ccdf688 100644
88949--- a/mm/swap.c
88950+++ b/mm/swap.c
88951@@ -31,6 +31,7 @@
88952 #include <linux/memcontrol.h>
88953 #include <linux/gfp.h>
88954 #include <linux/uio.h>
88955+#include <linux/hugetlb.h>
88956
88957 #include "internal.h"
88958
88959@@ -73,6 +74,8 @@ static void __put_compound_page(struct page *page)
88960
88961 __page_cache_release(page);
88962 dtor = get_compound_page_dtor(page);
88963+ if (!PageHuge(page))
88964+ BUG_ON(dtor != free_compound_page);
88965 (*dtor)(page);
88966 }
88967
88968diff --git a/mm/swapfile.c b/mm/swapfile.c
88969index 746af55b..7ac94ae 100644
88970--- a/mm/swapfile.c
88971+++ b/mm/swapfile.c
88972@@ -66,7 +66,7 @@ static DEFINE_MUTEX(swapon_mutex);
88973
88974 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
88975 /* Activity counter to indicate that a swapon or swapoff has occurred */
88976-static atomic_t proc_poll_event = ATOMIC_INIT(0);
88977+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
88978
88979 static inline unsigned char swap_count(unsigned char ent)
88980 {
88981@@ -1684,7 +1684,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
88982 }
88983 filp_close(swap_file, NULL);
88984 err = 0;
88985- atomic_inc(&proc_poll_event);
88986+ atomic_inc_unchecked(&proc_poll_event);
88987 wake_up_interruptible(&proc_poll_wait);
88988
88989 out_dput:
88990@@ -1701,8 +1701,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
88991
88992 poll_wait(file, &proc_poll_wait, wait);
88993
88994- if (seq->poll_event != atomic_read(&proc_poll_event)) {
88995- seq->poll_event = atomic_read(&proc_poll_event);
88996+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
88997+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
88998 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
88999 }
89000
89001@@ -1800,7 +1800,7 @@ static int swaps_open(struct inode *inode, struct file *file)
89002 return ret;
89003
89004 seq = file->private_data;
89005- seq->poll_event = atomic_read(&proc_poll_event);
89006+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
89007 return 0;
89008 }
89009
89010@@ -2143,7 +2143,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
89011 (frontswap_map) ? "FS" : "");
89012
89013 mutex_unlock(&swapon_mutex);
89014- atomic_inc(&proc_poll_event);
89015+ atomic_inc_unchecked(&proc_poll_event);
89016 wake_up_interruptible(&proc_poll_wait);
89017
89018 if (S_ISREG(inode->i_mode))
89019diff --git a/mm/util.c b/mm/util.c
89020index ab1424d..7c5bd5a 100644
89021--- a/mm/util.c
89022+++ b/mm/util.c
89023@@ -294,6 +294,12 @@ done:
89024 void arch_pick_mmap_layout(struct mm_struct *mm)
89025 {
89026 mm->mmap_base = TASK_UNMAPPED_BASE;
89027+
89028+#ifdef CONFIG_PAX_RANDMMAP
89029+ if (mm->pax_flags & MF_PAX_RANDMMAP)
89030+ mm->mmap_base += mm->delta_mmap;
89031+#endif
89032+
89033 mm->get_unmapped_area = arch_get_unmapped_area;
89034 mm->unmap_area = arch_unmap_area;
89035 }
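With CONFIG_PAX_RANDMMAP, arch_pick_mmap_layout() above shifts mmap_base by the per-process delta_mmap, giving each address space a different mapping base. A hedged sketch of the idea only — the base constant and entropy width below are illustrative, not the kernel's actual values:

/* Illustrative randomized mmap base (made-up constants, see above). */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define TASK_UNMAPPED_BASE 0x0000555500000000UL   /* illustrative */
#define PAGE_SHIFT 12

int main(void)
{
    srand((unsigned)time(NULL));

    /* e.g. 16 bits of entropy, applied at page granularity */
    unsigned long delta_mmap = ((unsigned long)rand() & 0xFFFF) << PAGE_SHIFT;
    unsigned long mmap_base  = TASK_UNMAPPED_BASE + delta_mmap;

    printf("mmap_base = 0x%lx\n", mmap_base);
    return 0;
}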
89036diff --git a/mm/vmalloc.c b/mm/vmalloc.c
89037index d365724..6cae7c2 100644
89038--- a/mm/vmalloc.c
89039+++ b/mm/vmalloc.c
89040@@ -59,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
89041
89042 pte = pte_offset_kernel(pmd, addr);
89043 do {
89044- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
89045- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
89046+
89047+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89048+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
89049+ BUG_ON(!pte_exec(*pte));
89050+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
89051+ continue;
89052+ }
89053+#endif
89054+
89055+ {
89056+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
89057+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
89058+ }
89059 } while (pte++, addr += PAGE_SIZE, addr != end);
89060 }
89061
89062@@ -120,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
89063 pte = pte_alloc_kernel(pmd, addr);
89064 if (!pte)
89065 return -ENOMEM;
89066+
89067+ pax_open_kernel();
89068 do {
89069 struct page *page = pages[*nr];
89070
89071- if (WARN_ON(!pte_none(*pte)))
89072+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89073+ if (pgprot_val(prot) & _PAGE_NX)
89074+#endif
89075+
89076+ if (!pte_none(*pte)) {
89077+ pax_close_kernel();
89078+ WARN_ON(1);
89079 return -EBUSY;
89080- if (WARN_ON(!page))
89081+ }
89082+ if (!page) {
89083+ pax_close_kernel();
89084+ WARN_ON(1);
89085 return -ENOMEM;
89086+ }
89087 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
89088 (*nr)++;
89089 } while (pte++, addr += PAGE_SIZE, addr != end);
89090+ pax_close_kernel();
89091 return 0;
89092 }
89093
89094@@ -139,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
89095 pmd_t *pmd;
89096 unsigned long next;
89097
89098- pmd = pmd_alloc(&init_mm, pud, addr);
89099+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
89100 if (!pmd)
89101 return -ENOMEM;
89102 do {
89103@@ -156,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
89104 pud_t *pud;
89105 unsigned long next;
89106
89107- pud = pud_alloc(&init_mm, pgd, addr);
89108+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
89109 if (!pud)
89110 return -ENOMEM;
89111 do {
89112@@ -216,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x)
89113 if (addr >= MODULES_VADDR && addr < MODULES_END)
89114 return 1;
89115 #endif
89116+
89117+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89118+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
89119+ return 1;
89120+#endif
89121+
89122 return is_vmalloc_addr(x);
89123 }
89124
89125@@ -236,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
89126
89127 if (!pgd_none(*pgd)) {
89128 pud_t *pud = pud_offset(pgd, addr);
89129+#ifdef CONFIG_X86
89130+ if (!pud_large(*pud))
89131+#endif
89132 if (!pud_none(*pud)) {
89133 pmd_t *pmd = pmd_offset(pud, addr);
89134+#ifdef CONFIG_X86
89135+ if (!pmd_large(*pmd))
89136+#endif
89137 if (!pmd_none(*pmd)) {
89138 pte_t *ptep, pte;
89139
89140@@ -339,7 +375,7 @@ static void purge_vmap_area_lazy(void);
89141 * Allocate a region of KVA of the specified size and alignment, within the
89142 * vstart and vend.
89143 */
89144-static struct vmap_area *alloc_vmap_area(unsigned long size,
89145+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
89146 unsigned long align,
89147 unsigned long vstart, unsigned long vend,
89148 int node, gfp_t gfp_mask)
89149@@ -1337,6 +1373,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
89150 struct vm_struct *area;
89151
89152 BUG_ON(in_interrupt());
89153+
89154+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
89155+ if (flags & VM_KERNEXEC) {
89156+ if (start != VMALLOC_START || end != VMALLOC_END)
89157+ return NULL;
89158+ start = (unsigned long)MODULES_EXEC_VADDR;
89159+ end = (unsigned long)MODULES_EXEC_END;
89160+ }
89161+#endif
89162+
89163 if (flags & VM_IOREMAP) {
89164 int bit = fls(size);
89165
89166@@ -1581,6 +1627,11 @@ void *vmap(struct page **pages, unsigned int count,
89167 if (count > totalram_pages)
89168 return NULL;
89169
89170+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
89171+ if (!(pgprot_val(prot) & _PAGE_NX))
89172+ flags |= VM_KERNEXEC;
89173+#endif
89174+
89175 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
89176 __builtin_return_address(0));
89177 if (!area)
89178@@ -1682,6 +1733,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
89179 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
89180 goto fail;
89181
89182+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
89183+ if (!(pgprot_val(prot) & _PAGE_NX))
89184+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
89185+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
89186+ else
89187+#endif
89188+
89189 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
89190 start, end, node, gfp_mask, caller);
89191 if (!area)
89192@@ -1858,10 +1916,9 @@ EXPORT_SYMBOL(vzalloc_node);
89193 * For tight control over page level allocator and protection flags
89194 * use __vmalloc() instead.
89195 */
89196-
89197 void *vmalloc_exec(unsigned long size)
89198 {
89199- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
89200+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
89201 NUMA_NO_NODE, __builtin_return_address(0));
89202 }
89203
89204@@ -2168,6 +2225,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
89205 unsigned long uaddr = vma->vm_start;
89206 unsigned long usize = vma->vm_end - vma->vm_start;
89207
89208+ BUG_ON(vma->vm_mirror);
89209+
89210 if ((PAGE_SIZE-1) & (unsigned long)addr)
89211 return -EINVAL;
89212
89213@@ -2629,7 +2688,11 @@ static int s_show(struct seq_file *m, void *p)
89214 v->addr, v->addr + v->size, v->size);
89215
89216 if (v->caller)
89217+#ifdef CONFIG_GRKERNSEC_HIDESYM
89218+ seq_printf(m, " %pK", v->caller);
89219+#else
89220 seq_printf(m, " %pS", v->caller);
89221+#endif
89222
89223 if (v->nr_pages)
89224 seq_printf(m, " pages=%d", v->nr_pages);
89225diff --git a/mm/vmstat.c b/mm/vmstat.c
89226index f42745e..62f8346 100644
89227--- a/mm/vmstat.c
89228+++ b/mm/vmstat.c
89229@@ -76,7 +76,7 @@ void vm_events_fold_cpu(int cpu)
89230 *
89231 * vm_stat contains the global counters
89232 */
89233-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
89234+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
89235 EXPORT_SYMBOL(vm_stat);
89236
89237 #ifdef CONFIG_SMP
89238@@ -452,7 +452,7 @@ void refresh_cpu_vm_stats(int cpu)
89239 v = p->vm_stat_diff[i];
89240 p->vm_stat_diff[i] = 0;
89241 local_irq_restore(flags);
89242- atomic_long_add(v, &zone->vm_stat[i]);
89243+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
89244 global_diff[i] += v;
89245 #ifdef CONFIG_NUMA
89246 /* 3 seconds idle till flush */
89247@@ -490,7 +490,7 @@ void refresh_cpu_vm_stats(int cpu)
89248
89249 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
89250 if (global_diff[i])
89251- atomic_long_add(global_diff[i], &vm_stat[i]);
89252+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
89253 }
89254
89255 /*
89256@@ -505,8 +505,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
89257 if (pset->vm_stat_diff[i]) {
89258 int v = pset->vm_stat_diff[i];
89259 pset->vm_stat_diff[i] = 0;
89260- atomic_long_add(v, &zone->vm_stat[i]);
89261- atomic_long_add(v, &vm_stat[i]);
89262+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
89263+ atomic_long_add_unchecked(v, &vm_stat[i]);
89264 }
89265 }
89266 #endif
89267@@ -1226,7 +1226,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
89268 return NOTIFY_OK;
89269 }
89270
89271-static struct notifier_block __cpuinitdata vmstat_notifier =
89272+static struct notifier_block vmstat_notifier =
89273 { &vmstat_cpuup_callback, NULL, 0 };
89274 #endif
89275
89276@@ -1241,10 +1241,20 @@ static int __init setup_vmstat(void)
89277 start_cpu_timer(cpu);
89278 #endif
89279 #ifdef CONFIG_PROC_FS
89280- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
89281- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
89282- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
89283- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
89284+ {
89285+ mode_t gr_mode = S_IRUGO;
89286+#ifdef CONFIG_GRKERNSEC_PROC_ADD
89287+ gr_mode = S_IRUSR;
89288+#endif
89289+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
89290+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
89291+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
89292+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
89293+#else
89294+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
89295+#endif
89296+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
89297+ }
89298 #endif
89299 return 0;
89300 }
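
The block above picks a single gr_mode at init time: world-readable by default, root-only under CONFIG_GRKERNSEC_PROC_ADD, with vmstat alone regaining group read under CONFIG_GRKERNSEC_PROC_USERGROUP. A compile-time sketch of that mode selection, with the CONFIG_* macros standing in for the Kconfig options:

    #include <stdio.h>
    #include <sys/stat.h>

    /* #define CONFIG_GRKERNSEC_PROC_ADD */
    /* #define CONFIG_GRKERNSEC_PROC_USERGROUP */

    static mode_t proc_mode(void)
    {
    #ifdef CONFIG_GRKERNSEC_PROC_ADD
        return S_IRUSR;                      /* owner (root) read only */
    #else
        return S_IRUSR | S_IRGRP | S_IROTH;  /* S_IRUGO */
    #endif
    }

    int main(void)
    {
        mode_t vmstat_mode = proc_mode();
    #ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
        vmstat_mode |= S_IRGRP;  /* the configured group keeps vmstat */
    #endif
        printf("vmstat mode: %04o\n", (unsigned)vmstat_mode);
        return 0;
    }
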
89301diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
89302index 9424f37..6aabf19 100644
89303--- a/net/8021q/vlan.c
89304+++ b/net/8021q/vlan.c
89305@@ -469,7 +469,7 @@ out:
89306 return NOTIFY_DONE;
89307 }
89308
89309-static struct notifier_block vlan_notifier_block __read_mostly = {
89310+static struct notifier_block vlan_notifier_block = {
89311 .notifier_call = vlan_device_event,
89312 };
89313
89314@@ -544,8 +544,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
89315 err = -EPERM;
89316 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
89317 break;
89318- if ((args.u.name_type >= 0) &&
89319- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
89320+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
89321 struct vlan_net *vn;
89322
89323 vn = net_generic(net, vlan_net_id);
89324diff --git a/net/9p/mod.c b/net/9p/mod.c
89325index 6ab36ae..6f1841b 100644
89326--- a/net/9p/mod.c
89327+++ b/net/9p/mod.c
89328@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
89329 void v9fs_register_trans(struct p9_trans_module *m)
89330 {
89331 spin_lock(&v9fs_trans_lock);
89332- list_add_tail(&m->list, &v9fs_trans_list);
89333+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
89334 spin_unlock(&v9fs_trans_lock);
89335 }
89336 EXPORT_SYMBOL(v9fs_register_trans);
89337@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
89338 void v9fs_unregister_trans(struct p9_trans_module *m)
89339 {
89340 spin_lock(&v9fs_trans_lock);
89341- list_del_init(&m->list);
89342+ pax_list_del_init((struct list_head *)&m->list);
89343 spin_unlock(&v9fs_trans_lock);
89344 }
89345 EXPORT_SYMBOL(v9fs_unregister_trans);
89346diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
89347index 02efb25..41541a9 100644
89348--- a/net/9p/trans_fd.c
89349+++ b/net/9p/trans_fd.c
89350@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
89351 oldfs = get_fs();
89352 set_fs(get_ds());
89353 /* The cast to a user pointer is valid due to the set_fs() */
89354- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
89355+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
89356 set_fs(oldfs);
89357
89358 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
89359diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
89360index 876fbe8..8bbea9f 100644
89361--- a/net/atm/atm_misc.c
89362+++ b/net/atm/atm_misc.c
89363@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
89364 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
89365 return 1;
89366 atm_return(vcc, truesize);
89367- atomic_inc(&vcc->stats->rx_drop);
89368+ atomic_inc_unchecked(&vcc->stats->rx_drop);
89369 return 0;
89370 }
89371 EXPORT_SYMBOL(atm_charge);
89372@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
89373 }
89374 }
89375 atm_return(vcc, guess);
89376- atomic_inc(&vcc->stats->rx_drop);
89377+ atomic_inc_unchecked(&vcc->stats->rx_drop);
89378 return NULL;
89379 }
89380 EXPORT_SYMBOL(atm_alloc_charge);
89381@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
89382
89383 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
89384 {
89385-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
89386+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
89387 __SONET_ITEMS
89388 #undef __HANDLE_ITEM
89389 }
89390@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
89391
89392 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
89393 {
89394-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
89395+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
89396 __SONET_ITEMS
89397 #undef __HANDLE_ITEM
89398 }
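
These hunks convert pure statistics counters to atomic_unchecked_t so that PaX's REFCOUNT hardening, which traps on overflow of an ordinary atomic_t, is not triggered by counters whose wraparound is harmless. A rough userspace illustration of the split, where checked_inc() stands in for the instrumented atomic_inc() and unchecked_inc() for atomic_inc_unchecked():

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* stand-in for the instrumented atomic_inc(): trap on overflow */
    static void checked_inc(int *v)
    {
        if (*v == INT_MAX) {
            fprintf(stderr, "refcount overflow detected, aborting\n");
            abort();
        }
        (*v)++;
    }

    /* stand-in for atomic_inc_unchecked(): wraparound is tolerated */
    static void unchecked_inc(unsigned int *v)
    {
        (*v)++;  /* unsigned wrap is well-defined and harmless here */
    }

    int main(void)
    {
        int refcnt = 1;                   /* a real reference count */
        checked_inc(&refcnt);             /* guarded against overflow */

        unsigned int rx_drop = UINT_MAX;  /* a statistic, not a refcount */
        unchecked_inc(&rx_drop);          /* wraps to 0; nothing to defend */
        printf("refcnt=%d rx_drop=%u\n", refcnt, rx_drop);
        return 0;
    }
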
89399diff --git a/net/atm/lec.h b/net/atm/lec.h
89400index 4149db1..f2ab682 100644
89401--- a/net/atm/lec.h
89402+++ b/net/atm/lec.h
89403@@ -48,7 +48,7 @@ struct lane2_ops {
89404 const u8 *tlvs, u32 sizeoftlvs);
89405 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
89406 const u8 *tlvs, u32 sizeoftlvs);
89407-};
89408+} __no_const;
89409
89410 /*
89411 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
89412diff --git a/net/atm/proc.c b/net/atm/proc.c
89413index bbb6461..cf04016 100644
89414--- a/net/atm/proc.c
89415+++ b/net/atm/proc.c
89416@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
89417 const struct k_atm_aal_stats *stats)
89418 {
89419 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
89420- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
89421- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
89422- atomic_read(&stats->rx_drop));
89423+ atomic_read_unchecked(&stats->tx), atomic_read_unchecked(&stats->tx_err),
89424+ atomic_read_unchecked(&stats->rx), atomic_read_unchecked(&stats->rx_err),
89425+ atomic_read_unchecked(&stats->rx_drop));
89426 }
89427
89428 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
89429diff --git a/net/atm/resources.c b/net/atm/resources.c
89430index 0447d5d..3cf4728 100644
89431--- a/net/atm/resources.c
89432+++ b/net/atm/resources.c
89433@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
89434 static void copy_aal_stats(struct k_atm_aal_stats *from,
89435 struct atm_aal_stats *to)
89436 {
89437-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
89438+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
89439 __AAL_STAT_ITEMS
89440 #undef __HANDLE_ITEM
89441 }
89442@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
89443 static void subtract_aal_stats(struct k_atm_aal_stats *from,
89444 struct atm_aal_stats *to)
89445 {
89446-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
89447+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
89448 __AAL_STAT_ITEMS
89449 #undef __HANDLE_ITEM
89450 }
89451diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
89452index d5744b7..506bae3 100644
89453--- a/net/ax25/sysctl_net_ax25.c
89454+++ b/net/ax25/sysctl_net_ax25.c
89455@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
89456 {
89457 char path[sizeof("net/ax25/") + IFNAMSIZ];
89458 int k;
89459- struct ctl_table *table;
89460+ ctl_table_no_const *table;
89461
89462 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
89463 if (!table)
89464diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
89465index f680ee1..97e3542 100644
89466--- a/net/batman-adv/bat_iv_ogm.c
89467+++ b/net/batman-adv/bat_iv_ogm.c
89468@@ -79,7 +79,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
89469
89470 /* randomize initial seqno to avoid collision */
89471 get_random_bytes(&random_seqno, sizeof(random_seqno));
89472- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
89473+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
89474
89475 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
89476 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
89477@@ -627,9 +627,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
89478 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
89479
89480 /* change sequence number to network order */
89481- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
89482+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
89483 batadv_ogm_packet->seqno = htonl(seqno);
89484- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
89485+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
89486
89487 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
89488 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
89489@@ -1037,7 +1037,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
89490 return;
89491
89492 /* could be changed by schedule_own_packet() */
89493- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
89494+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
89495
89496 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
89497 has_directlink_flag = 1;
89498diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
89499index de27b31..7058bfe 100644
89500--- a/net/batman-adv/bridge_loop_avoidance.c
89501+++ b/net/batman-adv/bridge_loop_avoidance.c
89502@@ -1522,6 +1522,8 @@ out:
89503 * in these cases, the skb is further handled by this function and
89504 * returns 1, otherwise it returns 0 and the caller shall further
89505 * process the skb.
89506+ *
89507+ * This call might reallocate skb data.
89508 */
89509 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
89510 {
89511diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
89512index f105219..7614af3 100644
89513--- a/net/batman-adv/gateway_client.c
89514+++ b/net/batman-adv/gateway_client.c
89515@@ -508,6 +508,7 @@ out:
89516 return 0;
89517 }
89518
89519+/* this call might reallocate skb data */
89520 static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
89521 {
89522 int ret = false;
89523@@ -568,6 +569,7 @@ out:
89524 return ret;
89525 }
89526
89527+/* this call might reallocate skb data */
89528 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
89529 {
89530 struct ethhdr *ethhdr;
89531@@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
89532
89533 if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
89534 return false;
89535+
89536+ /* skb->data might have been reallocated by pskb_may_pull() */
89537+ ethhdr = (struct ethhdr *)skb->data;
89538+ if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
89539+ ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
89540+
89541 udphdr = (struct udphdr *)(skb->data + *header_len);
89542 *header_len += sizeof(*udphdr);
89543
89544@@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
89545 return true;
89546 }
89547
89548+/* this call might reallocate skb data */
89549 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
89550- struct sk_buff *skb, struct ethhdr *ethhdr)
89551+ struct sk_buff *skb)
89552 {
89553 struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
89554 struct batadv_orig_node *orig_dst_node = NULL;
89555 struct batadv_gw_node *curr_gw = NULL;
89556+ struct ethhdr *ethhdr;
89557 bool ret, out_of_range = false;
89558 unsigned int header_len = 0;
89559 uint8_t curr_tq_avg;
89560@@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
89561 if (!ret)
89562 goto out;
89563
89564+ ethhdr = (struct ethhdr *)skb->data;
89565 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
89566 ethhdr->h_dest);
89567 if (!orig_dst_node)
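
These batman-adv hunks re-derive ethhdr from skb->data after every call that can reallocate the skb's linear buffer (pskb_may_pull(), batadv_bla_tx()), because any pointer computed before such a call may be left dangling. The same re-derive-after-reallocation discipline applies to plain realloc(); a minimal sketch:

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
        char *buf = malloc(16);
        if (!buf)
            return 1;
        strcpy(buf, "header");

        char *hdr = buf;                 /* cached pointer into the buffer */

        char *tmp = realloc(buf, 4096);  /* may move the allocation ...    */
        if (!tmp) {
            free(buf);
            return 1;
        }
        buf = tmp;
        hdr = buf;        /* ... so the cached pointer must be re-derived */

        printf("%s\n", hdr);
        free(buf);
        return 0;
    }

Unlike realloc(), pskb_may_pull() leaves the sk_buff itself where it was; only skb->data, and every pointer derived from it, can move.
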
89568diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
89569index 039902d..1037d75 100644
89570--- a/net/batman-adv/gateway_client.h
89571+++ b/net/batman-adv/gateway_client.h
89572@@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
89573 void batadv_gw_node_purge(struct batadv_priv *bat_priv);
89574 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
89575 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
89576-bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
89577- struct sk_buff *skb, struct ethhdr *ethhdr);
89578+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
89579
89580 #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
89581diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
89582index 522243a..b48c0ef 100644
89583--- a/net/batman-adv/hard-interface.c
89584+++ b/net/batman-adv/hard-interface.c
89585@@ -401,7 +401,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
89586 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
89587 dev_add_pack(&hard_iface->batman_adv_ptype);
89588
89589- atomic_set(&hard_iface->frag_seqno, 1);
89590+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
89591 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
89592 hard_iface->net_dev->name);
89593
89594@@ -550,7 +550,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
89595 /* This can't be called via a bat_priv callback because
89596 * we have no bat_priv yet.
89597 */
89598- atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
89599+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, 1);
89600 hard_iface->bat_iv.ogm_buff = NULL;
89601
89602 return hard_iface;
89603diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
89604index 819dfb0..226bacd 100644
89605--- a/net/batman-adv/soft-interface.c
89606+++ b/net/batman-adv/soft-interface.c
89607@@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb,
89608 if (batadv_bla_tx(bat_priv, skb, vid))
89609 goto dropped;
89610
89611+ /* skb->data might have been reallocated by batadv_bla_tx() */
89612+ ethhdr = (struct ethhdr *)skb->data;
89613+
89614 /* Register the client MAC in the transtable */
89615 if (!is_multicast_ether_addr(ethhdr->h_source))
89616 batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
89617@@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb,
89618 default:
89619 break;
89620 }
89621+
89622+ /* reminder: ethhdr might have become unusable from here on
89623+ * (batadv_gw_is_dhcp_target() might have reallocated skb data)
89624+ */
89625 }
89626
89627 /* ethernet packet should be broadcasted */
89628@@ -253,7 +260,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
89629 primary_if->net_dev->dev_addr, ETH_ALEN);
89630
89631 /* set broadcast sequence number */
89632- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
89633+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
89634 bcast_packet->seqno = htonl(seqno);
89635
89636 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
89637@@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
89638 /* unicast packet */
89639 } else {
89640 if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
89641- ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
89642+ ret = batadv_gw_out_of_range(bat_priv, skb);
89643 if (ret)
89644 goto dropped;
89645 }
89646@@ -472,7 +479,7 @@ static int batadv_softif_init_late(struct net_device *dev)
89647 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
89648
89649 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
89650- atomic_set(&bat_priv->bcast_seqno, 1);
89651+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
89652 atomic_set(&bat_priv->tt.vn, 0);
89653 atomic_set(&bat_priv->tt.local_changes, 0);
89654 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
89655diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
89656index aba8364..50fcbb8 100644
89657--- a/net/batman-adv/types.h
89658+++ b/net/batman-adv/types.h
89659@@ -51,7 +51,7 @@
89660 struct batadv_hard_iface_bat_iv {
89661 unsigned char *ogm_buff;
89662 int ogm_buff_len;
89663- atomic_t ogm_seqno;
89664+ atomic_unchecked_t ogm_seqno;
89665 };
89666
89667 /**
89668@@ -75,7 +75,7 @@ struct batadv_hard_iface {
89669 int16_t if_num;
89670 char if_status;
89671 struct net_device *net_dev;
89672- atomic_t frag_seqno;
89673+ atomic_unchecked_t frag_seqno;
89674 struct kobject *hardif_obj;
89675 atomic_t refcount;
89676 struct packet_type batman_adv_ptype;
89677@@ -558,7 +558,7 @@ struct batadv_priv {
89678 #ifdef CONFIG_BATMAN_ADV_DEBUG
89679 atomic_t log_level;
89680 #endif
89681- atomic_t bcast_seqno;
89682+ atomic_unchecked_t bcast_seqno;
89683 atomic_t bcast_queue_left;
89684 atomic_t batman_queue_left;
89685 char num_ifaces;
89686diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
89687index 0bb3b59..0e3052e 100644
89688--- a/net/batman-adv/unicast.c
89689+++ b/net/batman-adv/unicast.c
89690@@ -270,7 +270,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
89691 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
89692 frag2->flags = large_tail;
89693
89694- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
89695+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
89696 frag1->seqno = htons(seqno - 1);
89697 frag2->seqno = htons(seqno);
89698
89699@@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
89700 * @skb: the skb containing the payload to encapsulate
89701 * @orig_node: the destination node
89702 *
89703- * Returns false if the payload could not be encapsulated or true otherwise
89704+ * Returns false if the payload could not be encapsulated or true otherwise.
89705+ *
89706+ * This call might reallocate skb data.
89707 */
89708 static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
89709 struct batadv_orig_node *orig_node)
89710@@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
89711 * @orig_node: the destination node
89712 * @packet_subtype: the batman 4addr packet subtype to use
89713 *
89714- * Returns false if the payload could not be encapsulated or true otherwise
89715+ * Returns false if the payload could not be encapsulated or true otherwise.
89716+ *
89717+ * This call might reallocate skb data.
89718 */
89719 bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
89720 struct sk_buff *skb,
89721@@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
89722 struct batadv_neigh_node *neigh_node;
89723 int data_len = skb->len;
89724 int ret = NET_RX_DROP;
89725- unsigned int dev_mtu;
89726+ unsigned int dev_mtu, header_len;
89727
89728 /* get routing information */
89729 if (is_multicast_ether_addr(ethhdr->h_dest)) {
89730@@ -429,10 +433,12 @@ find_router:
89731 switch (packet_type) {
89732 case BATADV_UNICAST:
89733 batadv_unicast_prepare_skb(skb, orig_node);
89734+ header_len = sizeof(struct batadv_unicast_packet);
89735 break;
89736 case BATADV_UNICAST_4ADDR:
89737 batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
89738 packet_subtype);
89739+ header_len = sizeof(struct batadv_unicast_4addr_packet);
89740 break;
89741 default:
89742 /* this function supports UNICAST and UNICAST_4ADDR only. It
89743@@ -441,6 +447,7 @@ find_router:
89744 goto out;
89745 }
89746
89747+ ethhdr = (struct ethhdr *)(skb->data + header_len);
89748 unicast_packet = (struct batadv_unicast_packet *)skb->data;
89749
89750 /* inform the destination node that we are still missing a correct route
89751diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
89752index ace5e55..a65a1c0 100644
89753--- a/net/bluetooth/hci_core.c
89754+++ b/net/bluetooth/hci_core.c
89755@@ -2211,16 +2211,16 @@ int hci_register_dev(struct hci_dev *hdev)
89756 list_add(&hdev->list, &hci_dev_list);
89757 write_unlock(&hci_dev_list_lock);
89758
89759- hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
89760- WQ_MEM_RECLAIM, 1);
89761+ hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
89762+ WQ_MEM_RECLAIM, 1, hdev->name);
89763 if (!hdev->workqueue) {
89764 error = -ENOMEM;
89765 goto err;
89766 }
89767
89768- hdev->req_workqueue = alloc_workqueue(hdev->name,
89769+ hdev->req_workqueue = alloc_workqueue("%s",
89770 WQ_HIGHPRI | WQ_UNBOUND |
89771- WQ_MEM_RECLAIM, 1);
89772+ WQ_MEM_RECLAIM, 1, hdev->name);
89773 if (!hdev->req_workqueue) {
89774 destroy_workqueue(hdev->workqueue);
89775 error = -ENOMEM;
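
alloc_workqueue() takes a printf-style format string on these kernels, so passing hdev->name directly would let a stray '%' in a device name be parsed as a conversion specifier; the hunk supplies a constant "%s" format and the name as its argument instead. The classic userspace counterpart of the same rule:

    #include <stdio.h>

    int main(void)
    {
        const char *name = "hci%x%x%x";  /* attacker-influenced string */

        /* printf(name); */              /* WRONG: name parsed as a format */
        printf("%s\n", name);            /* right: constant format, name as data */
        return 0;
    }
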
89776diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
89777index 9bd7d95..6c4884f 100644
89778--- a/net/bluetooth/hci_sock.c
89779+++ b/net/bluetooth/hci_sock.c
89780@@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
89781 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
89782 }
89783
89784- len = min_t(unsigned int, len, sizeof(uf));
89785+ len = min((size_t)len, sizeof(uf));
89786 if (copy_from_user(&uf, optval, len)) {
89787 err = -EFAULT;
89788 break;
89789diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
89790index 68843a2..30e9342 100644
89791--- a/net/bluetooth/l2cap_core.c
89792+++ b/net/bluetooth/l2cap_core.c
89793@@ -3507,8 +3507,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
89794 break;
89795
89796 case L2CAP_CONF_RFC:
89797- if (olen == sizeof(rfc))
89798- memcpy(&rfc, (void *)val, olen);
89799+ if (olen != sizeof(rfc))
89800+ break;
89801+
89802+ memcpy(&rfc, (void *)val, olen);
89803
89804 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
89805 rfc.mode != chan->mode)
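
The rewritten L2CAP_CONF_RFC case bails out unless the option length is exactly sizeof(rfc); previously a mismatched length merely skipped the memcpy(), and the code went on to act on an uninitialized rfc. A small sketch of the validate-then-copy pattern; struct rfc_opt and parse_rfc() are made up for illustration:

    #include <string.h>
    #include <stdint.h>
    #include <stdio.h>

    struct rfc_opt { uint8_t mode, txwin, max_tx; };

    /* copy a length-prefixed option only when the length matches exactly */
    static int parse_rfc(const void *val, size_t olen, struct rfc_opt *out)
    {
        if (olen != sizeof(*out))
            return -1;        /* reject short or oversized options */
        memcpy(out, val, olen);
        return 0;
    }

    int main(void)
    {
        struct rfc_opt opt;
        uint8_t wire[3] = { 1, 2, 3 };
        printf("%d\n", parse_rfc(wire, sizeof(wire), &opt));  /* 0: accepted */
        printf("%d\n", parse_rfc(wire, 2, &opt));             /* -1: rejected */
        return 0;
    }
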
89806diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
89807index 36fed40..be2eeb2 100644
89808--- a/net/bluetooth/l2cap_sock.c
89809+++ b/net/bluetooth/l2cap_sock.c
89810@@ -485,7 +485,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
89811 struct sock *sk = sock->sk;
89812 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
89813 struct l2cap_options opts;
89814- int len, err = 0;
89815+ int err = 0;
89816+ size_t len = optlen;
89817 u32 opt;
89818
89819 BT_DBG("sk %p", sk);
89820@@ -507,7 +508,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
89821 opts.max_tx = chan->max_tx;
89822 opts.txwin_size = chan->tx_win;
89823
89824- len = min_t(unsigned int, sizeof(opts), optlen);
89825+ len = min(sizeof(opts), len);
89826 if (copy_from_user((char *) &opts, optval, len)) {
89827 err = -EFAULT;
89828 break;
89829@@ -587,7 +588,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
89830 struct bt_security sec;
89831 struct bt_power pwr;
89832 struct l2cap_conn *conn;
89833- int len, err = 0;
89834+ int err = 0;
89835+ size_t len = optlen;
89836 u32 opt;
89837
89838 BT_DBG("sk %p", sk);
89839@@ -610,7 +612,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
89840
89841 sec.level = BT_SECURITY_LOW;
89842
89843- len = min_t(unsigned int, sizeof(sec), optlen);
89844+ len = min(sizeof(sec), len);
89845 if (copy_from_user((char *) &sec, optval, len)) {
89846 err = -EFAULT;
89847 break;
89848@@ -707,7 +709,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
89849
89850 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
89851
89852- len = min_t(unsigned int, sizeof(pwr), optlen);
89853+ len = min(sizeof(pwr), len);
89854 if (copy_from_user((char *) &pwr, optval, len)) {
89855 err = -EFAULT;
89856 break;
89857diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
89858index 30b3721..c1bd0a0 100644
89859--- a/net/bluetooth/rfcomm/sock.c
89860+++ b/net/bluetooth/rfcomm/sock.c
89861@@ -666,7 +666,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
89862 struct sock *sk = sock->sk;
89863 struct bt_security sec;
89864 int err = 0;
89865- size_t len;
89866+ size_t len = optlen;
89867 u32 opt;
89868
89869 BT_DBG("sk %p", sk);
89870@@ -688,7 +688,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
89871
89872 sec.level = BT_SECURITY_LOW;
89873
89874- len = min_t(unsigned int, sizeof(sec), optlen);
89875+ len = min(sizeof(sec), len);
89876 if (copy_from_user((char *) &sec, optval, len)) {
89877 err = -EFAULT;
89878 break;
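
In both l2cap_sock.c and rfcomm/sock.c above, len becomes a size_t seeded from optlen, keeping the clamp-and-copy sequence in unsigned arithmetic end to end: a negative optlen turns into a huge unsigned value immediately and is clamped by min(), rather than surviving as a signed int until the copy. A short demonstration of the hazard, using a stand-in struct bt_security and min_sz() helper:

    #include <stdio.h>
    #include <stddef.h>

    struct bt_security { unsigned char level, key_size; };

    static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

    int main(void)
    {
        int optlen = -1;              /* hostile caller-supplied length */
        size_t len = (size_t)optlen;  /* -1 becomes SIZE_MAX ...        */
        len = min_sz(sizeof(struct bt_security), len);  /* ... clamped  */
        printf("copy length: %zu\n", len);              /* prints 2     */
        return 0;
    }
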
89879diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
89880index b6e44ad..5b0d514 100644
89881--- a/net/bluetooth/rfcomm/tty.c
89882+++ b/net/bluetooth/rfcomm/tty.c
89883@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
89884 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
89885
89886 spin_lock_irqsave(&dev->port.lock, flags);
89887- if (dev->port.count > 0) {
89888+ if (atomic_read(&dev->port.count) > 0) {
89889 spin_unlock_irqrestore(&dev->port.lock, flags);
89890 return;
89891 }
89892@@ -659,10 +659,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
89893 return -ENODEV;
89894
89895 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
89896- dev->channel, dev->port.count);
89897+ dev->channel, atomic_read(&dev->port.count));
89898
89899 spin_lock_irqsave(&dev->port.lock, flags);
89900- if (++dev->port.count > 1) {
89901+ if (atomic_inc_return(&dev->port.count) > 1) {
89902 spin_unlock_irqrestore(&dev->port.lock, flags);
89903 return 0;
89904 }
89905@@ -727,10 +727,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
89906 return;
89907
89908 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
89909- dev->port.count);
89910+ atomic_read(&dev->port.count));
89911
89912 spin_lock_irqsave(&dev->port.lock, flags);
89913- if (!--dev->port.count) {
89914+ if (!atomic_dec_return(&dev->port.count)) {
89915 spin_unlock_irqrestore(&dev->port.lock, flags);
89916 if (dev->tty_dev->parent)
89917 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
89918diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
89919index 3d110c4..4e1b2eb 100644
89920--- a/net/bridge/netfilter/ebtables.c
89921+++ b/net/bridge/netfilter/ebtables.c
89922@@ -1525,7 +1525,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
89923 tmp.valid_hooks = t->table->valid_hooks;
89924 }
89925 mutex_unlock(&ebt_mutex);
89926- if (copy_to_user(user, &tmp, *len) != 0){
89927+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
89928 BUGPRINT("c2u Didn't work\n");
89929 ret = -EFAULT;
89930 break;
89931@@ -2331,7 +2331,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
89932 goto out;
89933 tmp.valid_hooks = t->valid_hooks;
89934
89935- if (copy_to_user(user, &tmp, *len) != 0) {
89936+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
89937 ret = -EFAULT;
89938 break;
89939 }
89940@@ -2342,7 +2342,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
89941 tmp.entries_size = t->table->entries_size;
89942 tmp.valid_hooks = t->table->valid_hooks;
89943
89944- if (copy_to_user(user, &tmp, *len) != 0) {
89945+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
89946 ret = -EFAULT;
89947 break;
89948 }
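
Each of these get-handlers copies *len caller-chosen bytes out of a fixed-size struct, so without the added `*len > sizeof(tmp)` guard an oversized request could disclose adjacent kernel stack to userspace. A userspace model of the guarded copy-out; struct reply and copy_out() are illustrative only:

    #include <string.h>
    #include <stdio.h>
    #include <errno.h>

    struct reply { char name[32]; unsigned int valid_hooks; };

    /* copy at most sizeof(*tmp) bytes out; reject oversized requests */
    static int copy_out(char *user, size_t ulen, const struct reply *tmp)
    {
        if (ulen > sizeof(*tmp))
            return -EFAULT;   /* would leak bytes past tmp */
        memcpy(user, tmp, ulen);
        return 0;
    }

    int main(void)
    {
        struct reply r = { "filter", 0x1f };
        char out[64];
        printf("%d\n", copy_out(out, sizeof(r), &r));  /* 0: exact size ok */
        printf("%d\n", copy_out(out, 64, &r));         /* -14: rejected */
        return 0;
    }
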
89949diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
89950index 2bd4b58..0dc30a1 100644
89951--- a/net/caif/cfctrl.c
89952+++ b/net/caif/cfctrl.c
89953@@ -10,6 +10,7 @@
89954 #include <linux/spinlock.h>
89955 #include <linux/slab.h>
89956 #include <linux/pkt_sched.h>
89957+#include <linux/sched.h>
89958 #include <net/caif/caif_layer.h>
89959 #include <net/caif/cfpkt.h>
89960 #include <net/caif/cfctrl.h>
89961@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
89962 memset(&dev_info, 0, sizeof(dev_info));
89963 dev_info.id = 0xff;
89964 cfsrvl_init(&this->serv, 0, &dev_info, false);
89965- atomic_set(&this->req_seq_no, 1);
89966- atomic_set(&this->rsp_seq_no, 1);
89967+ atomic_set_unchecked(&this->req_seq_no, 1);
89968+ atomic_set_unchecked(&this->rsp_seq_no, 1);
89969 this->serv.layer.receive = cfctrl_recv;
89970 sprintf(this->serv.layer.name, "ctrl");
89971 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
89972@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
89973 struct cfctrl_request_info *req)
89974 {
89975 spin_lock_bh(&ctrl->info_list_lock);
89976- atomic_inc(&ctrl->req_seq_no);
89977- req->sequence_no = atomic_read(&ctrl->req_seq_no);
89978+ atomic_inc_unchecked(&ctrl->req_seq_no);
89979+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
89980 list_add_tail(&req->list, &ctrl->list);
89981 spin_unlock_bh(&ctrl->info_list_lock);
89982 }
89983@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
89984 if (p != first)
89985 pr_warn("Requests are not received in order\n");
89986
89987- atomic_set(&ctrl->rsp_seq_no,
89988+ atomic_set_unchecked(&ctrl->rsp_seq_no,
89989 p->sequence_no);
89990 list_del(&p->list);
89991 goto out;
89992diff --git a/net/can/af_can.c b/net/can/af_can.c
89993index c4e5085..aa9efdf 100644
89994--- a/net/can/af_can.c
89995+++ b/net/can/af_can.c
89996@@ -862,7 +862,7 @@ static const struct net_proto_family can_family_ops = {
89997 };
89998
89999 /* notifier block for netdevice event */
90000-static struct notifier_block can_netdev_notifier __read_mostly = {
90001+static struct notifier_block can_netdev_notifier = {
90002 .notifier_call = can_notifier,
90003 };
90004
90005diff --git a/net/can/gw.c b/net/can/gw.c
90006index 3ee690e..00d581b 100644
90007--- a/net/can/gw.c
90008+++ b/net/can/gw.c
90009@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
90010 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
90011
90012 static HLIST_HEAD(cgw_list);
90013-static struct notifier_block notifier;
90014
90015 static struct kmem_cache *cgw_cache __read_mostly;
90016
90017@@ -927,6 +926,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
90018 return err;
90019 }
90020
90021+static struct notifier_block notifier = {
90022+ .notifier_call = cgw_notifier
90023+};
90024+
90025 static __init int cgw_module_init(void)
90026 {
90027 /* sanitize given module parameter */
90028@@ -942,7 +945,6 @@ static __init int cgw_module_init(void)
90029 return -ENOMEM;
90030
90031 /* set notifier */
90032- notifier.notifier_call = cgw_notifier;
90033 register_netdevice_notifier(&notifier);
90034
90035 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
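
Moving the .notifier_call assignment out of cgw_module_init() and into a static initializer means the notifier_block is fully formed at compile time, which is what allows such structures to be placed in read-only memory under the constification plugin. In plain C the difference is just this:

    #include <stdio.h>

    struct notifier_block {
        int (*notifier_call)(void *ptr);
    };

    static int cgw_notifier(void *ptr) { (void)ptr; return 0; }

    /* initialized at compile time; never written again at run time */
    static struct notifier_block notifier = {
        .notifier_call = cgw_notifier,
    };

    int main(void)
    {
        printf("%d\n", notifier.notifier_call(NULL));
        return 0;
    }

The same shape recurs throughout this patch wherever a notifier_block or ops structure loses a run-time assignment.
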
90036diff --git a/net/compat.c b/net/compat.c
90037index f0a1ba6..0541331 100644
90038--- a/net/compat.c
90039+++ b/net/compat.c
90040@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
90041 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
90042 __get_user(kmsg->msg_flags, &umsg->msg_flags))
90043 return -EFAULT;
90044- kmsg->msg_name = compat_ptr(tmp1);
90045- kmsg->msg_iov = compat_ptr(tmp2);
90046- kmsg->msg_control = compat_ptr(tmp3);
90047+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
90048+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
90049+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
90050 return 0;
90051 }
90052
90053@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
90054
90055 if (kern_msg->msg_namelen) {
90056 if (mode == VERIFY_READ) {
90057- int err = move_addr_to_kernel(kern_msg->msg_name,
90058+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
90059 kern_msg->msg_namelen,
90060 kern_address);
90061 if (err < 0)
90062@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
90063 kern_msg->msg_name = NULL;
90064
90065 tot_len = iov_from_user_compat_to_kern(kern_iov,
90066- (struct compat_iovec __user *)kern_msg->msg_iov,
90067+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
90068 kern_msg->msg_iovlen);
90069 if (tot_len >= 0)
90070 kern_msg->msg_iov = kern_iov;
90071@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
90072
90073 #define CMSG_COMPAT_FIRSTHDR(msg) \
90074 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
90075- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
90076+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
90077 (struct compat_cmsghdr __user *)NULL)
90078
90079 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
90080 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
90081 (ucmlen) <= (unsigned long) \
90082 ((mhdr)->msg_controllen - \
90083- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
90084+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
90085
90086 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
90087 struct compat_cmsghdr __user *cmsg, int cmsg_len)
90088 {
90089 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
90090- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
90091+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
90092 msg->msg_controllen)
90093 return NULL;
90094 return (struct compat_cmsghdr __user *)ptr;
90095@@ -219,7 +219,7 @@ Efault:
90096
90097 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
90098 {
90099- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
90100+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
90101 struct compat_cmsghdr cmhdr;
90102 struct compat_timeval ctv;
90103 struct compat_timespec cts[3];
90104@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
90105
90106 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
90107 {
90108- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
90109+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
90110 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
90111 int fdnum = scm->fp->count;
90112 struct file **fp = scm->fp->fp;
90113@@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
90114 return -EFAULT;
90115 old_fs = get_fs();
90116 set_fs(KERNEL_DS);
90117- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
90118+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
90119 set_fs(old_fs);
90120
90121 return err;
90122@@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
90123 len = sizeof(ktime);
90124 old_fs = get_fs();
90125 set_fs(KERNEL_DS);
90126- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
90127+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
90128 set_fs(old_fs);
90129
90130 if (!err) {
90131@@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
90132 case MCAST_JOIN_GROUP:
90133 case MCAST_LEAVE_GROUP:
90134 {
90135- struct compat_group_req __user *gr32 = (void *)optval;
90136+ struct compat_group_req __user *gr32 = (void __user *)optval;
90137 struct group_req __user *kgr =
90138 compat_alloc_user_space(sizeof(struct group_req));
90139 u32 interface;
90140@@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
90141 case MCAST_BLOCK_SOURCE:
90142 case MCAST_UNBLOCK_SOURCE:
90143 {
90144- struct compat_group_source_req __user *gsr32 = (void *)optval;
90145+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
90146 struct group_source_req __user *kgsr = compat_alloc_user_space(
90147 sizeof(struct group_source_req));
90148 u32 interface;
90149@@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
90150 }
90151 case MCAST_MSFILTER:
90152 {
90153- struct compat_group_filter __user *gf32 = (void *)optval;
90154+ struct compat_group_filter __user *gf32 = (void __user *)optval;
90155 struct group_filter __user *kgf;
90156 u32 interface, fmode, numsrc;
90157
90158@@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
90159 char __user *optval, int __user *optlen,
90160 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
90161 {
90162- struct compat_group_filter __user *gf32 = (void *)optval;
90163+ struct compat_group_filter __user *gf32 = (void __user *)optval;
90164 struct group_filter __user *kgf;
90165 int __user *koptlen;
90166 u32 interface, fmode, numsrc;
90167@@ -805,7 +805,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
90168
90169 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
90170 return -EINVAL;
90171- if (copy_from_user(a, args, nas[call]))
90172+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
90173 return -EFAULT;
90174 a0 = a[0];
90175 a1 = a[1];
90176diff --git a/net/core/datagram.c b/net/core/datagram.c
90177index b71423d..0360434 100644
90178--- a/net/core/datagram.c
90179+++ b/net/core/datagram.c
90180@@ -295,7 +295,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
90181 }
90182
90183 kfree_skb(skb);
90184- atomic_inc(&sk->sk_drops);
90185+ atomic_inc_unchecked(&sk->sk_drops);
90186 sk_mem_reclaim_partial(sk);
90187
90188 return err;
90189diff --git a/net/core/dev.c b/net/core/dev.c
90190index 7ddbb31..3902452 100644
90191--- a/net/core/dev.c
90192+++ b/net/core/dev.c
90193@@ -1649,7 +1649,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
90194 {
90195 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
90196 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
90197- atomic_long_inc(&dev->rx_dropped);
90198+ atomic_long_inc_unchecked(&dev->rx_dropped);
90199 kfree_skb(skb);
90200 return NET_RX_DROP;
90201 }
90202@@ -1658,7 +1658,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
90203 skb_orphan(skb);
90204
90205 if (unlikely(!is_skb_forwardable(dev, skb))) {
90206- atomic_long_inc(&dev->rx_dropped);
90207+ atomic_long_inc_unchecked(&dev->rx_dropped);
90208 kfree_skb(skb);
90209 return NET_RX_DROP;
90210 }
90211@@ -2404,7 +2404,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
90212
90213 struct dev_gso_cb {
90214 void (*destructor)(struct sk_buff *skb);
90215-};
90216+} __no_const;
90217
90218 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
90219
90220@@ -3139,7 +3139,7 @@ enqueue:
90221
90222 local_irq_restore(flags);
90223
90224- atomic_long_inc(&skb->dev->rx_dropped);
90225+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
90226 kfree_skb(skb);
90227 return NET_RX_DROP;
90228 }
90229@@ -3211,7 +3211,7 @@ int netif_rx_ni(struct sk_buff *skb)
90230 }
90231 EXPORT_SYMBOL(netif_rx_ni);
90232
90233-static void net_tx_action(struct softirq_action *h)
90234+static void net_tx_action(void)
90235 {
90236 struct softnet_data *sd = &__get_cpu_var(softnet_data);
90237
90238@@ -3545,7 +3545,7 @@ ncls:
90239 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
90240 } else {
90241 drop:
90242- atomic_long_inc(&skb->dev->rx_dropped);
90243+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
90244 kfree_skb(skb);
90245 /* Jamal, now you will not able to escape explaining
90246 * me how you were going to use this. :-)
90247@@ -4153,7 +4153,7 @@ void netif_napi_del(struct napi_struct *napi)
90248 }
90249 EXPORT_SYMBOL(netif_napi_del);
90250
90251-static void net_rx_action(struct softirq_action *h)
90252+static void net_rx_action(void)
90253 {
90254 struct softnet_data *sd = &__get_cpu_var(softnet_data);
90255 unsigned long time_limit = jiffies + 2;
90256@@ -5590,7 +5590,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
90257 } else {
90258 netdev_stats_to_stats64(storage, &dev->stats);
90259 }
90260- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
90261+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
90262 return storage;
90263 }
90264 EXPORT_SYMBOL(dev_get_stats);
90265diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
90266index 5b7d0e1..cb960fc 100644
90267--- a/net/core/dev_ioctl.c
90268+++ b/net/core/dev_ioctl.c
90269@@ -365,9 +365,13 @@ void dev_load(struct net *net, const char *name)
90270 if (no_module && capable(CAP_NET_ADMIN))
90271 no_module = request_module("netdev-%s", name);
90272 if (no_module && capable(CAP_SYS_MODULE)) {
90273+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90274+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
90275+#else
90276 if (!request_module("%s", name))
90277 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
90278 name);
90279+#endif
90280 }
90281 }
90282 EXPORT_SYMBOL(dev_load);
90283diff --git a/net/core/ethtool.c b/net/core/ethtool.c
90284index ce91766..3b71cdb 100644
90285--- a/net/core/ethtool.c
90286+++ b/net/core/ethtool.c
90287@@ -1319,10 +1319,19 @@ static int ethtool_get_dump_data(struct net_device *dev,
90288 if (ret)
90289 return ret;
90290
90291- len = (tmp.len > dump.len) ? dump.len : tmp.len;
90292+ len = min(tmp.len, dump.len);
90293 if (!len)
90294 return -EFAULT;
90295
90296+ /* Don't ever let the driver think there's more space available
90297+ * than it requested with .get_dump_flag().
90298+ */
90299+ dump.len = len;
90300+
90301+ /* Always allocate enough space to hold the whole thing so that the
90302+ * driver does not need to check the length and bother with partial
90303+ * dumping.
90304+ */
90305 data = vzalloc(tmp.len);
90306 if (!data)
90307 return -ENOMEM;
90308@@ -1330,6 +1339,16 @@ static int ethtool_get_dump_data(struct net_device *dev,
90309 if (ret)
90310 goto out;
90311
90312+ /* There are two sane possibilities:
90313+ * 1. The driver's .get_dump_data() does not touch dump.len.
90314+ * 2. Or it may set dump.len to how much it really writes, which
90315+ * should be tmp.len (or len if it can do a partial dump).
90316+ * In any case respond to userspace with the actual length of data
90317+ * it's receiving.
90318+ */
90319+ WARN_ON(dump.len != len && dump.len != tmp.len);
90320+ dump.len = len;
90321+
90322 if (copy_to_user(useraddr, &dump, sizeof(dump))) {
90323 ret = -EFAULT;
90324 goto out;
90325diff --git a/net/core/flow.c b/net/core/flow.c
90326index 7102f16..146b4bd 100644
90327--- a/net/core/flow.c
90328+++ b/net/core/flow.c
90329@@ -61,7 +61,7 @@ struct flow_cache {
90330 struct timer_list rnd_timer;
90331 };
90332
90333-atomic_t flow_cache_genid = ATOMIC_INIT(0);
90334+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
90335 EXPORT_SYMBOL(flow_cache_genid);
90336 static struct flow_cache flow_cache_global;
90337 static struct kmem_cache *flow_cachep __read_mostly;
90338@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
90339
90340 static int flow_entry_valid(struct flow_cache_entry *fle)
90341 {
90342- if (atomic_read(&flow_cache_genid) != fle->genid)
90343+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
90344 return 0;
90345 if (fle->object && !fle->object->ops->check(fle->object))
90346 return 0;
90347@@ -258,7 +258,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
90348 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
90349 fcp->hash_count++;
90350 }
90351- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
90352+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
90353 flo = fle->object;
90354 if (!flo)
90355 goto ret_object;
90356@@ -279,7 +279,7 @@ nocache:
90357 }
90358 flo = resolver(net, key, family, dir, flo, ctx);
90359 if (fle) {
90360- fle->genid = atomic_read(&flow_cache_genid);
90361+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
90362 if (!IS_ERR(flo))
90363 fle->object = flo;
90364 else
90365diff --git a/net/core/iovec.c b/net/core/iovec.c
90366index de178e4..1dabd8b 100644
90367--- a/net/core/iovec.c
90368+++ b/net/core/iovec.c
90369@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
90370 if (m->msg_namelen) {
90371 if (mode == VERIFY_READ) {
90372 void __user *namep;
90373- namep = (void __user __force *) m->msg_name;
90374+ namep = (void __force_user *) m->msg_name;
90375 err = move_addr_to_kernel(namep, m->msg_namelen,
90376 address);
90377 if (err < 0)
90378@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
90379 }
90380
90381 size = m->msg_iovlen * sizeof(struct iovec);
90382- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
90383+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
90384 return -EFAULT;
90385
90386 m->msg_iov = iov;
90387diff --git a/net/core/neighbour.c b/net/core/neighbour.c
90388index ce90b02..8752627 100644
90389--- a/net/core/neighbour.c
90390+++ b/net/core/neighbour.c
90391@@ -2771,7 +2771,7 @@ static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
90392 size_t *lenp, loff_t *ppos)
90393 {
90394 int size, ret;
90395- ctl_table tmp = *ctl;
90396+ ctl_table_no_const tmp = *ctl;
90397
90398 tmp.extra1 = &zero;
90399 tmp.extra2 = &unres_qlen_max;
90400diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
90401index 569d355..79cf2d0 100644
90402--- a/net/core/net-procfs.c
90403+++ b/net/core/net-procfs.c
90404@@ -271,8 +271,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
90405 else
90406 seq_printf(seq, "%04x", ntohs(pt->type));
90407
90408+#ifdef CONFIG_GRKERNSEC_HIDESYM
90409+ seq_printf(seq, " %-8s %pf\n",
90410+ pt->dev ? pt->dev->name : "", NULL);
90411+#else
90412 seq_printf(seq, " %-8s %pf\n",
90413 pt->dev ? pt->dev->name : "", pt->func);
90414+#endif
90415 }
90416
90417 return 0;
90418diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
90419index 981fed3..536af34 100644
90420--- a/net/core/net-sysfs.c
90421+++ b/net/core/net-sysfs.c
90422@@ -1311,7 +1311,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
90423 }
90424 EXPORT_SYMBOL(netdev_class_remove_file);
90425
90426-int netdev_kobject_init(void)
90427+int __init netdev_kobject_init(void)
90428 {
90429 kobj_ns_type_register(&net_ns_type_operations);
90430 return class_register(&net_class);
90431diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
90432index f9765203..9feaef8 100644
90433--- a/net/core/net_namespace.c
90434+++ b/net/core/net_namespace.c
90435@@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
90436 int error;
90437 LIST_HEAD(net_exit_list);
90438
90439- list_add_tail(&ops->list, list);
90440+ pax_list_add_tail((struct list_head *)&ops->list, list);
90441 if (ops->init || (ops->id && ops->size)) {
90442 for_each_net(net) {
90443 error = ops_init(ops, net);
90444@@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list,
90445
90446 out_undo:
90447 /* If I have an error cleanup all namespaces I initialized */
90448- list_del(&ops->list);
90449+ pax_list_del((struct list_head *)&ops->list);
90450 ops_exit_list(ops, &net_exit_list);
90451 ops_free_list(ops, &net_exit_list);
90452 return error;
90453@@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
90454 struct net *net;
90455 LIST_HEAD(net_exit_list);
90456
90457- list_del(&ops->list);
90458+ pax_list_del((struct list_head *)&ops->list);
90459 for_each_net(net)
90460 list_add_tail(&net->exit_list, &net_exit_list);
90461 ops_exit_list(ops, &net_exit_list);
90462@@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops)
90463 mutex_lock(&net_mutex);
90464 error = register_pernet_operations(&pernet_list, ops);
90465 if (!error && (first_device == &pernet_list))
90466- first_device = &ops->list;
90467+ first_device = (struct list_head *)&ops->list;
90468 mutex_unlock(&net_mutex);
90469 return error;
90470 }
90471diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
90472index a08bd2b..c59bd7c 100644
90473--- a/net/core/rtnetlink.c
90474+++ b/net/core/rtnetlink.c
90475@@ -58,7 +58,7 @@ struct rtnl_link {
90476 rtnl_doit_func doit;
90477 rtnl_dumpit_func dumpit;
90478 rtnl_calcit_func calcit;
90479-};
90480+} __no_const;
90481
90482 static DEFINE_MUTEX(rtnl_mutex);
90483
90484@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
90485 if (rtnl_link_ops_get(ops->kind))
90486 return -EEXIST;
90487
90488- if (!ops->dellink)
90489- ops->dellink = unregister_netdevice_queue;
90490+ if (!ops->dellink) {
90491+ pax_open_kernel();
90492+ *(void **)&ops->dellink = unregister_netdevice_queue;
90493+ pax_close_kernel();
90494+ }
90495
90496- list_add_tail(&ops->list, &link_ops);
90497+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
90498 return 0;
90499 }
90500 EXPORT_SYMBOL_GPL(__rtnl_link_register);
90501@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
90502 for_each_net(net) {
90503 __rtnl_kill_links(net, ops);
90504 }
90505- list_del(&ops->list);
90506+ pax_list_del((struct list_head *)&ops->list);
90507 }
90508 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
90509
90510@@ -2374,7 +2377,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
90511 struct nlattr *extfilt;
90512 u32 filter_mask = 0;
90513
90514- extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
90515+ extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
90516 IFLA_EXT_MASK);
90517 if (extfilt)
90518 filter_mask = nla_get_u32(extfilt);
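
With rtnl_link_ops constified into read-only data, the one-time default for ops->dellink has to be written through pax_open_kernel()/pax_close_kernel(), which briefly permit the store. A loose userspace analogy using mprotect() on a read-only page (on x86 the kernel mechanism toggles the CR0 write-protect bit instead; this is only a sketch):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;
        strcpy(p, "dellink=NULL");
        mprotect(p, pagesz, PROT_READ);       /* now effectively rodata */

        mprotect(p, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel()  */
        strcpy(p, "dellink=set");                     /* the one-time fixup */
        mprotect(p, pagesz, PROT_READ);               /* pax_close_kernel() */

        puts(p);
        munmap(p, pagesz);
        return 0;
    }

sock_diag.c below uses the identical bracketing to install and remove diag handlers in a __read_only array.
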
90519diff --git a/net/core/scm.c b/net/core/scm.c
90520index 03795d0..eaf7368 100644
90521--- a/net/core/scm.c
90522+++ b/net/core/scm.c
90523@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
90524 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
90525 {
90526 struct cmsghdr __user *cm
90527- = (__force struct cmsghdr __user *)msg->msg_control;
90528+ = (struct cmsghdr __force_user *)msg->msg_control;
90529 struct cmsghdr cmhdr;
90530 int cmlen = CMSG_LEN(len);
90531 int err;
90532@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
90533 err = -EFAULT;
90534 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
90535 goto out;
90536- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
90537+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
90538 goto out;
90539 cmlen = CMSG_SPACE(len);
90540 if (msg->msg_controllen < cmlen)
90541@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
90542 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
90543 {
90544 struct cmsghdr __user *cm
90545- = (__force struct cmsghdr __user*)msg->msg_control;
90546+ = (struct cmsghdr __force_user *)msg->msg_control;
90547
90548 int fdmax = 0;
90549 int fdnum = scm->fp->count;
90550@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
90551 if (fdnum < fdmax)
90552 fdmax = fdnum;
90553
90554- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
90555+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
90556 i++, cmfptr++)
90557 {
90558 struct socket *sock;
90559diff --git a/net/core/skbuff.c b/net/core/skbuff.c
90560index 1c1738c..4cab7f0 100644
90561--- a/net/core/skbuff.c
90562+++ b/net/core/skbuff.c
90563@@ -3087,13 +3087,15 @@ void __init skb_init(void)
90564 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
90565 sizeof(struct sk_buff),
90566 0,
90567- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
90568+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
90569+ SLAB_NO_SANITIZE,
90570 NULL);
90571 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
90572 (2*sizeof(struct sk_buff)) +
90573 sizeof(atomic_t),
90574 0,
90575- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
90576+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
90577+ SLAB_NO_SANITIZE,
90578 NULL);
90579 }
90580
90581diff --git a/net/core/sock.c b/net/core/sock.c
90582index d6d024c..6ea7ab4 100644
90583--- a/net/core/sock.c
90584+++ b/net/core/sock.c
90585@@ -390,7 +390,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
90586 struct sk_buff_head *list = &sk->sk_receive_queue;
90587
90588 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
90589- atomic_inc(&sk->sk_drops);
90590+ atomic_inc_unchecked(&sk->sk_drops);
90591 trace_sock_rcvqueue_full(sk, skb);
90592 return -ENOMEM;
90593 }
90594@@ -400,7 +400,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
90595 return err;
90596
90597 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
90598- atomic_inc(&sk->sk_drops);
90599+ atomic_inc_unchecked(&sk->sk_drops);
90600 return -ENOBUFS;
90601 }
90602
90603@@ -420,7 +420,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
90604 skb_dst_force(skb);
90605
90606 spin_lock_irqsave(&list->lock, flags);
90607- skb->dropcount = atomic_read(&sk->sk_drops);
90608+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
90609 __skb_queue_tail(list, skb);
90610 spin_unlock_irqrestore(&list->lock, flags);
90611
90612@@ -440,7 +440,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
90613 skb->dev = NULL;
90614
90615 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
90616- atomic_inc(&sk->sk_drops);
90617+ atomic_inc_unchecked(&sk->sk_drops);
90618 goto discard_and_relse;
90619 }
90620 if (nested)
90621@@ -458,7 +458,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
90622 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
90623 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
90624 bh_unlock_sock(sk);
90625- atomic_inc(&sk->sk_drops);
90626+ atomic_inc_unchecked(&sk->sk_drops);
90627 goto discard_and_relse;
90628 }
90629
90630@@ -933,12 +933,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
90631 struct timeval tm;
90632 } v;
90633
90634- int lv = sizeof(int);
90635- int len;
90636+ unsigned int lv = sizeof(int);
90637+ unsigned int len;
90638
90639 if (get_user(len, optlen))
90640 return -EFAULT;
90641- if (len < 0)
90642+ if (len > INT_MAX)
90643 return -EINVAL;
90644
90645 memset(&v, 0, sizeof(v));
90646@@ -1090,11 +1090,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
90647
90648 case SO_PEERNAME:
90649 {
90650- char address[128];
90651+ char address[_K_SS_MAXSIZE];
90652
90653 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
90654 return -ENOTCONN;
90655- if (lv < len)
90656+ if (lv < len || sizeof address < len)
90657 return -EINVAL;
90658 if (copy_to_user(optval, address, len))
90659 return -EFAULT;
90660@@ -1161,7 +1161,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
90661
90662 if (len > lv)
90663 len = lv;
90664- if (copy_to_user(optval, &v, len))
90665+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
90666 return -EFAULT;
90667 lenout:
90668 if (put_user(len, optlen))
90669@@ -2277,7 +2277,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
90670 */
90671 smp_wmb();
90672 atomic_set(&sk->sk_refcnt, 1);
90673- atomic_set(&sk->sk_drops, 0);
90674+ atomic_set_unchecked(&sk->sk_drops, 0);
90675 }
90676 EXPORT_SYMBOL(sock_init_data);
90677
90678diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
90679index a0e9cf6..ef7f9ed 100644
90680--- a/net/core/sock_diag.c
90681+++ b/net/core/sock_diag.c
90682@@ -9,26 +9,33 @@
90683 #include <linux/inet_diag.h>
90684 #include <linux/sock_diag.h>
90685
90686-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
90687+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
90688 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
90689 static DEFINE_MUTEX(sock_diag_table_mutex);
90690
90691 int sock_diag_check_cookie(void *sk, __u32 *cookie)
90692 {
90693+#ifndef CONFIG_GRKERNSEC_HIDESYM
90694 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
90695 cookie[1] != INET_DIAG_NOCOOKIE) &&
90696 ((u32)(unsigned long)sk != cookie[0] ||
90697 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
90698 return -ESTALE;
90699 else
90700+#endif
90701 return 0;
90702 }
90703 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
90704
90705 void sock_diag_save_cookie(void *sk, __u32 *cookie)
90706 {
90707+#ifdef CONFIG_GRKERNSEC_HIDESYM
90708+ cookie[0] = 0;
90709+ cookie[1] = 0;
90710+#else
90711 cookie[0] = (u32)(unsigned long)sk;
90712 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
90713+#endif
90714 }
90715 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
90716
90717@@ -113,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
90718 mutex_lock(&sock_diag_table_mutex);
90719 if (sock_diag_handlers[hndl->family])
90720 err = -EBUSY;
90721- else
90722+ else {
90723+ pax_open_kernel();
90724 sock_diag_handlers[hndl->family] = hndl;
90725+ pax_close_kernel();
90726+ }
90727 mutex_unlock(&sock_diag_table_mutex);
90728
90729 return err;
90730@@ -130,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
90731
90732 mutex_lock(&sock_diag_table_mutex);
90733 BUG_ON(sock_diag_handlers[family] != hnld);
90734+ pax_open_kernel();
90735 sock_diag_handlers[family] = NULL;
90736+ pax_close_kernel();
90737 mutex_unlock(&sock_diag_table_mutex);
90738 }
90739 EXPORT_SYMBOL_GPL(sock_diag_unregister);
90740diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
90741index cfdb46a..cef55e1 100644
90742--- a/net/core/sysctl_net_core.c
90743+++ b/net/core/sysctl_net_core.c
90744@@ -28,7 +28,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
90745 {
90746 unsigned int orig_size, size;
90747 int ret, i;
90748- ctl_table tmp = {
90749+ ctl_table_no_const tmp = {
90750 .data = &size,
90751 .maxlen = sizeof(size),
90752 .mode = table->mode
90753@@ -211,13 +211,12 @@ static struct ctl_table netns_core_table[] = {
90754
90755 static __net_init int sysctl_core_net_init(struct net *net)
90756 {
90757- struct ctl_table *tbl;
90758+ ctl_table_no_const *tbl = NULL;
90759
90760 net->core.sysctl_somaxconn = SOMAXCONN;
90761
90762- tbl = netns_core_table;
90763 if (!net_eq(net, &init_net)) {
90764- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
90765+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
90766 if (tbl == NULL)
90767 goto err_dup;
90768
90769@@ -227,17 +226,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
90770 if (net->user_ns != &init_user_ns) {
90771 tbl[0].procname = NULL;
90772 }
90773- }
90774-
90775- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
90776+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
90777+ } else
90778+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
90779 if (net->core.sysctl_hdr == NULL)
90780 goto err_reg;
90781
90782 return 0;
90783
90784 err_reg:
90785- if (tbl != netns_core_table)
90786- kfree(tbl);
90787+ kfree(tbl);
90788 err_dup:
90789 return -ENOMEM;
90790 }
90791@@ -252,7 +250,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
90792 kfree(tbl);
90793 }
90794
90795-static __net_initdata struct pernet_operations sysctl_core_ops = {
90796+static __net_initconst struct pernet_operations sysctl_core_ops = {
90797 .init = sysctl_core_net_init,
90798 .exit = sysctl_core_net_exit,
90799 };
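[Annotation] The sysctl_net_core.c rewrite is the template for many conversions below: init_net registers the constified global table in place, every other namespace gets a kmemdup()ed ctl_table_no_const copy it may edit, and because tbl stays NULL for init_net the error path can kfree(tbl) unconditionally. A condensed sketch with illustrative names (example_table, "net/example"); ctl_table_no_const is the grsecurity-provided mutable variant:

static int __net_init example_net_init(struct net *net)
{
	ctl_table_no_const *tbl = NULL;
	struct ctl_table_header *hdr;	/* normally stored for unregister */

	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(example_table, sizeof(example_table), GFP_KERNEL);
		if (tbl == NULL)
			return -ENOMEM;
		/* per-namespace edits (hide entries, retarget .data) go here */
		hdr = register_net_sysctl(net, "net/example", tbl);
	} else
		hdr = register_net_sysctl(net, "net/example", example_table);
	if (hdr == NULL) {
		kfree(tbl);	/* still NULL for init_net, so always safe */
		return -ENOMEM;
	}
	return 0;
}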
90800diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
90801index c21f200..bc4565b 100644
90802--- a/net/decnet/af_decnet.c
90803+++ b/net/decnet/af_decnet.c
90804@@ -465,6 +465,7 @@ static struct proto dn_proto = {
90805 .sysctl_rmem = sysctl_decnet_rmem,
90806 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
90807 .obj_size = sizeof(struct dn_sock),
90808+ .slab_flags = SLAB_USERCOPY,
90809 };
90810
90811 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
90812diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
90813index a55eecc..dd8428c 100644
90814--- a/net/decnet/sysctl_net_decnet.c
90815+++ b/net/decnet/sysctl_net_decnet.c
90816@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
90817
90818 if (len > *lenp) len = *lenp;
90819
90820- if (copy_to_user(buffer, addr, len))
90821+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
90822 return -EFAULT;
90823
90824 *lenp = len;
90825@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
90826
90827 if (len > *lenp) len = *lenp;
90828
90829- if (copy_to_user(buffer, devname, len))
90830+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
90831 return -EFAULT;
90832
90833 *lenp = len;
90834diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
90835index d01be2a..8976537 100644
90836--- a/net/ipv4/af_inet.c
90837+++ b/net/ipv4/af_inet.c
90838@@ -1703,13 +1703,9 @@ static int __init inet_init(void)
90839
90840 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
90841
90842- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
90843- if (!sysctl_local_reserved_ports)
90844- goto out;
90845-
90846 rc = proto_register(&tcp_prot, 1);
90847 if (rc)
90848- goto out_free_reserved_ports;
90849+ goto out;
90850
90851 rc = proto_register(&udp_prot, 1);
90852 if (rc)
90853@@ -1818,8 +1814,6 @@ out_unregister_udp_proto:
90854 proto_unregister(&udp_prot);
90855 out_unregister_tcp_proto:
90856 proto_unregister(&tcp_prot);
90857-out_free_reserved_ports:
90858- kfree(sysctl_local_reserved_ports);
90859 goto out;
90860 }
90861
90862diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
90863index 2e7f194..0fa4d6d 100644
90864--- a/net/ipv4/ah4.c
90865+++ b/net/ipv4/ah4.c
90866@@ -420,7 +420,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
90867 return;
90868
90869 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
90870- atomic_inc(&flow_cache_genid);
90871+ atomic_inc_unchecked(&flow_cache_genid);
90872 rt_genid_bump(net);
90873
90874 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
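[Annotation] The atomic_inc_unchecked() conversions here and in the diffs that follow serve PaX REFCOUNT, which traps on atomic_t overflow to stop reference-count exploits. Generation counters such as flow_cache_genid are expected to wrap, so they are moved to the _unchecked variants and opt out of the trap while real refcounts stay checked. A small sketch using the grsecurity atomic_unchecked_t type:

/* A generation number that may legitimately wrap; kept unchecked so
 * PaX REFCOUNT does not fire, while true refcounts remain atomic_t. */
static atomic_unchecked_t example_genid = ATOMIC_INIT(0);

static void example_bump_genid(void)
{
	atomic_inc_unchecked(&example_genid);	/* wraparound is harmless */
}

static int example_read_genid(void)
{
	return atomic_read_unchecked(&example_genid);
}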
90875diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
90876index dfc39d4..0d4fa52 100644
90877--- a/net/ipv4/devinet.c
90878+++ b/net/ipv4/devinet.c
90879@@ -771,7 +771,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
90880 ci = nla_data(tb[IFA_CACHEINFO]);
90881 if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
90882 err = -EINVAL;
90883- goto errout;
90884+ goto errout_free;
90885 }
90886 *pvalid_lft = ci->ifa_valid;
90887 *pprefered_lft = ci->ifa_prefered;
90888@@ -779,6 +779,8 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
90889
90890 return ifa;
90891
90892+errout_free:
90893+ inet_free_ifa(ifa);
90894 errout:
90895 return ERR_PTR(err);
90896 }
90897@@ -1529,7 +1531,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
90898 idx = 0;
90899 head = &net->dev_index_head[h];
90900 rcu_read_lock();
90901- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
90902+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
90903 net->dev_base_seq;
90904 hlist_for_each_entry_rcu(dev, head, index_hlist) {
90905 if (idx < s_idx)
90906@@ -1840,7 +1842,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
90907 idx = 0;
90908 head = &net->dev_index_head[h];
90909 rcu_read_lock();
90910- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
90911+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
90912 net->dev_base_seq;
90913 hlist_for_each_entry_rcu(dev, head, index_hlist) {
90914 if (idx < s_idx)
90915@@ -2065,7 +2067,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
90916 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
90917 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
90918
90919-static struct devinet_sysctl_table {
90920+static const struct devinet_sysctl_table {
90921 struct ctl_table_header *sysctl_header;
90922 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
90923 } devinet_sysctl = {
90924@@ -2183,7 +2185,7 @@ static __net_init int devinet_init_net(struct net *net)
90925 int err;
90926 struct ipv4_devconf *all, *dflt;
90927 #ifdef CONFIG_SYSCTL
90928- struct ctl_table *tbl = ctl_forward_entry;
90929+ ctl_table_no_const *tbl = NULL;
90930 struct ctl_table_header *forw_hdr;
90931 #endif
90932
90933@@ -2201,7 +2203,7 @@ static __net_init int devinet_init_net(struct net *net)
90934 goto err_alloc_dflt;
90935
90936 #ifdef CONFIG_SYSCTL
90937- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
90938+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
90939 if (tbl == NULL)
90940 goto err_alloc_ctl;
90941
90942@@ -2221,7 +2223,10 @@ static __net_init int devinet_init_net(struct net *net)
90943 goto err_reg_dflt;
90944
90945 err = -ENOMEM;
90946- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
90947+ if (!net_eq(net, &init_net))
90948+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
90949+ else
90950+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
90951 if (forw_hdr == NULL)
90952 goto err_reg_ctl;
90953 net->ipv4.forw_hdr = forw_hdr;
90954@@ -2237,8 +2242,7 @@ err_reg_ctl:
90955 err_reg_dflt:
90956 __devinet_sysctl_unregister(all);
90957 err_reg_all:
90958- if (tbl != ctl_forward_entry)
90959- kfree(tbl);
90960+ kfree(tbl);
90961 err_alloc_ctl:
90962 #endif
90963 if (dflt != &ipv4_devconf_dflt)
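[Annotation] Beyond the genid conversions, the devinet.c diff fixes a real leak: rtm_to_ifaddr() used to jump to errout without releasing the freshly allocated ifa when IFA_CACHEINFO validation failed; the new errout_free label frees it first. A sketch of the unwind idiom being restored, where example_validate() is a stand-in for the cacheinfo check:

static bool example_validate(const struct nlattr *ci)
{
	return ci != NULL;		/* hypothetical validity check */
}

static struct in_ifaddr *example_rtm_to_ifaddr(struct nlattr *ci)
{
	struct in_ifaddr *ifa;
	int err;

	ifa = inet_alloc_ifa();
	if (ifa == NULL)
		return ERR_PTR(-ENOBUFS);

	if (!example_validate(ci)) {
		err = -EINVAL;
		goto errout_free;	/* must undo the allocation */
	}
	return ifa;

errout_free:
	inet_free_ifa(ifa);		/* one label per live object */
	return ERR_PTR(err);
}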
90964diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
90965index 4cfe34d..d2fac8a 100644
90966--- a/net/ipv4/esp4.c
90967+++ b/net/ipv4/esp4.c
90968@@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
90969 }
90970
90971 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
90972- net_adj) & ~(align - 1)) + (net_adj - 2);
90973+ net_adj) & ~(align - 1)) + net_adj - 2;
90974 }
90975
90976 static void esp4_err(struct sk_buff *skb, u32 info)
90977@@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
90978 return;
90979
90980 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
90981- atomic_inc(&flow_cache_genid);
90982+ atomic_inc_unchecked(&flow_cache_genid);
90983 rt_genid_bump(net);
90984
90985 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
90986diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
90987index c7629a2..b62d139 100644
90988--- a/net/ipv4/fib_frontend.c
90989+++ b/net/ipv4/fib_frontend.c
90990@@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
90991 #ifdef CONFIG_IP_ROUTE_MULTIPATH
90992 fib_sync_up(dev);
90993 #endif
90994- atomic_inc(&net->ipv4.dev_addr_genid);
90995+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
90996 rt_cache_flush(dev_net(dev));
90997 break;
90998 case NETDEV_DOWN:
90999 fib_del_ifaddr(ifa, NULL);
91000- atomic_inc(&net->ipv4.dev_addr_genid);
91001+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
91002 if (ifa->ifa_dev->ifa_list == NULL) {
91003 /* Last address was deleted from this interface.
91004 * Disable IP.
91005@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
91006 #ifdef CONFIG_IP_ROUTE_MULTIPATH
91007 fib_sync_up(dev);
91008 #endif
91009- atomic_inc(&net->ipv4.dev_addr_genid);
91010+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
91011 rt_cache_flush(net);
91012 break;
91013 case NETDEV_DOWN:
91014diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
91015index 8f6cb7a..34507f9 100644
91016--- a/net/ipv4/fib_semantics.c
91017+++ b/net/ipv4/fib_semantics.c
91018@@ -765,7 +765,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
91019 nh->nh_saddr = inet_select_addr(nh->nh_dev,
91020 nh->nh_gw,
91021 nh->nh_parent->fib_scope);
91022- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
91023+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
91024
91025 return nh->nh_saddr;
91026 }
91027diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
91028index 49616fe..6e8a13d 100644
91029--- a/net/ipv4/fib_trie.c
91030+++ b/net/ipv4/fib_trie.c
91031@@ -71,7 +71,6 @@
91032 #include <linux/init.h>
91033 #include <linux/list.h>
91034 #include <linux/slab.h>
91035-#include <linux/prefetch.h>
91036 #include <linux/export.h>
91037 #include <net/net_namespace.h>
91038 #include <net/ip.h>
91039@@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
91040 if (!c)
91041 continue;
91042
91043- if (IS_LEAF(c)) {
91044- prefetch(rcu_dereference_rtnl(p->child[idx]));
91045+ if (IS_LEAF(c))
91046 return (struct leaf *) c;
91047- }
91048
91049 /* Rescan start scanning in new node */
91050 p = (struct tnode *) c;
91051diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
91052index 6acb541..9ea617d 100644
91053--- a/net/ipv4/inet_connection_sock.c
91054+++ b/net/ipv4/inet_connection_sock.c
91055@@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
91056 .range = { 32768, 61000 },
91057 };
91058
91059-unsigned long *sysctl_local_reserved_ports;
91060+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
91061 EXPORT_SYMBOL(sysctl_local_reserved_ports);
91062
91063 void inet_get_local_port_range(int *low, int *high)
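[Annotation] Together with the af_inet.c hunk above, this replaces the boot-time kzalloc of the reserved-ports bitmap with a static array, so the ipv4_table entry further down can point its .data at it at compile time. The patch spells out the arithmetic (65536 bits / 8 bits per byte / bytes per word); an equivalent sketch using the bitmap helpers:

#include <linux/bitmap.h>

static DECLARE_BITMAP(reserved_ports, 65536);	/* one bit per port */

static bool port_is_reserved(u16 port)
{
	return test_bit(port, reserved_ports);
}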
91064diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
91065index 6af375a..c493c74 100644
91066--- a/net/ipv4/inet_hashtables.c
91067+++ b/net/ipv4/inet_hashtables.c
91068@@ -18,12 +18,15 @@
91069 #include <linux/sched.h>
91070 #include <linux/slab.h>
91071 #include <linux/wait.h>
91072+#include <linux/security.h>
91073
91074 #include <net/inet_connection_sock.h>
91075 #include <net/inet_hashtables.h>
91076 #include <net/secure_seq.h>
91077 #include <net/ip.h>
91078
91079+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
91080+
91081 /*
91082 * Allocate and initialize a new local port bind bucket.
91083 * The bindhash mutex for snum's hash chain must be held here.
91084@@ -554,6 +557,8 @@ ok:
91085 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
91086 spin_unlock(&head->lock);
91087
91088+ gr_update_task_in_ip_table(current, inet_sk(sk));
91089+
91090 if (tw) {
91091 inet_twsk_deschedule(tw, death_row);
91092 while (twrefcnt) {
91093diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
91094index 000e3d2..5472da3 100644
91095--- a/net/ipv4/inetpeer.c
91096+++ b/net/ipv4/inetpeer.c
91097@@ -503,8 +503,8 @@ relookup:
91098 if (p) {
91099 p->daddr = *daddr;
91100 atomic_set(&p->refcnt, 1);
91101- atomic_set(&p->rid, 0);
91102- atomic_set(&p->ip_id_count,
91103+ atomic_set_unchecked(&p->rid, 0);
91104+ atomic_set_unchecked(&p->ip_id_count,
91105 (daddr->family == AF_INET) ?
91106 secure_ip_id(daddr->addr.a4) :
91107 secure_ipv6_id(daddr->addr.a6));
91108diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
91109index b66910a..cfe416e 100644
91110--- a/net/ipv4/ip_fragment.c
91111+++ b/net/ipv4/ip_fragment.c
91112@@ -282,7 +282,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
91113 return 0;
91114
91115 start = qp->rid;
91116- end = atomic_inc_return(&peer->rid);
91117+ end = atomic_inc_return_unchecked(&peer->rid);
91118 qp->rid = end;
91119
91120 rc = qp->q.fragments && (end - start) > max;
91121@@ -759,12 +759,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
91122
91123 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
91124 {
91125- struct ctl_table *table;
91126+ ctl_table_no_const *table = NULL;
91127 struct ctl_table_header *hdr;
91128
91129- table = ip4_frags_ns_ctl_table;
91130 if (!net_eq(net, &init_net)) {
91131- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
91132+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
91133 if (table == NULL)
91134 goto err_alloc;
91135
91136@@ -775,9 +774,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
91137 /* Don't export sysctls to unprivileged users */
91138 if (net->user_ns != &init_user_ns)
91139 table[0].procname = NULL;
91140- }
91141+ hdr = register_net_sysctl(net, "net/ipv4", table);
91142+ } else
91143+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
91144
91145- hdr = register_net_sysctl(net, "net/ipv4", table);
91146 if (hdr == NULL)
91147 goto err_reg;
91148
91149@@ -785,8 +785,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
91150 return 0;
91151
91152 err_reg:
91153- if (!net_eq(net, &init_net))
91154- kfree(table);
91155+ kfree(table);
91156 err_alloc:
91157 return -ENOMEM;
91158 }
91159diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
91160index 855004f..9644112 100644
91161--- a/net/ipv4/ip_gre.c
91162+++ b/net/ipv4/ip_gre.c
91163@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
91164 module_param(log_ecn_error, bool, 0644);
91165 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
91166
91167-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
91168+static struct rtnl_link_ops ipgre_link_ops;
91169 static int ipgre_tunnel_init(struct net_device *dev);
91170
91171 static int ipgre_net_id __read_mostly;
91172@@ -572,7 +572,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
91173 if (daddr)
91174 memcpy(&iph->daddr, daddr, 4);
91175 if (iph->daddr)
91176- return t->hlen;
91177+ return t->hlen + sizeof(*iph);
91178
91179 return -(t->hlen + sizeof(*iph));
91180 }
91181@@ -919,7 +919,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
91182 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
91183 };
91184
91185-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
91186+static struct rtnl_link_ops ipgre_link_ops = {
91187 .kind = "gre",
91188 .maxtype = IFLA_GRE_MAX,
91189 .policy = ipgre_policy,
91190@@ -933,7 +933,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
91191 .fill_info = ipgre_fill_info,
91192 };
91193
91194-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
91195+static struct rtnl_link_ops ipgre_tap_ops = {
91196 .kind = "gretap",
91197 .maxtype = IFLA_GRE_MAX,
91198 .policy = ipgre_policy,
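[Annotation] Aside from de-annotating the rtnl_link_ops for the constify plugin, the ip_gre.c diff fixes ipgre_header() to honour the header_ops->create() contract: the return value must be the number of bytes actually pushed, and when the destination is known the GRE path builds both the tunnel header and the outer IPv4 header. A rough sketch of the contract, with illustrative names:

static int example_header_create(struct sk_buff *skb, unsigned int tnl_hlen)
{
	struct iphdr *iph;

	iph = (struct iphdr *)skb_push(skb, tnl_hlen + sizeof(*iph));
	/* ... fill in iph and the GRE header that follows it ... */
	return tnl_hlen + sizeof(*iph);	/* report everything we pushed */
}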
91199diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
91200index d9c4f11..02b82dbc 100644
91201--- a/net/ipv4/ip_sockglue.c
91202+++ b/net/ipv4/ip_sockglue.c
91203@@ -1152,7 +1152,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
91204 len = min_t(unsigned int, len, opt->optlen);
91205 if (put_user(len, optlen))
91206 return -EFAULT;
91207- if (copy_to_user(optval, opt->__data, len))
91208+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
91209+ copy_to_user(optval, opt->__data, len))
91210 return -EFAULT;
91211 return 0;
91212 }
91213@@ -1283,7 +1284,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
91214 if (sk->sk_type != SOCK_STREAM)
91215 return -ENOPROTOOPT;
91216
91217- msg.msg_control = optval;
91218+ msg.msg_control = (void __force_kernel *)optval;
91219 msg.msg_controllen = len;
91220 msg.msg_flags = flags;
91221
91222diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
91223index 17cc0ff..63856c4 100644
91224--- a/net/ipv4/ip_vti.c
91225+++ b/net/ipv4/ip_vti.c
91226@@ -47,7 +47,7 @@
91227 #define HASH_SIZE 16
91228 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
91229
91230-static struct rtnl_link_ops vti_link_ops __read_mostly;
91231+static struct rtnl_link_ops vti_link_ops;
91232
91233 static int vti_net_id __read_mostly;
91234 struct vti_net {
91235@@ -840,7 +840,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
91236 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
91237 };
91238
91239-static struct rtnl_link_ops vti_link_ops __read_mostly = {
91240+static struct rtnl_link_ops vti_link_ops = {
91241 .kind = "vti",
91242 .maxtype = IFLA_VTI_MAX,
91243 .policy = vti_policy,
91244diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
91245index 59cb8c7..a72160c 100644
91246--- a/net/ipv4/ipcomp.c
91247+++ b/net/ipv4/ipcomp.c
91248@@ -48,7 +48,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
91249 return;
91250
91251 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
91252- atomic_inc(&flow_cache_genid);
91253+ atomic_inc_unchecked(&flow_cache_genid);
91254 rt_genid_bump(net);
91255
91256 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
91257diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
91258index efa1138..20dbba0 100644
91259--- a/net/ipv4/ipconfig.c
91260+++ b/net/ipv4/ipconfig.c
91261@@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
91262
91263 mm_segment_t oldfs = get_fs();
91264 set_fs(get_ds());
91265- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
91266+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
91267 set_fs(oldfs);
91268 return res;
91269 }
91270@@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
91271
91272 mm_segment_t oldfs = get_fs();
91273 set_fs(get_ds());
91274- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
91275+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
91276 set_fs(oldfs);
91277 return res;
91278 }
91279@@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
91280
91281 mm_segment_t oldfs = get_fs();
91282 set_fs(get_ds());
91283- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
91284+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
91285 set_fs(oldfs);
91286 return res;
91287 }
91288diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
91289index 7cfc456..e726868 100644
91290--- a/net/ipv4/ipip.c
91291+++ b/net/ipv4/ipip.c
91292@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
91293 static int ipip_net_id __read_mostly;
91294
91295 static int ipip_tunnel_init(struct net_device *dev);
91296-static struct rtnl_link_ops ipip_link_ops __read_mostly;
91297+static struct rtnl_link_ops ipip_link_ops;
91298
91299 static int ipip_err(struct sk_buff *skb, u32 info)
91300 {
91301@@ -406,7 +406,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
91302 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
91303 };
91304
91305-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
91306+static struct rtnl_link_ops ipip_link_ops = {
91307 .kind = "ipip",
91308 .maxtype = IFLA_IPTUN_MAX,
91309 .policy = ipip_policy,
91310diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
91311index 85a4f21..1beb1f5 100644
91312--- a/net/ipv4/netfilter/arp_tables.c
91313+++ b/net/ipv4/netfilter/arp_tables.c
91314@@ -880,14 +880,14 @@ static int compat_table_info(const struct xt_table_info *info,
91315 #endif
91316
91317 static int get_info(struct net *net, void __user *user,
91318- const int *len, int compat)
91319+ int len, int compat)
91320 {
91321 char name[XT_TABLE_MAXNAMELEN];
91322 struct xt_table *t;
91323 int ret;
91324
91325- if (*len != sizeof(struct arpt_getinfo)) {
91326- duprintf("length %u != %Zu\n", *len,
91327+ if (len != sizeof(struct arpt_getinfo)) {
91328+ duprintf("length %u != %Zu\n", len,
91329 sizeof(struct arpt_getinfo));
91330 return -EINVAL;
91331 }
91332@@ -924,7 +924,7 @@ static int get_info(struct net *net, void __user *user,
91333 info.size = private->size;
91334 strcpy(info.name, name);
91335
91336- if (copy_to_user(user, &info, *len) != 0)
91337+ if (copy_to_user(user, &info, len) != 0)
91338 ret = -EFAULT;
91339 else
91340 ret = 0;
91341@@ -1683,7 +1683,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
91342
91343 switch (cmd) {
91344 case ARPT_SO_GET_INFO:
91345- ret = get_info(sock_net(sk), user, len, 1);
91346+ ret = get_info(sock_net(sk), user, *len, 1);
91347 break;
91348 case ARPT_SO_GET_ENTRIES:
91349 ret = compat_get_entries(sock_net(sk), user, len);
91350@@ -1728,7 +1728,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
91351
91352 switch (cmd) {
91353 case ARPT_SO_GET_INFO:
91354- ret = get_info(sock_net(sk), user, len, 0);
91355+ ret = get_info(sock_net(sk), user, *len, 0);
91356 break;
91357
91358 case ARPT_SO_GET_ENTRIES:
91359diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
91360index d23118d..6ad7277 100644
91361--- a/net/ipv4/netfilter/ip_tables.c
91362+++ b/net/ipv4/netfilter/ip_tables.c
91363@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
91364 #endif
91365
91366 static int get_info(struct net *net, void __user *user,
91367- const int *len, int compat)
91368+ int len, int compat)
91369 {
91370 char name[XT_TABLE_MAXNAMELEN];
91371 struct xt_table *t;
91372 int ret;
91373
91374- if (*len != sizeof(struct ipt_getinfo)) {
91375- duprintf("length %u != %zu\n", *len,
91376+ if (len != sizeof(struct ipt_getinfo)) {
91377+ duprintf("length %u != %zu\n", len,
91378 sizeof(struct ipt_getinfo));
91379 return -EINVAL;
91380 }
91381@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
91382 info.size = private->size;
91383 strcpy(info.name, name);
91384
91385- if (copy_to_user(user, &info, *len) != 0)
91386+ if (copy_to_user(user, &info, len) != 0)
91387 ret = -EFAULT;
91388 else
91389 ret = 0;
91390@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
91391
91392 switch (cmd) {
91393 case IPT_SO_GET_INFO:
91394- ret = get_info(sock_net(sk), user, len, 1);
91395+ ret = get_info(sock_net(sk), user, *len, 1);
91396 break;
91397 case IPT_SO_GET_ENTRIES:
91398 ret = compat_get_entries(sock_net(sk), user, len);
91399@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
91400
91401 switch (cmd) {
91402 case IPT_SO_GET_INFO:
91403- ret = get_info(sock_net(sk), user, len, 0);
91404+ ret = get_info(sock_net(sk), user, *len, 0);
91405 break;
91406
91407 case IPT_SO_GET_ENTRIES:
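[Annotation] The arp_tables.c and ip_tables.c diffs (and the ip6_tables.c one below) change get_info() to take the length by value: the caller dereferences *len exactly once, so the value the function validates is provably the value it later passes to copy_to_user(), closing the door on double-fetch confusion. A sketch of the pattern:

#include <linux/string.h>
#include <linux/netfilter_ipv4/ip_tables.h>

static int example_get_info(struct net *net, void __user *user, int len)
{
	struct ipt_getinfo info;

	if (len != sizeof(info))	/* validated against the snapshot, */
		return -EINVAL;		/* which cannot change under us    */

	memset(&info, 0, sizeof(info));
	/* ... populate info from the looked-up table ... */
	if (copy_to_user(user, &info, len))	/* len == sizeof(info) */
		return -EFAULT;
	return 0;
}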
91408diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
91409index 7d93d62..cbbf2a3 100644
91410--- a/net/ipv4/ping.c
91411+++ b/net/ipv4/ping.c
91412@@ -843,7 +843,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
91413 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
91414 0, sock_i_ino(sp),
91415 atomic_read(&sp->sk_refcnt), sp,
91416- atomic_read(&sp->sk_drops), len);
91417+ atomic_read_unchecked(&sp->sk_drops), len);
91418 }
91419
91420 static int ping_seq_show(struct seq_file *seq, void *v)
91421diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
91422index dd44e0a..06dcca4 100644
91423--- a/net/ipv4/raw.c
91424+++ b/net/ipv4/raw.c
91425@@ -309,7 +309,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
91426 int raw_rcv(struct sock *sk, struct sk_buff *skb)
91427 {
91428 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
91429- atomic_inc(&sk->sk_drops);
91430+ atomic_inc_unchecked(&sk->sk_drops);
91431 kfree_skb(skb);
91432 return NET_RX_DROP;
91433 }
91434@@ -745,16 +745,20 @@ static int raw_init(struct sock *sk)
91435
91436 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
91437 {
91438+ struct icmp_filter filter;
91439+
91440 if (optlen > sizeof(struct icmp_filter))
91441 optlen = sizeof(struct icmp_filter);
91442- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
91443+ if (copy_from_user(&filter, optval, optlen))
91444 return -EFAULT;
91445+ raw_sk(sk)->filter = filter;
91446 return 0;
91447 }
91448
91449 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
91450 {
91451 int len, ret = -EFAULT;
91452+ struct icmp_filter filter;
91453
91454 if (get_user(len, optlen))
91455 goto out;
91456@@ -764,8 +768,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
91457 if (len > sizeof(struct icmp_filter))
91458 len = sizeof(struct icmp_filter);
91459 ret = -EFAULT;
91460- if (put_user(len, optlen) ||
91461- copy_to_user(optval, &raw_sk(sk)->filter, len))
91462+ filter = raw_sk(sk)->filter;
91463+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
91464 goto out;
91465 ret = 0;
91466 out: return ret;
91467@@ -994,7 +998,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
91468 0, 0L, 0,
91469 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
91470 0, sock_i_ino(sp),
91471- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
91472+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
91473 }
91474
91475 static int raw_seq_show(struct seq_file *seq, void *v)
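[Annotation] raw_seticmpfilter()/raw_geticmpfilter() now stage the ICMP filter in a stack-local struct: a faulting copy_from_user() can no longer leave the socket's long-lived filter half-written, and the getter bounds len against the local copy before copying out. A sketch of the bounce-buffer idiom:

#include <linux/icmp.h>
#include <net/raw.h>

static int example_set_filter(struct sock *sk, char __user *optval,
			      int optlen)
{
	struct icmp_filter filter;

	if (optlen > sizeof(filter))
		optlen = sizeof(filter);
	if (copy_from_user(&filter, optval, optlen))
		return -EFAULT;		/* socket state left untouched */
	raw_sk(sk)->filter = filter;	/* publish only a complete value */
	return 0;
}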
91476diff --git a/net/ipv4/route.c b/net/ipv4/route.c
91477index d35bbf0..faa3ab8 100644
91478--- a/net/ipv4/route.c
91479+++ b/net/ipv4/route.c
91480@@ -2558,34 +2558,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
91481 .maxlen = sizeof(int),
91482 .mode = 0200,
91483 .proc_handler = ipv4_sysctl_rtcache_flush,
91484+ .extra1 = &init_net,
91485 },
91486 { },
91487 };
91488
91489 static __net_init int sysctl_route_net_init(struct net *net)
91490 {
91491- struct ctl_table *tbl;
91492+ ctl_table_no_const *tbl = NULL;
91493
91494- tbl = ipv4_route_flush_table;
91495 if (!net_eq(net, &init_net)) {
91496- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
91497+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
91498 if (tbl == NULL)
91499 goto err_dup;
91500
91501 /* Don't export sysctls to unprivileged users */
91502 if (net->user_ns != &init_user_ns)
91503 tbl[0].procname = NULL;
91504- }
91505- tbl[0].extra1 = net;
91506+ tbl[0].extra1 = net;
91507+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
91508+ } else
91509+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
91510
91511- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
91512 if (net->ipv4.route_hdr == NULL)
91513 goto err_reg;
91514 return 0;
91515
91516 err_reg:
91517- if (tbl != ipv4_route_flush_table)
91518- kfree(tbl);
91519+ kfree(tbl);
91520 err_dup:
91521 return -ENOMEM;
91522 }
91523@@ -2608,7 +2608,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
91524
91525 static __net_init int rt_genid_init(struct net *net)
91526 {
91527- atomic_set(&net->rt_genid, 0);
91528+ atomic_set_unchecked(&net->rt_genid, 0);
91529 get_random_bytes(&net->ipv4.dev_addr_genid,
91530 sizeof(net->ipv4.dev_addr_genid));
91531 return 0;
91532diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
91533index 3f25e75..3ae0f4d 100644
91534--- a/net/ipv4/sysctl_net_ipv4.c
91535+++ b/net/ipv4/sysctl_net_ipv4.c
91536@@ -57,7 +57,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
91537 {
91538 int ret;
91539 int range[2];
91540- ctl_table tmp = {
91541+ ctl_table_no_const tmp = {
91542 .data = &range,
91543 .maxlen = sizeof(range),
91544 .mode = table->mode,
91545@@ -110,7 +110,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
91546 int ret;
91547 gid_t urange[2];
91548 kgid_t low, high;
91549- ctl_table tmp = {
91550+ ctl_table_no_const tmp = {
91551 .data = &urange,
91552 .maxlen = sizeof(urange),
91553 .mode = table->mode,
91554@@ -141,7 +141,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
91555 void __user *buffer, size_t *lenp, loff_t *ppos)
91556 {
91557 char val[TCP_CA_NAME_MAX];
91558- ctl_table tbl = {
91559+ ctl_table_no_const tbl = {
91560 .data = val,
91561 .maxlen = TCP_CA_NAME_MAX,
91562 };
91563@@ -160,7 +160,7 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
91564 void __user *buffer, size_t *lenp,
91565 loff_t *ppos)
91566 {
91567- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
91568+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
91569 int ret;
91570
91571 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
91572@@ -177,7 +177,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
91573 void __user *buffer, size_t *lenp,
91574 loff_t *ppos)
91575 {
91576- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
91577+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
91578 int ret;
91579
91580 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
91581@@ -203,15 +203,17 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
91582 struct mem_cgroup *memcg;
91583 #endif
91584
91585- ctl_table tmp = {
91586+ ctl_table_no_const tmp = {
91587 .data = &vec,
91588 .maxlen = sizeof(vec),
91589 .mode = ctl->mode,
91590 };
91591
91592 if (!write) {
91593- ctl->data = &net->ipv4.sysctl_tcp_mem;
91594- return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
91595+ ctl_table_no_const tcp_mem = *ctl;
91596+
91597+ tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
91598+ return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
91599 }
91600
91601 ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
91602@@ -238,7 +240,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
91603 static int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
91604 size_t *lenp, loff_t *ppos)
91605 {
91606- ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
91607+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
91608 struct tcp_fastopen_context *ctxt;
91609 int ret;
91610 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
91611@@ -481,7 +483,7 @@ static struct ctl_table ipv4_table[] = {
91612 },
91613 {
91614 .procname = "ip_local_reserved_ports",
91615- .data = NULL, /* initialized in sysctl_ipv4_init */
91616+ .data = sysctl_local_reserved_ports,
91617 .maxlen = 65536,
91618 .mode = 0644,
91619 .proc_handler = proc_do_large_bitmap,
91620@@ -846,11 +848,10 @@ static struct ctl_table ipv4_net_table[] = {
91621
91622 static __net_init int ipv4_sysctl_init_net(struct net *net)
91623 {
91624- struct ctl_table *table;
91625+ ctl_table_no_const *table = NULL;
91626
91627- table = ipv4_net_table;
91628 if (!net_eq(net, &init_net)) {
91629- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
91630+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
91631 if (table == NULL)
91632 goto err_alloc;
91633
91634@@ -885,15 +886,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
91635
91636 tcp_init_mem(net);
91637
91638- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
91639+ if (!net_eq(net, &init_net))
91640+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
91641+ else
91642+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
91643 if (net->ipv4.ipv4_hdr == NULL)
91644 goto err_reg;
91645
91646 return 0;
91647
91648 err_reg:
91649- if (!net_eq(net, &init_net))
91650- kfree(table);
91651+ kfree(table);
91652 err_alloc:
91653 return -ENOMEM;
91654 }
91655@@ -915,16 +918,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
91656 static __init int sysctl_ipv4_init(void)
91657 {
91658 struct ctl_table_header *hdr;
91659- struct ctl_table *i;
91660-
91661- for (i = ipv4_table; i->procname; i++) {
91662- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
91663- i->data = sysctl_local_reserved_ports;
91664- break;
91665- }
91666- }
91667- if (!i->procname)
91668- return -EINVAL;
91669
91670 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
91671 if (hdr == NULL)
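[Annotation] With the constify plugin, global ctl_table instances become const; handlers that must retarget .data therefore build a ctl_table_no_const copy on the stack (tmp, tbl, and the new tcp_mem local above) instead of writing through ctl->data in place. A sketch of a read handler in that style, with example_value as an illustrative stand-in for the per-namespace storage:

static unsigned long example_value[3];

static int example_proc_handler(struct ctl_table *ctl, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	if (!write) {
		ctl_table_no_const copy = *ctl;	/* mutable stack copy */

		copy.data = example_value;	/* never touch the const ctl */
		return proc_doulongvec_minmax(&copy, write, buffer,
					      lenp, ppos);
	}
	/* the write path validates through its own bounded temporary */
	return 0;
}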
91672diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
91673index 9c62257..651cc27 100644
91674--- a/net/ipv4/tcp_input.c
91675+++ b/net/ipv4/tcp_input.c
91676@@ -4436,7 +4436,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
91677 * simplifies code)
91678 */
91679 static void
91680-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
91681+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
91682 struct sk_buff *head, struct sk_buff *tail,
91683 u32 start, u32 end)
91684 {
91685@@ -5522,6 +5522,7 @@ discard:
91686 tcp_paws_reject(&tp->rx_opt, 0))
91687 goto discard_and_undo;
91688
91689+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
91690 if (th->syn) {
91691 /* We see SYN without ACK. It is attempt of
91692 * simultaneous connect with crossed SYNs.
91693@@ -5572,6 +5573,7 @@ discard:
91694 goto discard;
91695 #endif
91696 }
91697+#endif
91698 /* "fifth, if neither of the SYN or RST bits is set then
91699 * drop the segment and return."
91700 */
91701@@ -5616,7 +5618,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
91702 goto discard;
91703
91704 if (th->syn) {
91705- if (th->fin)
91706+ if (th->fin || th->urg || th->psh)
91707 goto discard;
91708 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
91709 return 1;
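[Annotation] Two behavioural changes in tcp_input.c: GRKERNSEC_NO_SIMULT_CONNECT compiles out the RFC 793 simultaneous-open path (crossed SYNs), and tcp_rcv_state_process() now drops an initial SYN that also carries FIN, URG, or PSH, flag combinations seen in scanning and spoofed traffic rather than real connection attempts. The stricter check, as a sketch:

#include <linux/tcp.h>

/* A legitimate initial SYN never sets FIN, URG or PSH alongside it. */
static bool syn_flags_are_sane(const struct tcphdr *th)
{
	return th->syn && !(th->fin || th->urg || th->psh);
}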
91710diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
91711index 7999fc5..c812f42 100644
91712--- a/net/ipv4/tcp_ipv4.c
91713+++ b/net/ipv4/tcp_ipv4.c
91714@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
91715 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91716
91717
91718+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91719+extern int grsec_enable_blackhole;
91720+#endif
91721+
91722 #ifdef CONFIG_TCP_MD5SIG
91723 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
91724 __be32 daddr, __be32 saddr, const struct tcphdr *th);
91725@@ -1855,6 +1859,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
91726 return 0;
91727
91728 reset:
91729+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91730+ if (!grsec_enable_blackhole)
91731+#endif
91732 tcp_v4_send_reset(rsk, skb);
91733 discard:
91734 kfree_skb(skb);
91735@@ -2000,12 +2007,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
91736 TCP_SKB_CB(skb)->sacked = 0;
91737
91738 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
91739- if (!sk)
91740+ if (!sk) {
91741+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91742+ ret = 1;
91743+#endif
91744 goto no_tcp_socket;
91745-
91746+ }
91747 process:
91748- if (sk->sk_state == TCP_TIME_WAIT)
91749+ if (sk->sk_state == TCP_TIME_WAIT) {
91750+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91751+ ret = 2;
91752+#endif
91753 goto do_time_wait;
91754+ }
91755
91756 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
91757 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
91758@@ -2058,6 +2072,10 @@ csum_error:
91759 bad_packet:
91760 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
91761 } else {
91762+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91763+ if (!grsec_enable_blackhole || (ret == 1 &&
91764+ (skb->dev->flags & IFF_LOOPBACK)))
91765+#endif
91766 tcp_v4_send_reset(NULL, skb);
91767 }
91768
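[Annotation] The blackhole hunks use ret as a breadcrumb (1 = no socket found, 2 = TIME_WAIT) so the error path can decide whether a RST is owed. With grsec_enable_blackhole set, closed ports stay silent and look identical to filtered ones, while loopback still gets answered so local tooling keeps working. The decision, as a sketch built from the sysctl toggle the hunk declares extern:

#include <linux/if.h>
#include <linux/skbuff.h>

extern int grsec_enable_blackhole;

static bool should_send_reset(const struct sk_buff *skb, int ret)
{
	if (!grsec_enable_blackhole)
		return true;		/* stock RFC behaviour */
	if (ret == 1 && (skb->dev->flags & IFF_LOOPBACK))
		return true;		/* keep loopback probes visible */
	return false;			/* indistinguishable from filtered */
}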
91769diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
91770index 0f01788..d52a859 100644
91771--- a/net/ipv4/tcp_minisocks.c
91772+++ b/net/ipv4/tcp_minisocks.c
91773@@ -27,6 +27,10 @@
91774 #include <net/inet_common.h>
91775 #include <net/xfrm.h>
91776
91777+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91778+extern int grsec_enable_blackhole;
91779+#endif
91780+
91781 int sysctl_tcp_syncookies __read_mostly = 1;
91782 EXPORT_SYMBOL(sysctl_tcp_syncookies);
91783
91784@@ -717,7 +721,10 @@ embryonic_reset:
91785 * avoid becoming vulnerable to outside attack aiming at
91786 * resetting legit local connections.
91787 */
91788- req->rsk_ops->send_reset(sk, skb);
91789+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91790+ if (!grsec_enable_blackhole)
91791+#endif
91792+ req->rsk_ops->send_reset(sk, skb);
91793 } else if (fastopen) { /* received a valid RST pkt */
91794 reqsk_fastopen_remove(sk, req, true);
91795 tcp_reset(sk);
91796diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
91797index d4943f6..e7a74a5 100644
91798--- a/net/ipv4/tcp_probe.c
91799+++ b/net/ipv4/tcp_probe.c
91800@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
91801 if (cnt + width >= len)
91802 break;
91803
91804- if (copy_to_user(buf + cnt, tbuf, width))
91805+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
91806 return -EFAULT;
91807 cnt += width;
91808 }
91809diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
91810index 4b85e6f..22f9ac9 100644
91811--- a/net/ipv4/tcp_timer.c
91812+++ b/net/ipv4/tcp_timer.c
91813@@ -22,6 +22,10 @@
91814 #include <linux/gfp.h>
91815 #include <net/tcp.h>
91816
91817+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91818+extern int grsec_lastack_retries;
91819+#endif
91820+
91821 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
91822 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
91823 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
91824@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
91825 }
91826 }
91827
91828+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91829+ if ((sk->sk_state == TCP_LAST_ACK) &&
91830+ (grsec_lastack_retries > 0) &&
91831+ (grsec_lastack_retries < retry_until))
91832+ retry_until = grsec_lastack_retries;
91833+#endif
91834+
91835 if (retransmits_timed_out(sk, retry_until,
91836 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
91837 /* Has it gone just too far? */
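[Annotation] A peer that never ACKs our FIN can pin a socket in LAST_ACK for the full retransmit budget; grsec_lastack_retries caps retry_until for that single state so such sockets are reaped sooner, without touching the limits of any other state. A sketch of the clamp:

#include <net/tcp_states.h>

static int effective_retries(int sk_state, int retry_until, int lastack_max)
{
	if (sk_state == TCP_LAST_ACK && lastack_max > 0 &&
	    lastack_max < retry_until)
		return lastack_max;	/* shrink only the LAST_ACK budget */
	return retry_until;
}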
91838diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
91839index 93b731d..5a2dd92 100644
91840--- a/net/ipv4/udp.c
91841+++ b/net/ipv4/udp.c
91842@@ -87,6 +87,7 @@
91843 #include <linux/types.h>
91844 #include <linux/fcntl.h>
91845 #include <linux/module.h>
91846+#include <linux/security.h>
91847 #include <linux/socket.h>
91848 #include <linux/sockios.h>
91849 #include <linux/igmp.h>
91850@@ -111,6 +112,10 @@
91851 #include <trace/events/skb.h>
91852 #include "udp_impl.h"
91853
91854+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91855+extern int grsec_enable_blackhole;
91856+#endif
91857+
91858 struct udp_table udp_table __read_mostly;
91859 EXPORT_SYMBOL(udp_table);
91860
91861@@ -594,6 +599,9 @@ found:
91862 return s;
91863 }
91864
91865+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
91866+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
91867+
91868 /*
91869 * This routine is called by the ICMP module when it gets some
91870 * sort of error condition. If err < 0 then the socket should
91871@@ -890,9 +898,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
91872 dport = usin->sin_port;
91873 if (dport == 0)
91874 return -EINVAL;
91875+
91876+ err = gr_search_udp_sendmsg(sk, usin);
91877+ if (err)
91878+ return err;
91879 } else {
91880 if (sk->sk_state != TCP_ESTABLISHED)
91881 return -EDESTADDRREQ;
91882+
91883+ err = gr_search_udp_sendmsg(sk, NULL);
91884+ if (err)
91885+ return err;
91886+
91887 daddr = inet->inet_daddr;
91888 dport = inet->inet_dport;
91889 /* Open fast path for connected socket.
91890@@ -1136,7 +1153,7 @@ static unsigned int first_packet_length(struct sock *sk)
91891 IS_UDPLITE(sk));
91892 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
91893 IS_UDPLITE(sk));
91894- atomic_inc(&sk->sk_drops);
91895+ atomic_inc_unchecked(&sk->sk_drops);
91896 __skb_unlink(skb, rcvq);
91897 __skb_queue_tail(&list_kill, skb);
91898 }
91899@@ -1222,6 +1239,10 @@ try_again:
91900 if (!skb)
91901 goto out;
91902
91903+ err = gr_search_udp_recvmsg(sk, skb);
91904+ if (err)
91905+ goto out_free;
91906+
91907 ulen = skb->len - sizeof(struct udphdr);
91908 copied = len;
91909 if (copied > ulen)
91910@@ -1255,7 +1276,7 @@ try_again:
91911 if (unlikely(err)) {
91912 trace_kfree_skb(skb, udp_recvmsg);
91913 if (!peeked) {
91914- atomic_inc(&sk->sk_drops);
91915+ atomic_inc_unchecked(&sk->sk_drops);
91916 UDP_INC_STATS_USER(sock_net(sk),
91917 UDP_MIB_INERRORS, is_udplite);
91918 }
91919@@ -1542,7 +1563,7 @@ csum_error:
91920 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
91921 drop:
91922 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
91923- atomic_inc(&sk->sk_drops);
91924+ atomic_inc_unchecked(&sk->sk_drops);
91925 kfree_skb(skb);
91926 return -1;
91927 }
91928@@ -1561,7 +1582,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
91929 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
91930
91931 if (!skb1) {
91932- atomic_inc(&sk->sk_drops);
91933+ atomic_inc_unchecked(&sk->sk_drops);
91934 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
91935 IS_UDPLITE(sk));
91936 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
91937@@ -1730,6 +1751,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
91938 goto csum_error;
91939
91940 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
91941+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91942+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
91943+#endif
91944 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
91945
91946 /*
91947@@ -2160,7 +2184,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
91948 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
91949 0, sock_i_ino(sp),
91950 atomic_read(&sp->sk_refcnt), sp,
91951- atomic_read(&sp->sk_drops), len);
91952+ atomic_read_unchecked(&sp->sk_drops), len);
91953 }
91954
91955 int udp4_seq_show(struct seq_file *seq, void *v)
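[Annotation] The udp.c diff wires in the grsecurity socket-ACL hooks, gr_search_udp_sendmsg() and gr_search_udp_recvmsg(), consulted before any datagram is queued or copied, and suppresses port-unreachable ICMP under blackhole exactly as TCP suppresses RSTs. A sketch of the receive-side hook placement, with example_udp_recv_check as an illustrative wrapper name:

static int example_udp_recv_check(struct sock *sk, struct sk_buff *skb)
{
	int err = gr_search_udp_recvmsg(sk, skb);	/* RBAC policy hook */

	if (err)
		return err;	/* denied: drop before any data movement */
	/* ... the normal udp_recvmsg() copy path continues here ... */
	return 0;
}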
91956diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
91957index 9a459be..086b866 100644
91958--- a/net/ipv4/xfrm4_policy.c
91959+++ b/net/ipv4/xfrm4_policy.c
91960@@ -264,19 +264,18 @@ static struct ctl_table xfrm4_policy_table[] = {
91961
91962 static int __net_init xfrm4_net_init(struct net *net)
91963 {
91964- struct ctl_table *table;
91965+ ctl_table_no_const *table = NULL;
91966 struct ctl_table_header *hdr;
91967
91968- table = xfrm4_policy_table;
91969 if (!net_eq(net, &init_net)) {
91970- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
91971+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
91972 if (!table)
91973 goto err_alloc;
91974
91975 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
91976- }
91977-
91978- hdr = register_net_sysctl(net, "net/ipv4", table);
91979+ hdr = register_net_sysctl(net, "net/ipv4", table);
91980+ } else
91981+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
91982 if (!hdr)
91983 goto err_reg;
91984
91985@@ -284,8 +283,7 @@ static int __net_init xfrm4_net_init(struct net *net)
91986 return 0;
91987
91988 err_reg:
91989- if (!net_eq(net, &init_net))
91990- kfree(table);
91991+ kfree(table);
91992 err_alloc:
91993 return -ENOMEM;
91994 }
91995diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
91996index fb8c94c..fb18024 100644
91997--- a/net/ipv6/addrconf.c
91998+++ b/net/ipv6/addrconf.c
91999@@ -621,7 +621,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
92000 idx = 0;
92001 head = &net->dev_index_head[h];
92002 rcu_read_lock();
92003- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
92004+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
92005 net->dev_base_seq;
92006 hlist_for_each_entry_rcu(dev, head, index_hlist) {
92007 if (idx < s_idx)
92008@@ -2380,7 +2380,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
92009 p.iph.ihl = 5;
92010 p.iph.protocol = IPPROTO_IPV6;
92011 p.iph.ttl = 64;
92012- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
92013+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
92014
92015 if (ops->ndo_do_ioctl) {
92016 mm_segment_t oldfs = get_fs();
92017@@ -4002,7 +4002,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
92018 s_ip_idx = ip_idx = cb->args[2];
92019
92020 rcu_read_lock();
92021- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
92022+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
92023 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
92024 idx = 0;
92025 head = &net->dev_index_head[h];
92026@@ -4587,7 +4587,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
92027 dst_free(&ifp->rt->dst);
92028 break;
92029 }
92030- atomic_inc(&net->ipv6.dev_addr_genid);
92031+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
92032 }
92033
92034 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
92035@@ -4607,7 +4607,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
92036 int *valp = ctl->data;
92037 int val = *valp;
92038 loff_t pos = *ppos;
92039- ctl_table lctl;
92040+ ctl_table_no_const lctl;
92041 int ret;
92042
92043 /*
92044@@ -4689,7 +4689,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
92045 int *valp = ctl->data;
92046 int val = *valp;
92047 loff_t pos = *ppos;
92048- ctl_table lctl;
92049+ ctl_table_no_const lctl;
92050 int ret;
92051
92052 /*
92053diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
92054index 40ffd72..aeac0dc 100644
92055--- a/net/ipv6/esp6.c
92056+++ b/net/ipv6/esp6.c
92057@@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
92058 net_adj = 0;
92059
92060 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
92061- net_adj) & ~(align - 1)) + (net_adj - 2);
92062+ net_adj) & ~(align - 1)) + net_adj - 2;
92063 }
92064
92065 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
92066diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
92067index b4ff0a4..db9b764 100644
92068--- a/net/ipv6/icmp.c
92069+++ b/net/ipv6/icmp.c
92070@@ -980,7 +980,7 @@ ctl_table ipv6_icmp_table_template[] = {
92071
92072 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
92073 {
92074- struct ctl_table *table;
92075+ ctl_table_no_const *table;
92076
92077 table = kmemdup(ipv6_icmp_table_template,
92078 sizeof(ipv6_icmp_table_template),
92079diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
92080index ecd6073..58162ae 100644
92081--- a/net/ipv6/ip6_gre.c
92082+++ b/net/ipv6/ip6_gre.c
92083@@ -74,7 +74,7 @@ struct ip6gre_net {
92084 struct net_device *fb_tunnel_dev;
92085 };
92086
92087-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
92088+static struct rtnl_link_ops ip6gre_link_ops;
92089 static int ip6gre_tunnel_init(struct net_device *dev);
92090 static void ip6gre_tunnel_setup(struct net_device *dev);
92091 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
92092@@ -1283,7 +1283,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
92093 }
92094
92095
92096-static struct inet6_protocol ip6gre_protocol __read_mostly = {
92097+static struct inet6_protocol ip6gre_protocol = {
92098 .handler = ip6gre_rcv,
92099 .err_handler = ip6gre_err,
92100 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
92101@@ -1617,7 +1617,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
92102 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
92103 };
92104
92105-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
92106+static struct rtnl_link_ops ip6gre_link_ops = {
92107 .kind = "ip6gre",
92108 .maxtype = IFLA_GRE_MAX,
92109 .policy = ip6gre_policy,
92110@@ -1630,7 +1630,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
92111 .fill_info = ip6gre_fill_info,
92112 };
92113
92114-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
92115+static struct rtnl_link_ops ip6gre_tap_ops = {
92116 .kind = "ip6gretap",
92117 .maxtype = IFLA_GRE_MAX,
92118 .policy = ip6gre_policy,
92119diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
92120index 1e55866..b398dab 100644
92121--- a/net/ipv6/ip6_tunnel.c
92122+++ b/net/ipv6/ip6_tunnel.c
92123@@ -88,7 +88,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
92124
92125 static int ip6_tnl_dev_init(struct net_device *dev);
92126 static void ip6_tnl_dev_setup(struct net_device *dev);
92127-static struct rtnl_link_ops ip6_link_ops __read_mostly;
92128+static struct rtnl_link_ops ip6_link_ops;
92129
92130 static int ip6_tnl_net_id __read_mostly;
92131 struct ip6_tnl_net {
92132@@ -1672,7 +1672,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
92133 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
92134 };
92135
92136-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
92137+static struct rtnl_link_ops ip6_link_ops = {
92138 .kind = "ip6tnl",
92139 .maxtype = IFLA_IPTUN_MAX,
92140 .policy = ip6_tnl_policy,
92141diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
92142index d1e2e8e..51c19ae 100644
92143--- a/net/ipv6/ipv6_sockglue.c
92144+++ b/net/ipv6/ipv6_sockglue.c
92145@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
92146 if (sk->sk_type != SOCK_STREAM)
92147 return -ENOPROTOOPT;
92148
92149- msg.msg_control = optval;
92150+ msg.msg_control = (void __force_kernel *)optval;
92151 msg.msg_controllen = len;
92152 msg.msg_flags = flags;
92153
92154diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
92155index 44400c2..8e11f52 100644
92156--- a/net/ipv6/netfilter/ip6_tables.c
92157+++ b/net/ipv6/netfilter/ip6_tables.c
92158@@ -1078,14 +1078,14 @@ static int compat_table_info(const struct xt_table_info *info,
92159 #endif
92160
92161 static int get_info(struct net *net, void __user *user,
92162- const int *len, int compat)
92163+ int len, int compat)
92164 {
92165 char name[XT_TABLE_MAXNAMELEN];
92166 struct xt_table *t;
92167 int ret;
92168
92169- if (*len != sizeof(struct ip6t_getinfo)) {
92170- duprintf("length %u != %zu\n", *len,
92171+ if (len != sizeof(struct ip6t_getinfo)) {
92172+ duprintf("length %u != %zu\n", len,
92173 sizeof(struct ip6t_getinfo));
92174 return -EINVAL;
92175 }
92176@@ -1122,7 +1122,7 @@ static int get_info(struct net *net, void __user *user,
92177 info.size = private->size;
92178 strcpy(info.name, name);
92179
92180- if (copy_to_user(user, &info, *len) != 0)
92181+ if (copy_to_user(user, &info, len) != 0)
92182 ret = -EFAULT;
92183 else
92184 ret = 0;
92185@@ -1976,7 +1976,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
92186
92187 switch (cmd) {
92188 case IP6T_SO_GET_INFO:
92189- ret = get_info(sock_net(sk), user, len, 1);
92190+ ret = get_info(sock_net(sk), user, *len, 1);
92191 break;
92192 case IP6T_SO_GET_ENTRIES:
92193 ret = compat_get_entries(sock_net(sk), user, len);
92194@@ -2023,7 +2023,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
92195
92196 switch (cmd) {
92197 case IP6T_SO_GET_INFO:
92198- ret = get_info(sock_net(sk), user, len, 0);
92199+ ret = get_info(sock_net(sk), user, *len, 0);
92200 break;
92201
92202 case IP6T_SO_GET_ENTRIES:
92203diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
92204index dffdc1a..ccc6678 100644
92205--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
92206+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
92207@@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
92208
92209 static int nf_ct_frag6_sysctl_register(struct net *net)
92210 {
92211- struct ctl_table *table;
92212+ ctl_table_no_const *table = NULL;
92213 struct ctl_table_header *hdr;
92214
92215- table = nf_ct_frag6_sysctl_table;
92216 if (!net_eq(net, &init_net)) {
92217- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
92218+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
92219 GFP_KERNEL);
92220 if (table == NULL)
92221 goto err_alloc;
92222@@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
92223 table[0].data = &net->nf_frag.frags.timeout;
92224 table[1].data = &net->nf_frag.frags.low_thresh;
92225 table[2].data = &net->nf_frag.frags.high_thresh;
92226- }
92227-
92228- hdr = register_net_sysctl(net, "net/netfilter", table);
92229+ hdr = register_net_sysctl(net, "net/netfilter", table);
92230+ } else
92231+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
92232 if (hdr == NULL)
92233 goto err_reg;
92234
92235@@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
92236 return 0;
92237
92238 err_reg:
92239- if (!net_eq(net, &init_net))
92240- kfree(table);
92241+ kfree(table);
92242 err_alloc:
92243 return -ENOMEM;
92244 }
92245diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
92246index eedff8c..6e13a47 100644
92247--- a/net/ipv6/raw.c
92248+++ b/net/ipv6/raw.c
92249@@ -378,7 +378,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
92250 {
92251 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
92252 skb_checksum_complete(skb)) {
92253- atomic_inc(&sk->sk_drops);
92254+ atomic_inc_unchecked(&sk->sk_drops);
92255 kfree_skb(skb);
92256 return NET_RX_DROP;
92257 }
92258@@ -406,7 +406,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
92259 struct raw6_sock *rp = raw6_sk(sk);
92260
92261 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
92262- atomic_inc(&sk->sk_drops);
92263+ atomic_inc_unchecked(&sk->sk_drops);
92264 kfree_skb(skb);
92265 return NET_RX_DROP;
92266 }
92267@@ -430,7 +430,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
92268
92269 if (inet->hdrincl) {
92270 if (skb_checksum_complete(skb)) {
92271- atomic_inc(&sk->sk_drops);
92272+ atomic_inc_unchecked(&sk->sk_drops);
92273 kfree_skb(skb);
92274 return NET_RX_DROP;
92275 }
92276@@ -602,7 +602,7 @@ out:
92277 return err;
92278 }
92279
92280-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
92281+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
92282 struct flowi6 *fl6, struct dst_entry **dstp,
92283 unsigned int flags)
92284 {
92285@@ -914,12 +914,15 @@ do_confirm:
92286 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
92287 char __user *optval, int optlen)
92288 {
92289+ struct icmp6_filter filter;
92290+
92291 switch (optname) {
92292 case ICMPV6_FILTER:
92293 if (optlen > sizeof(struct icmp6_filter))
92294 optlen = sizeof(struct icmp6_filter);
92295- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
92296+ if (copy_from_user(&filter, optval, optlen))
92297 return -EFAULT;
92298+ raw6_sk(sk)->filter = filter;
92299 return 0;
92300 default:
92301 return -ENOPROTOOPT;
92302@@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
92303 char __user *optval, int __user *optlen)
92304 {
92305 int len;
92306+ struct icmp6_filter filter;
92307
92308 switch (optname) {
92309 case ICMPV6_FILTER:
92310@@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
92311 len = sizeof(struct icmp6_filter);
92312 if (put_user(len, optlen))
92313 return -EFAULT;
92314- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
92315+ filter = raw6_sk(sk)->filter;
92316+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
92317 return -EFAULT;
92318 return 0;
92319 default:
92320@@ -1251,7 +1256,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
92321 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
92322 0,
92323 sock_i_ino(sp),
92324- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
92325+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
92326 }
92327
92328 static int raw6_seq_show(struct seq_file *seq, void *v)
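
Several raw.c hunks above share one defensive pattern: user-supplied option data is staged in a stack copy, length-clamped, and only then assigned to the socket's long-lived filter, and symmetrically on the read side. A userspace sketch of that bounce-buffer idiom, with memcpy standing in for copy_from_user/copy_to_user; nothing here is kernel API:

    #include <string.h>
    #include <stdint.h>
    #include <stdio.h>

    struct filter { uint32_t data[8]; };
    static struct filter sock_filter;    /* the long-lived object */

    static int set_filter(const void *uptr, size_t optlen)
    {
        struct filter tmp;

        if (optlen > sizeof(tmp))
            optlen = sizeof(tmp);    /* clamp, as the setsockopt hunk does */
        memcpy(&tmp, uptr, optlen);  /* stage in the stack copy first */
        sock_filter = tmp;           /* then publish in one assignment */
        return 0;
    }

    static int get_filter(void *uptr, size_t len)
    {
        struct filter tmp = sock_filter;

        if (len > sizeof(tmp))       /* mirrors the added len check */
            return -1;
        memcpy(uptr, &tmp, len);
        return 0;
    }

    int main(void)
    {
        struct filter in = { { 1, 2, 3 } }, out = { { 0 } };
        set_filter(&in, sizeof(in));
        get_filter(&out, sizeof(out));
        printf("%u %u %u\n", (unsigned)out.data[0],
               (unsigned)out.data[1], (unsigned)out.data[2]);
        return 0;
    }
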
92329diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
92330index 790d9f4..68ae078 100644
92331--- a/net/ipv6/reassembly.c
92332+++ b/net/ipv6/reassembly.c
92333@@ -621,12 +621,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
92334
92335 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
92336 {
92337- struct ctl_table *table;
92338+ ctl_table_no_const *table = NULL;
92339 struct ctl_table_header *hdr;
92340
92341- table = ip6_frags_ns_ctl_table;
92342 if (!net_eq(net, &init_net)) {
92343- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
92344+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
92345 if (table == NULL)
92346 goto err_alloc;
92347
92348@@ -637,9 +636,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
92349 /* Don't export sysctls to unprivileged users */
92350 if (net->user_ns != &init_user_ns)
92351 table[0].procname = NULL;
92352- }
92353+ hdr = register_net_sysctl(net, "net/ipv6", table);
92354+ } else
92355+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
92356
92357- hdr = register_net_sysctl(net, "net/ipv6", table);
92358 if (hdr == NULL)
92359 goto err_reg;
92360
92361@@ -647,8 +647,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
92362 return 0;
92363
92364 err_reg:
92365- if (!net_eq(net, &init_net))
92366- kfree(table);
92367+ kfree(table);
92368 err_alloc:
92369 return -ENOMEM;
92370 }
92371diff --git a/net/ipv6/route.c b/net/ipv6/route.c
92372index bacce6c..9d1741a 100644
92373--- a/net/ipv6/route.c
92374+++ b/net/ipv6/route.c
92375@@ -2903,7 +2903,7 @@ ctl_table ipv6_route_table_template[] = {
92376
92377 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
92378 {
92379- struct ctl_table *table;
92380+ ctl_table_no_const *table;
92381
92382 table = kmemdup(ipv6_route_table_template,
92383 sizeof(ipv6_route_table_template),
92384diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
92385index 60df36d..f3ab7c8 100644
92386--- a/net/ipv6/sit.c
92387+++ b/net/ipv6/sit.c
92388@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
92389 static void ipip6_dev_free(struct net_device *dev);
92390 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
92391 __be32 *v4dst);
92392-static struct rtnl_link_ops sit_link_ops __read_mostly;
92393+static struct rtnl_link_ops sit_link_ops;
92394
92395 static int sit_net_id __read_mostly;
92396 struct sit_net {
92397@@ -1453,7 +1453,7 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
92398 #endif
92399 };
92400
92401-static struct rtnl_link_ops sit_link_ops __read_mostly = {
92402+static struct rtnl_link_ops sit_link_ops = {
92403 .kind = "sit",
92404 .maxtype = IFLA_IPTUN_MAX,
92405 .policy = ipip6_policy,
92406diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
92407index e85c48b..b8268d3 100644
92408--- a/net/ipv6/sysctl_net_ipv6.c
92409+++ b/net/ipv6/sysctl_net_ipv6.c
92410@@ -40,7 +40,7 @@ static ctl_table ipv6_rotable[] = {
92411
92412 static int __net_init ipv6_sysctl_net_init(struct net *net)
92413 {
92414- struct ctl_table *ipv6_table;
92415+ ctl_table_no_const *ipv6_table;
92416 struct ctl_table *ipv6_route_table;
92417 struct ctl_table *ipv6_icmp_table;
92418 int err;
92419diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
92420index 0a17ed9..2526cc3 100644
92421--- a/net/ipv6/tcp_ipv6.c
92422+++ b/net/ipv6/tcp_ipv6.c
92423@@ -103,6 +103,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
92424 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
92425 }
92426
92427+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
92428+extern int grsec_enable_blackhole;
92429+#endif
92430+
92431 static void tcp_v6_hash(struct sock *sk)
92432 {
92433 if (sk->sk_state != TCP_CLOSE) {
92434@@ -1398,6 +1402,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
92435 return 0;
92436
92437 reset:
92438+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
92439+ if (!grsec_enable_blackhole)
92440+#endif
92441 tcp_v6_send_reset(sk, skb);
92442 discard:
92443 if (opt_skb)
92444@@ -1480,12 +1487,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
92445 TCP_SKB_CB(skb)->sacked = 0;
92446
92447 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
92448- if (!sk)
92449+ if (!sk) {
92450+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
92451+ ret = 1;
92452+#endif
92453 goto no_tcp_socket;
92454+ }
92455
92456 process:
92457- if (sk->sk_state == TCP_TIME_WAIT)
92458+ if (sk->sk_state == TCP_TIME_WAIT) {
92459+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
92460+ ret = 2;
92461+#endif
92462 goto do_time_wait;
92463+ }
92464
92465 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
92466 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
92467@@ -1536,6 +1551,10 @@ csum_error:
92468 bad_packet:
92469 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
92470 } else {
92471+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
92472+ if (!grsec_enable_blackhole || (ret == 1 &&
92473+ (skb->dev->flags & IFF_LOOPBACK)))
92474+#endif
92475 tcp_v6_send_reset(NULL, skb);
92476 }
92477
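
The GRKERNSEC_BLACKHOLE hunks above gate tcp_v6_send_reset() behind a runtime flag, with an exemption when the lookup merely found no socket for loopback traffic. A plain-C sketch of that decision, using stand-ins rather than the kernel's types:

    #include <stdbool.h>
    #include <stdio.h>

    static int blackhole_enabled = 1;    /* would be a sysctl */

    static void send_reset(void) { puts("RST sent"); }

    static void handle_unmatched(bool no_socket, bool loopback)
    {
        /* Send the reset only if blackholing is off, or the packet
         * arrived on loopback and simply found no listening socket. */
        if (!blackhole_enabled || (no_socket && loopback))
            send_reset();
        else
            puts("silently dropped");
    }

    int main(void)
    {
        handle_unmatched(true, false);    /* dropped */
        handle_unmatched(true, true);     /* RST: local traffic exempted */
        blackhole_enabled = 0;
        handle_unmatched(true, false);    /* RST: feature off */
        return 0;
    }
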
92478diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
92479index e7b28f9..d09c290 100644
92480--- a/net/ipv6/udp.c
92481+++ b/net/ipv6/udp.c
92482@@ -52,6 +52,10 @@
92483 #include <trace/events/skb.h>
92484 #include "udp_impl.h"
92485
92486+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
92487+extern int grsec_enable_blackhole;
92488+#endif
92489+
92490 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
92491 {
92492 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
92493@@ -419,7 +423,7 @@ try_again:
92494 if (unlikely(err)) {
92495 trace_kfree_skb(skb, udpv6_recvmsg);
92496 if (!peeked) {
92497- atomic_inc(&sk->sk_drops);
92498+ atomic_inc_unchecked(&sk->sk_drops);
92499 if (is_udp4)
92500 UDP_INC_STATS_USER(sock_net(sk),
92501 UDP_MIB_INERRORS,
92502@@ -665,7 +669,7 @@ csum_error:
92503 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
92504 drop:
92505 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
92506- atomic_inc(&sk->sk_drops);
92507+ atomic_inc_unchecked(&sk->sk_drops);
92508 kfree_skb(skb);
92509 return -1;
92510 }
92511@@ -723,7 +727,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
92512 if (likely(skb1 == NULL))
92513 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
92514 if (!skb1) {
92515- atomic_inc(&sk->sk_drops);
92516+ atomic_inc_unchecked(&sk->sk_drops);
92517 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
92518 IS_UDPLITE(sk));
92519 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
92520@@ -860,6 +864,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
92521 goto csum_error;
92522
92523 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
92524+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
92525+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
92526+#endif
92527 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
92528
92529 kfree_skb(skb);
92530@@ -1392,7 +1399,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
92531 0,
92532 sock_i_ino(sp),
92533 atomic_read(&sp->sk_refcnt), sp,
92534- atomic_read(&sp->sk_drops));
92535+ atomic_read_unchecked(&sp->sk_drops));
92536 }
92537
92538 int udp6_seq_show(struct seq_file *seq, void *v)
92539diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
92540index 23ed03d..465a71d 100644
92541--- a/net/ipv6/xfrm6_policy.c
92542+++ b/net/ipv6/xfrm6_policy.c
92543@@ -324,19 +324,19 @@ static struct ctl_table xfrm6_policy_table[] = {
92544
92545 static int __net_init xfrm6_net_init(struct net *net)
92546 {
92547- struct ctl_table *table;
92548+ ctl_table_no_const *table = NULL;
92549 struct ctl_table_header *hdr;
92550
92551- table = xfrm6_policy_table;
92552 if (!net_eq(net, &init_net)) {
92553- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
92554+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
92555 if (!table)
92556 goto err_alloc;
92557
92558 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
92559- }
92560+ hdr = register_net_sysctl(net, "net/ipv6", table);
92561+ } else
92562+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
92563
92564- hdr = register_net_sysctl(net, "net/ipv6", table);
92565 if (!hdr)
92566 goto err_reg;
92567
92568@@ -344,8 +344,7 @@ static int __net_init xfrm6_net_init(struct net *net)
92569 return 0;
92570
92571 err_reg:
92572- if (!net_eq(net, &init_net))
92573- kfree(table);
92574+ kfree(table);
92575 err_alloc:
92576 return -ENOMEM;
92577 }
92578diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
92579index 41ac7938..75e3bb1 100644
92580--- a/net/irda/ircomm/ircomm_tty.c
92581+++ b/net/irda/ircomm/ircomm_tty.c
92582@@ -319,11 +319,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
92583 add_wait_queue(&port->open_wait, &wait);
92584
92585 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
92586- __FILE__, __LINE__, tty->driver->name, port->count);
92587+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
92588
92589 spin_lock_irqsave(&port->lock, flags);
92590 if (!tty_hung_up_p(filp))
92591- port->count--;
92592+ atomic_dec(&port->count);
92593 port->blocked_open++;
92594 spin_unlock_irqrestore(&port->lock, flags);
92595
92596@@ -358,7 +358,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
92597 }
92598
92599 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
92600- __FILE__, __LINE__, tty->driver->name, port->count);
92601+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
92602
92603 schedule();
92604 }
92605@@ -368,12 +368,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
92606
92607 spin_lock_irqsave(&port->lock, flags);
92608 if (!tty_hung_up_p(filp))
92609- port->count++;
92610+ atomic_inc(&port->count);
92611 port->blocked_open--;
92612 spin_unlock_irqrestore(&port->lock, flags);
92613
92614 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
92615- __FILE__, __LINE__, tty->driver->name, port->count);
92616+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
92617
92618 if (!retval)
92619 port->flags |= ASYNC_NORMAL_ACTIVE;
92620@@ -447,12 +447,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
92621
92622 /* ++ is not atomic, so this should be protected - Jean II */
92623 spin_lock_irqsave(&self->port.lock, flags);
92624- self->port.count++;
92625+ atomic_inc(&self->port.count);
92626 spin_unlock_irqrestore(&self->port.lock, flags);
92627 tty_port_tty_set(&self->port, tty);
92628
92629 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
92630- self->line, self->port.count);
92631+ self->line, atomic_read(&self->port.count));
92632
92633 /* Not really used by us, but lets do it anyway */
92634 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
92635@@ -989,7 +989,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
92636 tty_kref_put(port->tty);
92637 }
92638 port->tty = NULL;
92639- port->count = 0;
92640+ atomic_set(&port->count, 0);
92641 spin_unlock_irqrestore(&port->lock, flags);
92642
92643 wake_up_interruptible(&port->open_wait);
92644@@ -1346,7 +1346,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
92645 seq_putc(m, '\n');
92646
92647 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
92648- seq_printf(m, "Open count: %d\n", self->port.count);
92649+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
92650 seq_printf(m, "Max data size: %d\n", self->max_data_size);
92651 seq_printf(m, "Max header size: %d\n", self->max_header_size);
92652
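
The ircomm_tty hunks above route every port->count access through atomic_{read,inc,dec,set} so the open, block-til-ready and hangup paths cannot lose updates to each other. The C11 equivalents of the converted operations, as a rough userspace analogue:

    #include <stdatomic.h>
    #include <stdio.h>

    struct port { atomic_int count; };

    int main(void)
    {
        struct port p;

        atomic_init(&p.count, 0);
        atomic_fetch_add(&p.count, 1);    /* atomic_inc(&port->count) */
        printf("open_count=%d\n", atomic_load(&p.count));
        atomic_fetch_sub(&p.count, 1);    /* atomic_dec(&port->count) */
        atomic_store(&p.count, 0);        /* atomic_set(&port->count, 0) */
        return 0;
    }
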
92653diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
92654index ae69165..c8b82d8 100644
92655--- a/net/iucv/af_iucv.c
92656+++ b/net/iucv/af_iucv.c
92657@@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
92658
92659 write_lock_bh(&iucv_sk_list.lock);
92660
92661- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
92662+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
92663 while (__iucv_get_sock_by_name(name)) {
92664 sprintf(name, "%08x",
92665- atomic_inc_return(&iucv_sk_list.autobind_name));
92666+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
92667 }
92668
92669 write_unlock_bh(&iucv_sk_list.lock);
92670diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
92671index 4fe76ff..426a904 100644
92672--- a/net/iucv/iucv.c
92673+++ b/net/iucv/iucv.c
92674@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
92675 return NOTIFY_OK;
92676 }
92677
92678-static struct notifier_block __refdata iucv_cpu_notifier = {
92679+static struct notifier_block iucv_cpu_notifier = {
92680 .notifier_call = iucv_cpu_notify,
92681 };
92682
92683diff --git a/net/key/af_key.c b/net/key/af_key.c
92684index ab8bd2c..cd2d641 100644
92685--- a/net/key/af_key.c
92686+++ b/net/key/af_key.c
92687@@ -3048,10 +3048,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
92688 static u32 get_acqseq(void)
92689 {
92690 u32 res;
92691- static atomic_t acqseq;
92692+ static atomic_unchecked_t acqseq;
92693
92694 do {
92695- res = atomic_inc_return(&acqseq);
92696+ res = atomic_inc_return_unchecked(&acqseq);
92697 } while (!res);
92698 return res;
92699 }
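
The get_acqseq() change above swaps only the counter type; the loop around it is the reusable idiom: keep incrementing until the result is non-zero, so 0 stays reserved as "no sequence" even across 32-bit wraparound. A C11 stdatomic sketch of the same function:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint32_t acqseq;

    static uint32_t get_acqseq(void)
    {
        uint32_t res;

        do {
            /* fetch_add returns the old value; +1 gives the new one,
             * matching atomic_inc_return() semantics */
            res = atomic_fetch_add(&acqseq, 1) + 1;
        } while (!res);    /* skip 0 on wraparound */
        return res;
    }

    int main(void)
    {
        uint32_t a = get_acqseq();
        uint32_t b = get_acqseq();
        printf("%u %u\n", a, b);    /* 1 2 */
        return 0;
    }
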
92700diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
92701index ae36f8e..09d42ac 100644
92702--- a/net/mac80211/cfg.c
92703+++ b/net/mac80211/cfg.c
92704@@ -806,7 +806,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
92705 ret = ieee80211_vif_use_channel(sdata, chandef,
92706 IEEE80211_CHANCTX_EXCLUSIVE);
92707 }
92708- } else if (local->open_count == local->monitors) {
92709+ } else if (local_read(&local->open_count) == local->monitors) {
92710 local->_oper_chandef = *chandef;
92711 ieee80211_hw_config(local, 0);
92712 }
92713@@ -2922,7 +2922,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
92714 else
92715 local->probe_req_reg--;
92716
92717- if (!local->open_count)
92718+ if (!local_read(&local->open_count))
92719 break;
92720
92721 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
92722@@ -3385,8 +3385,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
92723 if (chanctx_conf) {
92724 *chandef = chanctx_conf->def;
92725 ret = 0;
92726- } else if (local->open_count > 0 &&
92727- local->open_count == local->monitors &&
92728+ } else if (local_read(&local->open_count) > 0 &&
92729+ local_read(&local->open_count) == local->monitors &&
92730 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
92731 if (local->use_chanctx)
92732 *chandef = local->monitor_chandef;
92733diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
92734index 9ca8e32..48e4a9b 100644
92735--- a/net/mac80211/ieee80211_i.h
92736+++ b/net/mac80211/ieee80211_i.h
92737@@ -28,6 +28,7 @@
92738 #include <net/ieee80211_radiotap.h>
92739 #include <net/cfg80211.h>
92740 #include <net/mac80211.h>
92741+#include <asm/local.h>
92742 #include "key.h"
92743 #include "sta_info.h"
92744 #include "debug.h"
92745@@ -891,7 +892,7 @@ struct ieee80211_local {
92746 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
92747 spinlock_t queue_stop_reason_lock;
92748
92749- int open_count;
92750+ local_t open_count;
92751 int monitors, cooked_mntrs;
92752 /* number of interfaces with corresponding FIF_ flags */
92753 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
92754diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
92755index 514e90f..56f22bf 100644
92756--- a/net/mac80211/iface.c
92757+++ b/net/mac80211/iface.c
92758@@ -502,7 +502,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
92759 break;
92760 }
92761
92762- if (local->open_count == 0) {
92763+ if (local_read(&local->open_count) == 0) {
92764 res = drv_start(local);
92765 if (res)
92766 goto err_del_bss;
92767@@ -545,7 +545,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
92768 break;
92769 }
92770
92771- if (local->monitors == 0 && local->open_count == 0) {
92772+ if (local->monitors == 0 && local_read(&local->open_count) == 0) {
92773 res = ieee80211_add_virtual_monitor(local);
92774 if (res)
92775 goto err_stop;
92776@@ -653,7 +653,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
92777 atomic_inc(&local->iff_promiscs);
92778
92779 if (coming_up)
92780- local->open_count++;
92781+ local_inc(&local->open_count);
92782
92783 if (hw_reconf_flags)
92784 ieee80211_hw_config(local, hw_reconf_flags);
92785@@ -691,7 +691,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
92786 err_del_interface:
92787 drv_remove_interface(local, sdata);
92788 err_stop:
92789- if (!local->open_count)
92790+ if (!local_read(&local->open_count))
92791 drv_stop(local);
92792 err_del_bss:
92793 sdata->bss = NULL;
92794@@ -828,7 +828,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
92795 }
92796
92797 if (going_down)
92798- local->open_count--;
92799+ local_dec(&local->open_count);
92800
92801 switch (sdata->vif.type) {
92802 case NL80211_IFTYPE_AP_VLAN:
92803@@ -895,7 +895,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
92804 }
92805 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
92806
92807- if (local->open_count == 0)
92808+ if (local_read(&local->open_count) == 0)
92809 ieee80211_clear_tx_pending(local);
92810
92811 /*
92812@@ -931,7 +931,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
92813
92814 ieee80211_recalc_ps(local, -1);
92815
92816- if (local->open_count == 0) {
92817+ if (local_read(&local->open_count) == 0) {
92818 ieee80211_stop_device(local);
92819
92820 /* no reconfiguring after stop! */
92821@@ -942,7 +942,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
92822 ieee80211_configure_filter(local);
92823 ieee80211_hw_config(local, hw_reconf_flags);
92824
92825- if (local->monitors == local->open_count)
92826+ if (local->monitors == local_read(&local->open_count))
92827 ieee80211_add_virtual_monitor(local);
92828 }
92829
92830diff --git a/net/mac80211/main.c b/net/mac80211/main.c
92831index 8a7bfc4..4407cd0 100644
92832--- a/net/mac80211/main.c
92833+++ b/net/mac80211/main.c
92834@@ -181,7 +181,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
92835 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
92836 IEEE80211_CONF_CHANGE_POWER);
92837
92838- if (changed && local->open_count) {
92839+ if (changed && local_read(&local->open_count)) {
92840 ret = drv_config(local, changed);
92841 /*
92842 * Goal:
92843diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
92844index 3401262..d5cd68d 100644
92845--- a/net/mac80211/pm.c
92846+++ b/net/mac80211/pm.c
92847@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
92848 struct ieee80211_sub_if_data *sdata;
92849 struct sta_info *sta;
92850
92851- if (!local->open_count)
92852+ if (!local_read(&local->open_count))
92853 goto suspend;
92854
92855 ieee80211_scan_cancel(local);
92856@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
92857 cancel_work_sync(&local->dynamic_ps_enable_work);
92858 del_timer_sync(&local->dynamic_ps_timer);
92859
92860- local->wowlan = wowlan && local->open_count;
92861+ local->wowlan = wowlan && local_read(&local->open_count);
92862 if (local->wowlan) {
92863 int err = drv_suspend(local, wowlan);
92864 if (err < 0) {
92865@@ -116,7 +116,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
92866 WARN_ON(!list_empty(&local->chanctx_list));
92867
92868 /* stop hardware - this must stop RX */
92869- if (local->open_count)
92870+ if (local_read(&local->open_count))
92871 ieee80211_stop_device(local);
92872
92873 suspend:
92874diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
92875index a02bef3..f2f38dd 100644
92876--- a/net/mac80211/rate.c
92877+++ b/net/mac80211/rate.c
92878@@ -712,7 +712,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
92879
92880 ASSERT_RTNL();
92881
92882- if (local->open_count)
92883+ if (local_read(&local->open_count))
92884 return -EBUSY;
92885
92886 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
92887diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
92888index c97a065..ff61928 100644
92889--- a/net/mac80211/rc80211_pid_debugfs.c
92890+++ b/net/mac80211/rc80211_pid_debugfs.c
92891@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
92892
92893 spin_unlock_irqrestore(&events->lock, status);
92894
92895- if (copy_to_user(buf, pb, p))
92896+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
92897 return -EFAULT;
92898
92899 return p;
92900diff --git a/net/mac80211/util.c b/net/mac80211/util.c
92901index 72e6292..e6319eb 100644
92902--- a/net/mac80211/util.c
92903+++ b/net/mac80211/util.c
92904@@ -1472,7 +1472,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
92905 }
92906 #endif
92907 /* everything else happens only if HW was up & running */
92908- if (!local->open_count)
92909+ if (!local_read(&local->open_count))
92910 goto wake_up;
92911
92912 /*
92913@@ -1696,7 +1696,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
92914 local->in_reconfig = false;
92915 barrier();
92916
92917- if (local->monitors == local->open_count && local->monitors > 0)
92918+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
92919 ieee80211_add_virtual_monitor(local);
92920
92921 /*
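
All of the mac80211 hunks above are one mechanical conversion: open_count becomes a local_t and every access goes through local_read()/local_inc()/local_dec(). The value being protected is a first-opener/last-closer refcount. A rough userspace analogue with C11 atomics (which only approximate the kernel's per-CPU-cheap local_t; names hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>

    struct dev_state { atomic_int open_count; };

    static void dev_open(struct dev_state *d)
    {
        if (atomic_load(&d->open_count) == 0)
            puts("starting hardware");    /* first opener */
        atomic_fetch_add(&d->open_count, 1);
    }

    static void dev_close(struct dev_state *d)
    {
        /* fetch_sub returns the old value: 1 means we were last */
        if (atomic_fetch_sub(&d->open_count, 1) == 1)
            puts("stopping hardware");
    }

    int main(void)
    {
        struct dev_state d;

        atomic_init(&d.open_count, 0);
        dev_open(&d);
        dev_open(&d);
        dev_close(&d);
        dev_close(&d);
        return 0;
    }
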
92922diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
92923index 56d22ca..87c778f 100644
92924--- a/net/netfilter/Kconfig
92925+++ b/net/netfilter/Kconfig
92926@@ -958,6 +958,16 @@ config NETFILTER_XT_MATCH_ESP
92927
92928 To compile it as a module, choose M here. If unsure, say N.
92929
92930+config NETFILTER_XT_MATCH_GRADM
92931+ tristate '"gradm" match support'
92932+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
92933+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
92934+ ---help---
92935+	  The gradm match allows matching on whether grsecurity RBAC is enabled.
92936+ It is useful when iptables rules are applied early on bootup to
92937+ prevent connections to the machine (except from a trusted host)
92938+ while the RBAC system is disabled.
92939+
92940 config NETFILTER_XT_MATCH_HASHLIMIT
92941 tristate '"hashlimit" match support'
92942 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
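
The help text above amounts to a fail-closed bootup firewall. A rule along these lines is what grsecurity's userland documentation suggests for it (the --enabled option belongs to the xt_gradm userspace library and is assumed here, not defined by this hunk):

    iptables -A INPUT ! -i lo -m gradm ! --enabled -j DROP

i.e. until the RBAC system comes up, drop every packet that did not arrive on loopback.
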
92943diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
92944index a1abf87..dbcb7ee 100644
92945--- a/net/netfilter/Makefile
92946+++ b/net/netfilter/Makefile
92947@@ -112,6 +112,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
92948 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
92949 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
92950 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
92951+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
92952 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
92953 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
92954 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
92955diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
92956index f771390..145b765 100644
92957--- a/net/netfilter/ipset/ip_set_core.c
92958+++ b/net/netfilter/ipset/ip_set_core.c
92959@@ -1820,7 +1820,7 @@ done:
92960 return ret;
92961 }
92962
92963-static struct nf_sockopt_ops so_set __read_mostly = {
92964+static struct nf_sockopt_ops so_set = {
92965 .pf = PF_INET,
92966 .get_optmin = SO_IP_SET,
92967 .get_optmax = SO_IP_SET + 1,
92968diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
92969index a083bda..da661c3 100644
92970--- a/net/netfilter/ipvs/ip_vs_conn.c
92971+++ b/net/netfilter/ipvs/ip_vs_conn.c
92972@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
92973 /* Increase the refcnt counter of the dest */
92974 ip_vs_dest_hold(dest);
92975
92976- conn_flags = atomic_read(&dest->conn_flags);
92977+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
92978 if (cp->protocol != IPPROTO_UDP)
92979 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
92980 flags = cp->flags;
92981@@ -900,7 +900,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
92982
92983 cp->control = NULL;
92984 atomic_set(&cp->n_control, 0);
92985- atomic_set(&cp->in_pkts, 0);
92986+ atomic_set_unchecked(&cp->in_pkts, 0);
92987
92988 cp->packet_xmit = NULL;
92989 cp->app = NULL;
92990@@ -1190,7 +1190,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
92991
92992 /* Don't drop the entry if its number of incoming packets is not
92993 located in [0, 8] */
92994- i = atomic_read(&cp->in_pkts);
92995+ i = atomic_read_unchecked(&cp->in_pkts);
92996 if (i > 8 || i < 0) return 0;
92997
92998 if (!todrop_rate[i]) return 0;
92999diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
93000index 23b8eb5..48a8959 100644
93001--- a/net/netfilter/ipvs/ip_vs_core.c
93002+++ b/net/netfilter/ipvs/ip_vs_core.c
93003@@ -559,7 +559,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
93004 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
93005 /* do not touch skb anymore */
93006
93007- atomic_inc(&cp->in_pkts);
93008+ atomic_inc_unchecked(&cp->in_pkts);
93009 ip_vs_conn_put(cp);
93010 return ret;
93011 }
93012@@ -1711,7 +1711,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
93013 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
93014 pkts = sysctl_sync_threshold(ipvs);
93015 else
93016- pkts = atomic_add_return(1, &cp->in_pkts);
93017+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
93018
93019 if (ipvs->sync_state & IP_VS_STATE_MASTER)
93020 ip_vs_sync_conn(net, cp, pkts);
93021diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
93022index 9e6c2a0..28552e2 100644
93023--- a/net/netfilter/ipvs/ip_vs_ctl.c
93024+++ b/net/netfilter/ipvs/ip_vs_ctl.c
93025@@ -789,7 +789,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
93026 */
93027 ip_vs_rs_hash(ipvs, dest);
93028 }
93029- atomic_set(&dest->conn_flags, conn_flags);
93030+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
93031
93032 /* bind the service */
93033 if (!dest->svc) {
93034@@ -1657,7 +1657,7 @@ proc_do_sync_ports(ctl_table *table, int write,
93035 * align with netns init in ip_vs_control_net_init()
93036 */
93037
93038-static struct ctl_table vs_vars[] = {
93039+static ctl_table_no_const vs_vars[] __read_only = {
93040 {
93041 .procname = "amemthresh",
93042 .maxlen = sizeof(int),
93043@@ -2060,7 +2060,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
93044 " %-7s %-6d %-10d %-10d\n",
93045 &dest->addr.in6,
93046 ntohs(dest->port),
93047- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
93048+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
93049 atomic_read(&dest->weight),
93050 atomic_read(&dest->activeconns),
93051 atomic_read(&dest->inactconns));
93052@@ -2071,7 +2071,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
93053 "%-7s %-6d %-10d %-10d\n",
93054 ntohl(dest->addr.ip),
93055 ntohs(dest->port),
93056- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
93057+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
93058 atomic_read(&dest->weight),
93059 atomic_read(&dest->activeconns),
93060 atomic_read(&dest->inactconns));
93061@@ -2549,7 +2549,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
93062
93063 entry.addr = dest->addr.ip;
93064 entry.port = dest->port;
93065- entry.conn_flags = atomic_read(&dest->conn_flags);
93066+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
93067 entry.weight = atomic_read(&dest->weight);
93068 entry.u_threshold = dest->u_threshold;
93069 entry.l_threshold = dest->l_threshold;
93070@@ -3092,7 +3092,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
93071 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
93072 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
93073 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
93074- (atomic_read(&dest->conn_flags) &
93075+ (atomic_read_unchecked(&dest->conn_flags) &
93076 IP_VS_CONN_F_FWD_MASK)) ||
93077 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
93078 atomic_read(&dest->weight)) ||
93079@@ -3682,7 +3682,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
93080 {
93081 int idx;
93082 struct netns_ipvs *ipvs = net_ipvs(net);
93083- struct ctl_table *tbl;
93084+ ctl_table_no_const *tbl;
93085
93086 atomic_set(&ipvs->dropentry, 0);
93087 spin_lock_init(&ipvs->dropentry_lock);
93088diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
93089index 5ea26bd..c9bc65f 100644
93090--- a/net/netfilter/ipvs/ip_vs_lblc.c
93091+++ b/net/netfilter/ipvs/ip_vs_lblc.c
93092@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
93093 * IPVS LBLC sysctl table
93094 */
93095 #ifdef CONFIG_SYSCTL
93096-static ctl_table vs_vars_table[] = {
93097+static ctl_table_no_const vs_vars_table[] __read_only = {
93098 {
93099 .procname = "lblc_expiration",
93100 .data = NULL,
93101diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
93102index 50123c2..067c773 100644
93103--- a/net/netfilter/ipvs/ip_vs_lblcr.c
93104+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
93105@@ -299,7 +299,7 @@ struct ip_vs_lblcr_table {
93106 * IPVS LBLCR sysctl table
93107 */
93108
93109-static ctl_table vs_vars_table[] = {
93110+static ctl_table_no_const vs_vars_table[] __read_only = {
93111 {
93112 .procname = "lblcr_expiration",
93113 .data = NULL,
93114diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
93115index f6046d9..4f10cfd 100644
93116--- a/net/netfilter/ipvs/ip_vs_sync.c
93117+++ b/net/netfilter/ipvs/ip_vs_sync.c
93118@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
93119 cp = cp->control;
93120 if (cp) {
93121 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
93122- pkts = atomic_add_return(1, &cp->in_pkts);
93123+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
93124 else
93125 pkts = sysctl_sync_threshold(ipvs);
93126 ip_vs_sync_conn(net, cp->control, pkts);
93127@@ -758,7 +758,7 @@ control:
93128 if (!cp)
93129 return;
93130 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
93131- pkts = atomic_add_return(1, &cp->in_pkts);
93132+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
93133 else
93134 pkts = sysctl_sync_threshold(ipvs);
93135 goto sloop;
93136@@ -882,7 +882,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
93137
93138 if (opt)
93139 memcpy(&cp->in_seq, opt, sizeof(*opt));
93140- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
93141+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
93142 cp->state = state;
93143 cp->old_state = cp->state;
93144 /*
93145diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
93146index b75ff64..0c51bbe 100644
93147--- a/net/netfilter/ipvs/ip_vs_xmit.c
93148+++ b/net/netfilter/ipvs/ip_vs_xmit.c
93149@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
93150 else
93151 rc = NF_ACCEPT;
93152 /* do not touch skb anymore */
93153- atomic_inc(&cp->in_pkts);
93154+ atomic_inc_unchecked(&cp->in_pkts);
93155 goto out;
93156 }
93157
93158@@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
93159 else
93160 rc = NF_ACCEPT;
93161 /* do not touch skb anymore */
93162- atomic_inc(&cp->in_pkts);
93163+ atomic_inc_unchecked(&cp->in_pkts);
93164 goto out;
93165 }
93166
93167diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
93168index 2d3030a..7ba1c0a 100644
93169--- a/net/netfilter/nf_conntrack_acct.c
93170+++ b/net/netfilter/nf_conntrack_acct.c
93171@@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
93172 #ifdef CONFIG_SYSCTL
93173 static int nf_conntrack_acct_init_sysctl(struct net *net)
93174 {
93175- struct ctl_table *table;
93176+ ctl_table_no_const *table;
93177
93178 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
93179 GFP_KERNEL);
93180diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
93181index 0283bae..5febcb0 100644
93182--- a/net/netfilter/nf_conntrack_core.c
93183+++ b/net/netfilter/nf_conntrack_core.c
93184@@ -1614,6 +1614,10 @@ void nf_conntrack_init_end(void)
93185 #define DYING_NULLS_VAL ((1<<30)+1)
93186 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
93187
93188+#ifdef CONFIG_GRKERNSEC_HIDESYM
93189+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
93190+#endif
93191+
93192 int nf_conntrack_init_net(struct net *net)
93193 {
93194 int ret;
93195@@ -1628,7 +1632,11 @@ int nf_conntrack_init_net(struct net *net)
93196 goto err_stat;
93197 }
93198
93199+#ifdef CONFIG_GRKERNSEC_HIDESYM
93200+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
93201+#else
93202 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
93203+#endif
93204 if (!net->ct.slabname) {
93205 ret = -ENOMEM;
93206 goto err_slabname;
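
The HIDESYM hunk above names the per-netns conntrack cache after a monotonic counter instead of the net pointer, so slab names visible in /proc no longer leak a kernel address. A userspace sketch of the naming scheme, with snprintf standing in for kasprintf and all names hypothetical:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned long cache_id;

    static void make_cache_name(char *buf, size_t len)
    {
        unsigned long id = atomic_fetch_add(&cache_id, 1) + 1;

        /* "%08lx" of a counter is unique and stable but meaningless
         * to an attacker, unlike "%p" of the owning object */
        snprintf(buf, len, "nf_conntrack_%08lx", id);
    }

    int main(void)
    {
        char name[32];

        make_cache_name(name, sizeof(name));
        puts(name);    /* nf_conntrack_00000001 */
        make_cache_name(name, sizeof(name));
        puts(name);    /* nf_conntrack_00000002 */
        return 0;
    }
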
93207diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
93208index 1df1761..ce8b88a 100644
93209--- a/net/netfilter/nf_conntrack_ecache.c
93210+++ b/net/netfilter/nf_conntrack_ecache.c
93211@@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
93212 #ifdef CONFIG_SYSCTL
93213 static int nf_conntrack_event_init_sysctl(struct net *net)
93214 {
93215- struct ctl_table *table;
93216+ ctl_table_no_const *table;
93217
93218 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
93219 GFP_KERNEL);
93220diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
93221index 974a2a4..52cc6ff 100644
93222--- a/net/netfilter/nf_conntrack_helper.c
93223+++ b/net/netfilter/nf_conntrack_helper.c
93224@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
93225
93226 static int nf_conntrack_helper_init_sysctl(struct net *net)
93227 {
93228- struct ctl_table *table;
93229+ ctl_table_no_const *table;
93230
93231 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
93232 GFP_KERNEL);
93233diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
93234index 0ab9636..cea3c6a 100644
93235--- a/net/netfilter/nf_conntrack_proto.c
93236+++ b/net/netfilter/nf_conntrack_proto.c
93237@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
93238
93239 static void
93240 nf_ct_unregister_sysctl(struct ctl_table_header **header,
93241- struct ctl_table **table,
93242+ ctl_table_no_const **table,
93243 unsigned int users)
93244 {
93245 if (users > 0)
93246diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
93247index a99b6c3..3841268 100644
93248--- a/net/netfilter/nf_conntrack_proto_dccp.c
93249+++ b/net/netfilter/nf_conntrack_proto_dccp.c
93250@@ -457,7 +457,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
93251 out_invalid:
93252 if (LOG_INVALID(net, IPPROTO_DCCP))
93253 nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
93254- NULL, msg);
93255+ NULL, "%s", msg);
93256 return false;
93257 }
93258
93259@@ -614,7 +614,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
93260
93261 out_invalid:
93262 if (LOG_INVALID(net, IPPROTO_DCCP))
93263- nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, msg);
93264+ nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
93265 return -NF_ACCEPT;
93266 }
93267
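
The two one-word DCCP fixes above are the textbook format-string repair: msg is data, so it must travel through a "%s" conversion rather than being interpreted as the format itself. The same bug and fix in ordinary C:

    #include <stdio.h>

    static void log_invalid(const char *msg)
    {
        /* WRONG: printf(msg); — a msg containing "%x%x%n" would read
         * from, and write through, the stack. */
        printf("%s\n", msg);    /* RIGHT: msg is an argument, not a format */
    }

    int main(void)
    {
        log_invalid("malformed DCCP packet %n");    /* printed literally */
        return 0;
    }
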
93268diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
93269index 4d4d8f1..e0f9a32 100644
93270--- a/net/netfilter/nf_conntrack_proto_tcp.c
93271+++ b/net/netfilter/nf_conntrack_proto_tcp.c
93272@@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
93273 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
93274 __u32 seq, ack, sack, end, win, swin;
93275 s16 receiver_offset;
93276- bool res;
93277+ bool res, in_recv_win;
93278
93279 /*
93280 * Get the required data from the packet.
93281@@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct,
93282 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
93283 receiver->td_scale);
93284
93285+ /* Is the ending sequence in the receive window (if available)? */
93286+ in_recv_win = !receiver->td_maxwin ||
93287+ after(end, sender->td_end - receiver->td_maxwin - 1);
93288+
93289 pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
93290 before(seq, sender->td_maxend + 1),
93291- after(end, sender->td_end - receiver->td_maxwin - 1),
93292+ (in_recv_win ? 1 : 0),
93293 before(sack, receiver->td_end + 1),
93294 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
93295
93296 if (before(seq, sender->td_maxend + 1) &&
93297- after(end, sender->td_end - receiver->td_maxwin - 1) &&
93298+ in_recv_win &&
93299 before(sack, receiver->td_end + 1) &&
93300 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
93301 /*
93302@@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
93303 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
93304 "nf_ct_tcp: %s ",
93305 before(seq, sender->td_maxend + 1) ?
93306- after(end, sender->td_end - receiver->td_maxwin - 1) ?
93307+ in_recv_win ?
93308 before(sack, receiver->td_end + 1) ?
93309 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
93310 : "ACK is under the lower bound (possible overly delayed ACK)"
93311diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
93312index bd700b4..4a3dc61 100644
93313--- a/net/netfilter/nf_conntrack_standalone.c
93314+++ b/net/netfilter/nf_conntrack_standalone.c
93315@@ -471,7 +471,7 @@ static ctl_table nf_ct_netfilter_table[] = {
93316
93317 static int nf_conntrack_standalone_init_sysctl(struct net *net)
93318 {
93319- struct ctl_table *table;
93320+ ctl_table_no_const *table;
93321
93322 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
93323 GFP_KERNEL);
93324diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
93325index 902fb0a..87f7fdb 100644
93326--- a/net/netfilter/nf_conntrack_timestamp.c
93327+++ b/net/netfilter/nf_conntrack_timestamp.c
93328@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
93329 #ifdef CONFIG_SYSCTL
93330 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
93331 {
93332- struct ctl_table *table;
93333+ ctl_table_no_const *table;
93334
93335 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
93336 GFP_KERNEL);
93337diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
93338index 3b18dd1..f79e0ca 100644
93339--- a/net/netfilter/nf_log.c
93340+++ b/net/netfilter/nf_log.c
93341@@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = {
93342
93343 #ifdef CONFIG_SYSCTL
93344 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
93345-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
93346+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
93347
93348 static int nf_log_proc_dostring(ctl_table *table, int write,
93349 void __user *buffer, size_t *lenp, loff_t *ppos)
93350@@ -274,14 +274,16 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
93351 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
93352 mutex_unlock(&nf_log_mutex);
93353 } else {
93354+ ctl_table_no_const nf_log_table = *table;
93355+
93356 mutex_lock(&nf_log_mutex);
93357 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
93358 lockdep_is_held(&nf_log_mutex));
93359 if (!logger)
93360- table->data = "NONE";
93361+ nf_log_table.data = "NONE";
93362 else
93363- table->data = logger->name;
93364- r = proc_dostring(table, write, buffer, lenp, ppos);
93365+ nf_log_table.data = logger->name;
93366+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
93367 mutex_unlock(&nf_log_mutex);
93368 }
93369
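
The nf_log_proc_dostring() hunk above stops writing through the shared (and, with this patch, read-only) ctl_table and instead edits a stack copy whose .data is pointed at whatever the current logger is. A userspace sketch of that copy-then-redirect pattern, with hypothetical types throughout:

    #include <stdio.h>

    struct table { const char *name; const char *data; };

    static const struct table shared_tbl = { "logger", NULL };

    static void show(const struct table *t)
    {
        printf("%s = %s\n", t->name, t->data ? t->data : "NONE");
    }

    static void read_handler(const char *current_logger)
    {
        struct table local = shared_tbl;    /* stack copy */

        /* mutate only the copy; shared_tbl can live in r/o memory */
        local.data = current_logger ? current_logger : "NONE";
        show(&local);
    }

    int main(void)
    {
        read_handler(NULL);              /* logger = NONE */
        read_handler("nfnetlink_log");   /* logger = nfnetlink_log */
        return 0;
    }
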
93370diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
93371index f042ae5..30ea486 100644
93372--- a/net/netfilter/nf_sockopt.c
93373+++ b/net/netfilter/nf_sockopt.c
93374@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
93375 }
93376 }
93377
93378- list_add(&reg->list, &nf_sockopts);
93379+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
93380 out:
93381 mutex_unlock(&nf_sockopt_mutex);
93382 return ret;
93383@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
93384 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
93385 {
93386 mutex_lock(&nf_sockopt_mutex);
93387- list_del(&reg->list);
93388+ pax_list_del((struct list_head *)&reg->list);
93389 mutex_unlock(&nf_sockopt_mutex);
93390 }
93391 EXPORT_SYMBOL(nf_unregister_sockopt);
93392diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
93393index 962e979..e46f350 100644
93394--- a/net/netfilter/nfnetlink_log.c
93395+++ b/net/netfilter/nfnetlink_log.c
93396@@ -82,7 +82,7 @@ static int nfnl_log_net_id __read_mostly;
93397 struct nfnl_log_net {
93398 spinlock_t instances_lock;
93399 struct hlist_head instance_table[INSTANCE_BUCKETS];
93400- atomic_t global_seq;
93401+ atomic_unchecked_t global_seq;
93402 };
93403
93404 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
93405@@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log,
93406 nfmsg->version = NFNETLINK_V0;
93407 nfmsg->res_id = htons(inst->group_num);
93408
93409+ memset(&pmsg, 0, sizeof(pmsg));
93410 pmsg.hw_protocol = skb->protocol;
93411 pmsg.hook = hooknum;
93412
93413@@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log,
93414 if (indev && skb->dev &&
93415 skb->mac_header != skb->network_header) {
93416 struct nfulnl_msg_packet_hw phw;
93417- int len = dev_parse_header(skb, phw.hw_addr);
93418+ int len;
93419+
93420+ memset(&phw, 0, sizeof(phw));
93421+ len = dev_parse_header(skb, phw.hw_addr);
93422 if (len > 0) {
93423 phw.hw_addrlen = htons(len);
93424 if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
93425@@ -559,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
93426 /* global sequence number */
93427 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
93428 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
93429- htonl(atomic_inc_return(&log->global_seq))))
93430+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
93431 goto nla_put_failure;
93432
93433 if (data_len) {
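
Both memset() hunks here, and the matching one in nfnetlink_queue_core just below, close the same infoleak: a struct built on the stack and copied out whole (here into a netlink attribute) also carries its padding bytes, which otherwise hold stale stack contents. A standalone illustration, with a field layout chosen so that common ABIs insert padding:

    #include <string.h>
    #include <stdint.h>
    #include <stdio.h>

    struct packet_hw {
        uint8_t  hook;         /* 1 byte; typical ABIs pad 3 bytes here */
        uint32_t hw_protocol;  /* 4-byte-aligned field forces the gap */
        uint8_t  hw_addr[6];   /* plus tail padding up to the struct size */
    };

    static void emit(const struct packet_hw *p)
    {
        const unsigned char *b = (const unsigned char *)p;

        for (size_t i = 0; i < sizeof(*p); i++)
            printf("%02x", b[i]);    /* every byte leaves the process */
        putchar('\n');
    }

    int main(void)
    {
        struct packet_hw phw;

        memset(&phw, 0, sizeof(phw));    /* the fix: no stale bytes remain */
        phw.hook = 1;
        phw.hw_protocol = 0x0800;
        memcpy(phw.hw_addr, "\xaa\xbb\xcc\xdd\xee\xff", 6);
        emit(&phw);
        return 0;
    }
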
93434diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
93435index 5352b2d..e0083ce 100644
93436--- a/net/netfilter/nfnetlink_queue_core.c
93437+++ b/net/netfilter/nfnetlink_queue_core.c
93438@@ -444,7 +444,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
93439 if (indev && entskb->dev &&
93440 entskb->mac_header != entskb->network_header) {
93441 struct nfqnl_msg_packet_hw phw;
93442- int len = dev_parse_header(entskb, phw.hw_addr);
93443+ int len;
93444+
93445+ memset(&phw, 0, sizeof(phw));
93446+ len = dev_parse_header(entskb, phw.hw_addr);
93447 if (len) {
93448 phw.hw_addrlen = htons(len);
93449 if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
93450diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
93451index 7011c71..6113cc7 100644
93452--- a/net/netfilter/xt_TCPMSS.c
93453+++ b/net/netfilter/xt_TCPMSS.c
93454@@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
93455 {
93456 const struct xt_tcpmss_info *info = par->targinfo;
93457 struct tcphdr *tcph;
93458- unsigned int tcplen, i;
93459+ int len, tcp_hdrlen;
93460+ unsigned int i;
93461 __be16 oldval;
93462 u16 newmss;
93463 u8 *opt;
93464@@ -64,11 +65,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
93465 if (!skb_make_writable(skb, skb->len))
93466 return -1;
93467
93468- tcplen = skb->len - tcphoff;
93469+ len = skb->len - tcphoff;
93470+ if (len < (int)sizeof(struct tcphdr))
93471+ return -1;
93472+
93473 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
93474+ tcp_hdrlen = tcph->doff * 4;
93475
93476- /* Header cannot be larger than the packet */
93477- if (tcplen < tcph->doff*4)
93478+ if (len < tcp_hdrlen)
93479 return -1;
93480
93481 if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
93482@@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
93483 newmss = info->mss;
93484
93485 opt = (u_int8_t *)tcph;
93486- for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
93487- if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
93488- opt[i+1] == TCPOLEN_MSS) {
93489+ for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
93490+ if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
93491 u_int16_t oldmss;
93492
93493 oldmss = (opt[i+2] << 8) | opt[i+3];
93494@@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
93495 }
93496
93497 /* There is data after the header so the option can't be added
93498- without moving it, and doing so may make the SYN packet
93499- itself too large. Accept the packet unmodified instead. */
93500- if (tcplen > tcph->doff*4)
93501+ * without moving it, and doing so may make the SYN packet
93502+ * itself too large. Accept the packet unmodified instead.
93503+ */
93504+ if (len > tcp_hdrlen)
93505 return 0;
93506
93507 /*
93508@@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
93509 newmss = min(newmss, (u16)1220);
93510
93511 opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
93512- memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
93513+ memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
93514
93515 inet_proto_csum_replace2(&tcph->check, skb,
93516- htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
93517+ htons(len), htons(len + TCPOLEN_MSS), 1);
93518 opt[0] = TCPOPT_MSS;
93519 opt[1] = TCPOLEN_MSS;
93520 opt[2] = (newmss & 0xff00) >> 8;
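
The TCPMSS hunks above fix a length check that could never fail: with "unsigned int tcplen = skb->len - tcphoff", a truncated packet makes tcplen wrap to a huge value and sail past the "tcplen < tcph->doff*4" test. Keeping the difference signed, as the patch does, makes a short packet show up as a negative length. A standalone sketch of the difference:

    #include <stdio.h>

    #define TCP_MIN_HDR 20

    static int sane_unsigned(unsigned int pkt_len, unsigned int off)
    {
        unsigned int tcplen = pkt_len - off;    /* wraps if off > pkt_len */
        return tcplen >= TCP_MIN_HDR;           /* then always "true" */
    }

    static int sane_signed(unsigned int pkt_len, unsigned int off)
    {
        /* the conversion is implementation-defined in ISO C but yields
         * a negative value on the two's-complement targets Linux runs on */
        int len = (int)(pkt_len - off);
        return len >= TCP_MIN_HDR;
    }

    int main(void)
    {
        /* 10-byte packet claiming its TCP header starts at offset 40 */
        printf("unsigned check says sane: %d\n", sane_unsigned(10, 40)); /* 1 */
        printf("signed check says sane:   %d\n", sane_signed(10, 40));   /* 0 */
        return 0;
    }
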
93521diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
93522index b68fa19..625fa1d 100644
93523--- a/net/netfilter/xt_TCPOPTSTRIP.c
93524+++ b/net/netfilter/xt_TCPOPTSTRIP.c
93525@@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
93526 struct tcphdr *tcph;
93527 u_int16_t n, o;
93528 u_int8_t *opt;
93529- int len;
93530+ int len, tcp_hdrlen;
93531
93532 /* This is a fragment, no TCP header is available */
93533 if (par->fragoff != 0)
93534@@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
93535 return NF_DROP;
93536
93537 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
93538- if (tcph->doff * 4 > len)
93539+ tcp_hdrlen = tcph->doff * 4;
93540+
93541+ if (len < tcp_hdrlen)
93542 return NF_DROP;
93543
93544 opt = (u_int8_t *)tcph;
93545@@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
93546 * Walk through all TCP options - if we find some option to remove,
93547 * set all octets to %TCPOPT_NOP and adjust checksum.
93548 */
93549- for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) {
93550+ for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) {
93551 optl = optlen(opt, i);
93552
93553- if (i + optl > tcp_hdrlen(skb))
93554+ if (i + optl > tcp_hdrlen)
93555 break;
93556
93557 if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
93558diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
93559new file mode 100644
93560index 0000000..c566332
93561--- /dev/null
93562+++ b/net/netfilter/xt_gradm.c
93563@@ -0,0 +1,51 @@
93564+/*
93565+ * gradm match for netfilter
93566