]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.9.1-3.10.10-201308301923.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9.1-3.10.10-201308301923.patch
CommitLineData
352325f5
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index b89a739..79768fb 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -69,9 +75,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -80,6 +88,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -92,19 +101,24 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89@@ -115,9 +129,11 @@ devlist.h*
90 dnotify_test
91 docproc
92 dslm
93+dtc-lexer.lex.c
94 elf2ecoff
95 elfconfig.h*
96 evergreen_reg_safe.h
97+exception_policy.conf
98 fixdep
99 flask.h
100 fore200e_mkfirm
101@@ -125,12 +141,15 @@ fore200e_pca_fw.c*
102 gconf
103 gconf.glade.h
104 gen-devlist
105+gen-kdb_cmds.c
106 gen_crc32table
107 gen_init_cpio
108 generated
109 genheaders
110 genksyms
111 *_gray256.c
112+hash
113+hid-example
114 hpet_example
115 hugepage-mmap
116 hugepage-shm
117@@ -145,14 +164,14 @@ int32.c
118 int4.c
119 int8.c
120 kallsyms
121-kconfig
122+kern_constants.h
123 keywords.c
124 ksym.c*
125 ksym.h*
126 kxgettext
127 lex.c
128 lex.*.c
129-linux
130+lib1funcs.S
131 logo_*.c
132 logo_*_clut224.c
133 logo_*_mono.c
134@@ -162,14 +181,15 @@ mach-types.h
135 machtypes.h
136 map
137 map_hugetlb
138-media
139 mconf
140+mdp
141 miboot*
142 mk_elfconfig
143 mkboot
144 mkbugboot
145 mkcpustr
146 mkdep
147+mkpiggy
148 mkprep
149 mkregtable
150 mktables
151@@ -185,6 +205,8 @@ oui.c*
152 page-types
153 parse.c
154 parse.h
155+parse-events*
156+pasyms.h
157 patches*
158 pca200e.bin
159 pca200e_ecd.bin2
160@@ -194,6 +216,7 @@ perf-archive
161 piggyback
162 piggy.gzip
163 piggy.S
164+pmu-*
165 pnmtologo
166 ppc_defs.h*
167 pss_boot.h
168@@ -203,7 +226,10 @@ r200_reg_safe.h
169 r300_reg_safe.h
170 r420_reg_safe.h
171 r600_reg_safe.h
172+realmode.lds
173+realmode.relocs
174 recordmcount
175+regdb.c
176 relocs
177 rlim_names.h
178 rn50_reg_safe.h
179@@ -213,8 +239,12 @@ series
180 setup
181 setup.bin
182 setup.elf
183+signing_key*
184+size_overflow_hash.h
185 sImage
186+slabinfo
187 sm_tbl*
188+sortextable
189 split-include
190 syscalltab.h
191 tables.c
192@@ -224,6 +254,7 @@ tftpboot.img
193 timeconst.h
194 times.h*
195 trix_boot.h
196+user_constants.h
197 utsrelease.h*
198 vdso-syms.lds
199 vdso.lds
200@@ -235,13 +266,17 @@ vdso32.lds
201 vdso32.so.dbg
202 vdso64.lds
203 vdso64.so.dbg
204+vdsox32.lds
205+vdsox32-syms.lds
206 version.h*
207 vmImage
208 vmlinux
209 vmlinux-*
210 vmlinux.aout
211 vmlinux.bin.all
212+vmlinux.bin.bz2
213 vmlinux.lds
214+vmlinux.relocs
215 vmlinuz
216 voffset.h
217 vsyscall.lds
218@@ -249,9 +284,12 @@ vsyscall_32.lds
219 wanxlfw.inc
220 uImage
221 unifdef
222+utsrelease.h
223 wakeup.bin
224 wakeup.elf
225 wakeup.lds
226+x509*
227 zImage*
228 zconf.hash.c
229+zconf.lex.c
230 zoffset.h
231diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
232index 2fe6e76..889ee23 100644
233--- a/Documentation/kernel-parameters.txt
234+++ b/Documentation/kernel-parameters.txt
235@@ -976,6 +976,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
236 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
237 Default: 1024
238
239+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
240+ ignore grsecurity's /proc restrictions
241+
242+
243 hashdist= [KNL,NUMA] Large hashes allocated during boot
244 are distributed across NUMA nodes. Defaults on
245 for 64-bit NUMA, off otherwise.
246@@ -1928,6 +1932,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
247 noexec=on: enable non-executable mappings (default)
248 noexec=off: disable non-executable mappings
249
250+ nopcid [X86-64]
251+ Disable PCID (Process-Context IDentifier) even if it
252+ is supported by the processor.
253+
254 nosmap [X86]
255 Disable SMAP (Supervisor Mode Access Prevention)
256 even if it is supported by processor.
257@@ -2195,6 +2203,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
258 the specified number of seconds. This is to be used if
259 your oopses keep scrolling off the screen.
260
261+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
262+ virtualization environments that don't cope well with the
263+ expand down segment used by UDEREF on X86-32 or the frequent
264+ page table updates on X86-64.
265+
266+ pax_sanitize_slab=
267+ 0/1 to disable/enable slab object sanitization (enabled by
268+ default).
269+
270+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
271+
272+ pax_extra_latent_entropy
273+ Enable a very simple form of latent entropy extraction
274+ from the first 4GB of memory as the bootmem allocator
275+ passes the memory pages to the buddy allocator.
276+
277+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
278+ when the processor supports PCID.
279+
280 pcbit= [HW,ISDN]
281
282 pcd. [PARIDE]
283diff --git a/Makefile b/Makefile
284index b119684..13ac256 100644
285--- a/Makefile
286+++ b/Makefile
287@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
288
289 HOSTCC = gcc
290 HOSTCXX = g++
291-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
292-HOSTCXXFLAGS = -O2
293+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
294+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
295+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
296
297 # Decide whether to build built-in, modular, or both.
298 # Normally, just do built-in.
299@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
300 # Rules shared between *config targets and build targets
301
302 # Basic helpers built in scripts/
303-PHONY += scripts_basic
304-scripts_basic:
305+PHONY += scripts_basic gcc-plugins
306+scripts_basic: gcc-plugins
307 $(Q)$(MAKE) $(build)=scripts/basic
308 $(Q)rm -f .tmp_quiet_recordmcount
309
310@@ -576,6 +577,65 @@ else
311 KBUILD_CFLAGS += -O2
312 endif
313
314+ifndef DISABLE_PAX_PLUGINS
315+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
316+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
317+else
318+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
319+endif
320+ifneq ($(PLUGINCC),)
321+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
322+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
323+endif
324+ifdef CONFIG_PAX_MEMORY_STACKLEAK
325+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
326+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
327+endif
328+ifdef CONFIG_KALLOCSTAT_PLUGIN
329+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
330+endif
331+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
332+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
333+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
334+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
335+endif
336+ifdef CONFIG_CHECKER_PLUGIN
337+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
338+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
339+endif
340+endif
341+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
342+ifdef CONFIG_PAX_SIZE_OVERFLOW
343+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
344+endif
345+ifdef CONFIG_PAX_LATENT_ENTROPY
346+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
347+endif
348+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
349+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
350+endif
351+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
352+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
353+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
354+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
355+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
356+ifeq ($(KBUILD_EXTMOD),)
357+gcc-plugins:
358+ $(Q)$(MAKE) $(build)=tools/gcc
359+else
360+gcc-plugins: ;
361+endif
362+else
363+gcc-plugins:
364+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
365+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
366+else
367+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
368+endif
369+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
370+endif
371+endif
372+
373 include $(srctree)/arch/$(SRCARCH)/Makefile
374
375 ifdef CONFIG_READABLE_ASM
376@@ -733,7 +793,7 @@ export mod_sign_cmd
377
378
379 ifeq ($(KBUILD_EXTMOD),)
380-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
381+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
382
383 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
384 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
385@@ -782,6 +842,8 @@ endif
386
387 # The actual objects are generated when descending,
388 # make sure no implicit rule kicks in
389+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
390+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
391 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
392
393 # Handle descending into subdirectories listed in $(vmlinux-dirs)
394@@ -791,7 +853,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
395 # Error messages still appears in the original language
396
397 PHONY += $(vmlinux-dirs)
398-$(vmlinux-dirs): prepare scripts
399+$(vmlinux-dirs): gcc-plugins prepare scripts
400 $(Q)$(MAKE) $(build)=$@
401
402 # Store (new) KERNELRELASE string in include/config/kernel.release
403@@ -835,6 +897,7 @@ prepare0: archprepare FORCE
404 $(Q)$(MAKE) $(build)=.
405
406 # All the preparing..
407+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
408 prepare: prepare0
409
410 # Generate some files
411@@ -942,6 +1005,8 @@ all: modules
412 # using awk while concatenating to the final file.
413
414 PHONY += modules
415+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
416+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
417 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
418 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
419 @$(kecho) ' Building modules, stage 2.';
420@@ -957,7 +1022,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
421
422 # Target to prepare building external modules
423 PHONY += modules_prepare
424-modules_prepare: prepare scripts
425+modules_prepare: gcc-plugins prepare scripts
426
427 # Target to install modules
428 PHONY += modules_install
429@@ -1023,7 +1088,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
430 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
431 signing_key.priv signing_key.x509 x509.genkey \
432 extra_certificates signing_key.x509.keyid \
433- signing_key.x509.signer
434+ signing_key.x509.signer tools/gcc/size_overflow_hash.h
435
436 # clean - Delete most, but leave enough to build external modules
437 #
438@@ -1063,6 +1128,7 @@ distclean: mrproper
439 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
440 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
441 -o -name '.*.rej' \
442+ -o -name '.*.rej' -o -name '*.so' \
443 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
444 -type f -print | xargs rm -f
445
446@@ -1223,6 +1289,8 @@ PHONY += $(module-dirs) modules
447 $(module-dirs): crmodverdir $(objtree)/Module.symvers
448 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
449
450+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
451+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
452 modules: $(module-dirs)
453 @$(kecho) ' Building modules, stage 2.';
454 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
455@@ -1359,17 +1427,21 @@ else
456 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
457 endif
458
459-%.s: %.c prepare scripts FORCE
460+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
461+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
462+%.s: %.c gcc-plugins prepare scripts FORCE
463 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
464 %.i: %.c prepare scripts FORCE
465 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
466-%.o: %.c prepare scripts FORCE
467+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
468+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
469+%.o: %.c gcc-plugins prepare scripts FORCE
470 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
471 %.lst: %.c prepare scripts FORCE
472 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
473-%.s: %.S prepare scripts FORCE
474+%.s: %.S gcc-plugins prepare scripts FORCE
475 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
476-%.o: %.S prepare scripts FORCE
477+%.o: %.S gcc-plugins prepare scripts FORCE
478 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
479 %.symtypes: %.c prepare scripts FORCE
480 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
481@@ -1379,11 +1451,15 @@ endif
482 $(cmd_crmodverdir)
483 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
484 $(build)=$(build-dir)
485-%/: prepare scripts FORCE
486+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
487+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
488+%/: gcc-plugins prepare scripts FORCE
489 $(cmd_crmodverdir)
490 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
491 $(build)=$(build-dir)
492-%.ko: prepare scripts FORCE
493+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
494+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
495+%.ko: gcc-plugins prepare scripts FORCE
496 $(cmd_crmodverdir)
497 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
498 $(build)=$(build-dir) $(@:.ko=.o)
499diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
500index c2cbe4f..f7264b4 100644
501--- a/arch/alpha/include/asm/atomic.h
502+++ b/arch/alpha/include/asm/atomic.h
503@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
504 #define atomic_dec(v) atomic_sub(1,(v))
505 #define atomic64_dec(v) atomic64_sub(1,(v))
506
507+#define atomic64_read_unchecked(v) atomic64_read(v)
508+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
509+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
510+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
511+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
512+#define atomic64_inc_unchecked(v) atomic64_inc(v)
513+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
514+#define atomic64_dec_unchecked(v) atomic64_dec(v)
515+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
516+
517 #define smp_mb__before_atomic_dec() smp_mb()
518 #define smp_mb__after_atomic_dec() smp_mb()
519 #define smp_mb__before_atomic_inc() smp_mb()
520diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
521index ad368a9..fbe0f25 100644
522--- a/arch/alpha/include/asm/cache.h
523+++ b/arch/alpha/include/asm/cache.h
524@@ -4,19 +4,19 @@
525 #ifndef __ARCH_ALPHA_CACHE_H
526 #define __ARCH_ALPHA_CACHE_H
527
528+#include <linux/const.h>
529
530 /* Bytes per L1 (data) cache line. */
531 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
532-# define L1_CACHE_BYTES 64
533 # define L1_CACHE_SHIFT 6
534 #else
535 /* Both EV4 and EV5 are write-through, read-allocate,
536 direct-mapped, physical.
537 */
538-# define L1_CACHE_BYTES 32
539 # define L1_CACHE_SHIFT 5
540 #endif
541
542+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
543 #define SMP_CACHE_BYTES L1_CACHE_BYTES
544
545 #endif
546diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
547index 968d999..d36b2df 100644
548--- a/arch/alpha/include/asm/elf.h
549+++ b/arch/alpha/include/asm/elf.h
550@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
551
552 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
553
554+#ifdef CONFIG_PAX_ASLR
555+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
556+
557+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
558+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
559+#endif
560+
561 /* $0 is set by ld.so to a pointer to a function which might be
562 registered using atexit. This provides a mean for the dynamic
563 linker to call DT_FINI functions for shared libraries that have
564diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
565index bc2a0da..8ad11ee 100644
566--- a/arch/alpha/include/asm/pgalloc.h
567+++ b/arch/alpha/include/asm/pgalloc.h
568@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
569 pgd_set(pgd, pmd);
570 }
571
572+static inline void
573+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
574+{
575+ pgd_populate(mm, pgd, pmd);
576+}
577+
578 extern pgd_t *pgd_alloc(struct mm_struct *mm);
579
580 static inline void
581diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
582index 81a4342..348b927 100644
583--- a/arch/alpha/include/asm/pgtable.h
584+++ b/arch/alpha/include/asm/pgtable.h
585@@ -102,6 +102,17 @@ struct vm_area_struct;
586 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
587 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
588 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
589+
590+#ifdef CONFIG_PAX_PAGEEXEC
591+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
592+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
593+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
594+#else
595+# define PAGE_SHARED_NOEXEC PAGE_SHARED
596+# define PAGE_COPY_NOEXEC PAGE_COPY
597+# define PAGE_READONLY_NOEXEC PAGE_READONLY
598+#endif
599+
600 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
601
602 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
603diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
604index 2fd00b7..cfd5069 100644
605--- a/arch/alpha/kernel/module.c
606+++ b/arch/alpha/kernel/module.c
607@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
608
609 /* The small sections were sorted to the end of the segment.
610 The following should definitely cover them. */
611- gp = (u64)me->module_core + me->core_size - 0x8000;
612+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
613 got = sechdrs[me->arch.gotsecindex].sh_addr;
614
615 for (i = 0; i < n; i++) {
616diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
617index b9e37ad..44c24e7 100644
618--- a/arch/alpha/kernel/osf_sys.c
619+++ b/arch/alpha/kernel/osf_sys.c
620@@ -1297,10 +1297,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
621 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
622
623 static unsigned long
624-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
625- unsigned long limit)
626+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
627+ unsigned long limit, unsigned long flags)
628 {
629 struct vm_unmapped_area_info info;
630+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
631
632 info.flags = 0;
633 info.length = len;
634@@ -1308,6 +1309,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
635 info.high_limit = limit;
636 info.align_mask = 0;
637 info.align_offset = 0;
638+ info.threadstack_offset = offset;
639 return vm_unmapped_area(&info);
640 }
641
642@@ -1340,20 +1342,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
643 merely specific addresses, but regions of memory -- perhaps
644 this feature should be incorporated into all ports? */
645
646+#ifdef CONFIG_PAX_RANDMMAP
647+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
648+#endif
649+
650 if (addr) {
651- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
652+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
653 if (addr != (unsigned long) -ENOMEM)
654 return addr;
655 }
656
657 /* Next, try allocating at TASK_UNMAPPED_BASE. */
658- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
659- len, limit);
660+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
661+
662 if (addr != (unsigned long) -ENOMEM)
663 return addr;
664
665 /* Finally, try allocating in low memory. */
666- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
667+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
668
669 return addr;
670 }
671diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
672index 0c4132d..88f0d53 100644
673--- a/arch/alpha/mm/fault.c
674+++ b/arch/alpha/mm/fault.c
675@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
676 __reload_thread(pcb);
677 }
678
679+#ifdef CONFIG_PAX_PAGEEXEC
680+/*
681+ * PaX: decide what to do with offenders (regs->pc = fault address)
682+ *
683+ * returns 1 when task should be killed
684+ * 2 when patched PLT trampoline was detected
685+ * 3 when unpatched PLT trampoline was detected
686+ */
687+static int pax_handle_fetch_fault(struct pt_regs *regs)
688+{
689+
690+#ifdef CONFIG_PAX_EMUPLT
691+ int err;
692+
693+ do { /* PaX: patched PLT emulation #1 */
694+ unsigned int ldah, ldq, jmp;
695+
696+ err = get_user(ldah, (unsigned int *)regs->pc);
697+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
698+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
699+
700+ if (err)
701+ break;
702+
703+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
704+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
705+ jmp == 0x6BFB0000U)
706+ {
707+ unsigned long r27, addr;
708+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
709+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
710+
711+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
712+ err = get_user(r27, (unsigned long *)addr);
713+ if (err)
714+ break;
715+
716+ regs->r27 = r27;
717+ regs->pc = r27;
718+ return 2;
719+ }
720+ } while (0);
721+
722+ do { /* PaX: patched PLT emulation #2 */
723+ unsigned int ldah, lda, br;
724+
725+ err = get_user(ldah, (unsigned int *)regs->pc);
726+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
727+ err |= get_user(br, (unsigned int *)(regs->pc+8));
728+
729+ if (err)
730+ break;
731+
732+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
733+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
734+ (br & 0xFFE00000U) == 0xC3E00000U)
735+ {
736+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
737+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
738+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
739+
740+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
741+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
742+ return 2;
743+ }
744+ } while (0);
745+
746+ do { /* PaX: unpatched PLT emulation */
747+ unsigned int br;
748+
749+ err = get_user(br, (unsigned int *)regs->pc);
750+
751+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
752+ unsigned int br2, ldq, nop, jmp;
753+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
754+
755+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
756+ err = get_user(br2, (unsigned int *)addr);
757+ err |= get_user(ldq, (unsigned int *)(addr+4));
758+ err |= get_user(nop, (unsigned int *)(addr+8));
759+ err |= get_user(jmp, (unsigned int *)(addr+12));
760+ err |= get_user(resolver, (unsigned long *)(addr+16));
761+
762+ if (err)
763+ break;
764+
765+ if (br2 == 0xC3600000U &&
766+ ldq == 0xA77B000CU &&
767+ nop == 0x47FF041FU &&
768+ jmp == 0x6B7B0000U)
769+ {
770+ regs->r28 = regs->pc+4;
771+ regs->r27 = addr+16;
772+ regs->pc = resolver;
773+ return 3;
774+ }
775+ }
776+ } while (0);
777+#endif
778+
779+ return 1;
780+}
781+
782+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
783+{
784+ unsigned long i;
785+
786+ printk(KERN_ERR "PAX: bytes at PC: ");
787+ for (i = 0; i < 5; i++) {
788+ unsigned int c;
789+ if (get_user(c, (unsigned int *)pc+i))
790+ printk(KERN_CONT "???????? ");
791+ else
792+ printk(KERN_CONT "%08x ", c);
793+ }
794+ printk("\n");
795+}
796+#endif
797
798 /*
799 * This routine handles page faults. It determines the address,
800@@ -133,8 +251,29 @@ retry:
801 good_area:
802 si_code = SEGV_ACCERR;
803 if (cause < 0) {
804- if (!(vma->vm_flags & VM_EXEC))
805+ if (!(vma->vm_flags & VM_EXEC)) {
806+
807+#ifdef CONFIG_PAX_PAGEEXEC
808+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
809+ goto bad_area;
810+
811+ up_read(&mm->mmap_sem);
812+ switch (pax_handle_fetch_fault(regs)) {
813+
814+#ifdef CONFIG_PAX_EMUPLT
815+ case 2:
816+ case 3:
817+ return;
818+#endif
819+
820+ }
821+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
822+ do_group_exit(SIGKILL);
823+#else
824 goto bad_area;
825+#endif
826+
827+ }
828 } else if (!cause) {
829 /* Allow reads even for write-only mappings */
830 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
831diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
832index 18a9f5e..ca910b7 100644
833--- a/arch/arm/Kconfig
834+++ b/arch/arm/Kconfig
835@@ -1766,7 +1766,7 @@ config ALIGNMENT_TRAP
836
837 config UACCESS_WITH_MEMCPY
838 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
839- depends on MMU
840+ depends on MMU && !PAX_MEMORY_UDEREF
841 default y if CPU_FEROCEON
842 help
843 Implement faster copy_to_user and clear_user methods for CPU
844diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
845index da1c77d..2ee6056 100644
846--- a/arch/arm/include/asm/atomic.h
847+++ b/arch/arm/include/asm/atomic.h
848@@ -17,17 +17,35 @@
849 #include <asm/barrier.h>
850 #include <asm/cmpxchg.h>
851
852+#ifdef CONFIG_GENERIC_ATOMIC64
853+#include <asm-generic/atomic64.h>
854+#endif
855+
856 #define ATOMIC_INIT(i) { (i) }
857
858 #ifdef __KERNEL__
859
860+#define _ASM_EXTABLE(from, to) \
861+" .pushsection __ex_table,\"a\"\n"\
862+" .align 3\n" \
863+" .long " #from ", " #to"\n" \
864+" .popsection"
865+
866 /*
867 * On ARM, ordinary assignment (str instruction) doesn't clear the local
868 * strex/ldrex monitor on some implementations. The reason we can use it for
869 * atomic_set() is the clrex or dummy strex done on every exception return.
870 */
871 #define atomic_read(v) (*(volatile int *)&(v)->counter)
872+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
873+{
874+ return v->counter;
875+}
876 #define atomic_set(v,i) (((v)->counter) = (i))
877+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
878+{
879+ v->counter = i;
880+}
881
882 #if __LINUX_ARM_ARCH__ >= 6
883
884@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
885 int result;
886
887 __asm__ __volatile__("@ atomic_add\n"
888+"1: ldrex %1, [%3]\n"
889+" adds %0, %1, %4\n"
890+
891+#ifdef CONFIG_PAX_REFCOUNT
892+" bvc 3f\n"
893+"2: bkpt 0xf103\n"
894+"3:\n"
895+#endif
896+
897+" strex %1, %0, [%3]\n"
898+" teq %1, #0\n"
899+" bne 1b"
900+
901+#ifdef CONFIG_PAX_REFCOUNT
902+"\n4:\n"
903+ _ASM_EXTABLE(2b, 4b)
904+#endif
905+
906+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
907+ : "r" (&v->counter), "Ir" (i)
908+ : "cc");
909+}
910+
911+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
912+{
913+ unsigned long tmp;
914+ int result;
915+
916+ __asm__ __volatile__("@ atomic_add_unchecked\n"
917 "1: ldrex %0, [%3]\n"
918 " add %0, %0, %4\n"
919 " strex %1, %0, [%3]\n"
920@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
921 smp_mb();
922
923 __asm__ __volatile__("@ atomic_add_return\n"
924+"1: ldrex %1, [%3]\n"
925+" adds %0, %1, %4\n"
926+
927+#ifdef CONFIG_PAX_REFCOUNT
928+" bvc 3f\n"
929+" mov %0, %1\n"
930+"2: bkpt 0xf103\n"
931+"3:\n"
932+#endif
933+
934+" strex %1, %0, [%3]\n"
935+" teq %1, #0\n"
936+" bne 1b"
937+
938+#ifdef CONFIG_PAX_REFCOUNT
939+"\n4:\n"
940+ _ASM_EXTABLE(2b, 4b)
941+#endif
942+
943+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
944+ : "r" (&v->counter), "Ir" (i)
945+ : "cc");
946+
947+ smp_mb();
948+
949+ return result;
950+}
951+
952+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
953+{
954+ unsigned long tmp;
955+ int result;
956+
957+ smp_mb();
958+
959+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
960 "1: ldrex %0, [%3]\n"
961 " add %0, %0, %4\n"
962 " strex %1, %0, [%3]\n"
963@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
964 int result;
965
966 __asm__ __volatile__("@ atomic_sub\n"
967+"1: ldrex %1, [%3]\n"
968+" subs %0, %1, %4\n"
969+
970+#ifdef CONFIG_PAX_REFCOUNT
971+" bvc 3f\n"
972+"2: bkpt 0xf103\n"
973+"3:\n"
974+#endif
975+
976+" strex %1, %0, [%3]\n"
977+" teq %1, #0\n"
978+" bne 1b"
979+
980+#ifdef CONFIG_PAX_REFCOUNT
981+"\n4:\n"
982+ _ASM_EXTABLE(2b, 4b)
983+#endif
984+
985+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
986+ : "r" (&v->counter), "Ir" (i)
987+ : "cc");
988+}
989+
990+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
991+{
992+ unsigned long tmp;
993+ int result;
994+
995+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
996 "1: ldrex %0, [%3]\n"
997 " sub %0, %0, %4\n"
998 " strex %1, %0, [%3]\n"
999@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1000 smp_mb();
1001
1002 __asm__ __volatile__("@ atomic_sub_return\n"
1003-"1: ldrex %0, [%3]\n"
1004-" sub %0, %0, %4\n"
1005+"1: ldrex %1, [%3]\n"
1006+" subs %0, %1, %4\n"
1007+
1008+#ifdef CONFIG_PAX_REFCOUNT
1009+" bvc 3f\n"
1010+" mov %0, %1\n"
1011+"2: bkpt 0xf103\n"
1012+"3:\n"
1013+#endif
1014+
1015 " strex %1, %0, [%3]\n"
1016 " teq %1, #0\n"
1017 " bne 1b"
1018+
1019+#ifdef CONFIG_PAX_REFCOUNT
1020+"\n4:\n"
1021+ _ASM_EXTABLE(2b, 4b)
1022+#endif
1023+
1024 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1025 : "r" (&v->counter), "Ir" (i)
1026 : "cc");
1027@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1028 return oldval;
1029 }
1030
1031+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1032+{
1033+ unsigned long oldval, res;
1034+
1035+ smp_mb();
1036+
1037+ do {
1038+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1039+ "ldrex %1, [%3]\n"
1040+ "mov %0, #0\n"
1041+ "teq %1, %4\n"
1042+ "strexeq %0, %5, [%3]\n"
1043+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1044+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1045+ : "cc");
1046+ } while (res);
1047+
1048+ smp_mb();
1049+
1050+ return oldval;
1051+}
1052+
1053 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1054 {
1055 unsigned long tmp, tmp2;
1056@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1057
1058 return val;
1059 }
1060+
1061+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1062+{
1063+ return atomic_add_return(i, v);
1064+}
1065+
1066 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1067+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1068+{
1069+ (void) atomic_add_return(i, v);
1070+}
1071
1072 static inline int atomic_sub_return(int i, atomic_t *v)
1073 {
1074@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1075 return val;
1076 }
1077 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1078+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1079+{
1080+ (void) atomic_sub_return(i, v);
1081+}
1082
1083 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1084 {
1085@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1086 return ret;
1087 }
1088
1089+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1090+{
1091+ return atomic_cmpxchg(v, old, new);
1092+}
1093+
1094 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1095 {
1096 unsigned long flags;
1097@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1098 #endif /* __LINUX_ARM_ARCH__ */
1099
1100 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1101+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1102+{
1103+ return xchg(&v->counter, new);
1104+}
1105
1106 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1107 {
1108@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1109 }
1110
1111 #define atomic_inc(v) atomic_add(1, v)
1112+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1113+{
1114+ atomic_add_unchecked(1, v);
1115+}
1116 #define atomic_dec(v) atomic_sub(1, v)
1117+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1118+{
1119+ atomic_sub_unchecked(1, v);
1120+}
1121
1122 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1123+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1124+{
1125+ return atomic_add_return_unchecked(1, v) == 0;
1126+}
1127 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1128 #define atomic_inc_return(v) (atomic_add_return(1, v))
1129+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1130+{
1131+ return atomic_add_return_unchecked(1, v);
1132+}
1133 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1134 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1135
1136@@ -241,6 +428,14 @@ typedef struct {
1137 u64 __aligned(8) counter;
1138 } atomic64_t;
1139
1140+#ifdef CONFIG_PAX_REFCOUNT
1141+typedef struct {
1142+ u64 __aligned(8) counter;
1143+} atomic64_unchecked_t;
1144+#else
1145+typedef atomic64_t atomic64_unchecked_t;
1146+#endif
1147+
1148 #define ATOMIC64_INIT(i) { (i) }
1149
1150 #ifdef CONFIG_ARM_LPAE
1151@@ -257,6 +452,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1152 return result;
1153 }
1154
1155+static inline u64 atomic64_read_unchecked(const atomic64_unchecked_t *v)
1156+{
1157+ u64 result;
1158+
1159+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1160+" ldrd %0, %H0, [%1]"
1161+ : "=&r" (result)
1162+ : "r" (&v->counter), "Qo" (v->counter)
1163+ );
1164+
1165+ return result;
1166+}
1167+
1168 static inline void atomic64_set(atomic64_t *v, u64 i)
1169 {
1170 __asm__ __volatile__("@ atomic64_set\n"
1171@@ -265,6 +473,15 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1172 : "r" (&v->counter), "r" (i)
1173 );
1174 }
1175+
1176+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1177+{
1178+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1179+" strd %2, %H2, [%1]"
1180+ : "=Qo" (v->counter)
1181+ : "r" (&v->counter), "r" (i)
1182+ );
1183+}
1184 #else
1185 static inline u64 atomic64_read(const atomic64_t *v)
1186 {
1187@@ -279,6 +496,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1188 return result;
1189 }
1190
1191+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1192+{
1193+ u64 result;
1194+
1195+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1196+" ldrexd %0, %H0, [%1]"
1197+ : "=&r" (result)
1198+ : "r" (&v->counter), "Qo" (v->counter)
1199+ );
1200+
1201+ return result;
1202+}
1203+
1204 static inline void atomic64_set(atomic64_t *v, u64 i)
1205 {
1206 u64 tmp;
1207@@ -292,6 +522,21 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1208 : "r" (&v->counter), "r" (i)
1209 : "cc");
1210 }
1211+
1212+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1213+{
1214+ u64 tmp;
1215+
1216+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1217+"1: ldrexd %0, %H0, [%2]\n"
1218+" strexd %0, %3, %H3, [%2]\n"
1219+" teq %0, #0\n"
1220+" bne 1b"
1221+ : "=&r" (tmp), "=Qo" (v->counter)
1222+ : "r" (&v->counter), "r" (i)
1223+ : "cc");
1224+}
1225+
1226 #endif
1227
1228 static inline void atomic64_add(u64 i, atomic64_t *v)
1229@@ -302,6 +547,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1230 __asm__ __volatile__("@ atomic64_add\n"
1231 "1: ldrexd %0, %H0, [%3]\n"
1232 " adds %0, %0, %4\n"
1233+" adcs %H0, %H0, %H4\n"
1234+
1235+#ifdef CONFIG_PAX_REFCOUNT
1236+" bvc 3f\n"
1237+"2: bkpt 0xf103\n"
1238+"3:\n"
1239+#endif
1240+
1241+" strexd %1, %0, %H0, [%3]\n"
1242+" teq %1, #0\n"
1243+" bne 1b"
1244+
1245+#ifdef CONFIG_PAX_REFCOUNT
1246+"\n4:\n"
1247+ _ASM_EXTABLE(2b, 4b)
1248+#endif
1249+
1250+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1251+ : "r" (&v->counter), "r" (i)
1252+ : "cc");
1253+}
1254+
1255+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1256+{
1257+ u64 result;
1258+ unsigned long tmp;
1259+
1260+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1261+"1: ldrexd %0, %H0, [%3]\n"
1262+" adds %0, %0, %4\n"
1263 " adc %H0, %H0, %H4\n"
1264 " strexd %1, %0, %H0, [%3]\n"
1265 " teq %1, #0\n"
1266@@ -313,12 +588,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1267
1268 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1269 {
1270- u64 result;
1271- unsigned long tmp;
1272+ u64 result, tmp;
1273
1274 smp_mb();
1275
1276 __asm__ __volatile__("@ atomic64_add_return\n"
1277+"1: ldrexd %1, %H1, [%3]\n"
1278+" adds %0, %1, %4\n"
1279+" adcs %H0, %H1, %H4\n"
1280+
1281+#ifdef CONFIG_PAX_REFCOUNT
1282+" bvc 3f\n"
1283+" mov %0, %1\n"
1284+" mov %H0, %H1\n"
1285+"2: bkpt 0xf103\n"
1286+"3:\n"
1287+#endif
1288+
1289+" strexd %1, %0, %H0, [%3]\n"
1290+" teq %1, #0\n"
1291+" bne 1b"
1292+
1293+#ifdef CONFIG_PAX_REFCOUNT
1294+"\n4:\n"
1295+ _ASM_EXTABLE(2b, 4b)
1296+#endif
1297+
1298+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1299+ : "r" (&v->counter), "r" (i)
1300+ : "cc");
1301+
1302+ smp_mb();
1303+
1304+ return result;
1305+}
1306+
1307+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1308+{
1309+ u64 result;
1310+ unsigned long tmp;
1311+
1312+ smp_mb();
1313+
1314+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1315 "1: ldrexd %0, %H0, [%3]\n"
1316 " adds %0, %0, %4\n"
1317 " adc %H0, %H0, %H4\n"
1318@@ -342,6 +654,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1319 __asm__ __volatile__("@ atomic64_sub\n"
1320 "1: ldrexd %0, %H0, [%3]\n"
1321 " subs %0, %0, %4\n"
1322+" sbcs %H0, %H0, %H4\n"
1323+
1324+#ifdef CONFIG_PAX_REFCOUNT
1325+" bvc 3f\n"
1326+"2: bkpt 0xf103\n"
1327+"3:\n"
1328+#endif
1329+
1330+" strexd %1, %0, %H0, [%3]\n"
1331+" teq %1, #0\n"
1332+" bne 1b"
1333+
1334+#ifdef CONFIG_PAX_REFCOUNT
1335+"\n4:\n"
1336+ _ASM_EXTABLE(2b, 4b)
1337+#endif
1338+
1339+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1340+ : "r" (&v->counter), "r" (i)
1341+ : "cc");
1342+}
1343+
1344+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1345+{
1346+ u64 result;
1347+ unsigned long tmp;
1348+
1349+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1350+"1: ldrexd %0, %H0, [%3]\n"
1351+" subs %0, %0, %4\n"
1352 " sbc %H0, %H0, %H4\n"
1353 " strexd %1, %0, %H0, [%3]\n"
1354 " teq %1, #0\n"
1355@@ -353,18 +695,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1356
1357 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1358 {
1359- u64 result;
1360- unsigned long tmp;
1361+ u64 result, tmp;
1362
1363 smp_mb();
1364
1365 __asm__ __volatile__("@ atomic64_sub_return\n"
1366-"1: ldrexd %0, %H0, [%3]\n"
1367-" subs %0, %0, %4\n"
1368-" sbc %H0, %H0, %H4\n"
1369+"1: ldrexd %1, %H1, [%3]\n"
1370+" subs %0, %1, %4\n"
1371+" sbcs %H0, %H1, %H4\n"
1372+
1373+#ifdef CONFIG_PAX_REFCOUNT
1374+" bvc 3f\n"
1375+" mov %0, %1\n"
1376+" mov %H0, %H1\n"
1377+"2: bkpt 0xf103\n"
1378+"3:\n"
1379+#endif
1380+
1381 " strexd %1, %0, %H0, [%3]\n"
1382 " teq %1, #0\n"
1383 " bne 1b"
1384+
1385+#ifdef CONFIG_PAX_REFCOUNT
1386+"\n4:\n"
1387+ _ASM_EXTABLE(2b, 4b)
1388+#endif
1389+
1390 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1391 : "r" (&v->counter), "r" (i)
1392 : "cc");
1393@@ -398,6 +754,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1394 return oldval;
1395 }
1396
1397+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1398+{
1399+ u64 oldval;
1400+ unsigned long res;
1401+
1402+ smp_mb();
1403+
1404+ do {
1405+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1406+ "ldrexd %1, %H1, [%3]\n"
1407+ "mov %0, #0\n"
1408+ "teq %1, %4\n"
1409+ "teqeq %H1, %H4\n"
1410+ "strexdeq %0, %5, %H5, [%3]"
1411+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1412+ : "r" (&ptr->counter), "r" (old), "r" (new)
1413+ : "cc");
1414+ } while (res);
1415+
1416+ smp_mb();
1417+
1418+ return oldval;
1419+}
1420+
1421 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1422 {
1423 u64 result;
1424@@ -421,21 +801,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1425
1426 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1427 {
1428- u64 result;
1429- unsigned long tmp;
1430+ u64 result, tmp;
1431
1432 smp_mb();
1433
1434 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1435-"1: ldrexd %0, %H0, [%3]\n"
1436-" subs %0, %0, #1\n"
1437-" sbc %H0, %H0, #0\n"
1438+"1: ldrexd %1, %H1, [%3]\n"
1439+" subs %0, %1, #1\n"
1440+" sbcs %H0, %H1, #0\n"
1441+
1442+#ifdef CONFIG_PAX_REFCOUNT
1443+" bvc 3f\n"
1444+" mov %0, %1\n"
1445+" mov %H0, %H1\n"
1446+"2: bkpt 0xf103\n"
1447+"3:\n"
1448+#endif
1449+
1450 " teq %H0, #0\n"
1451-" bmi 2f\n"
1452+" bmi 4f\n"
1453 " strexd %1, %0, %H0, [%3]\n"
1454 " teq %1, #0\n"
1455 " bne 1b\n"
1456-"2:"
1457+"4:\n"
1458+
1459+#ifdef CONFIG_PAX_REFCOUNT
1460+ _ASM_EXTABLE(2b, 4b)
1461+#endif
1462+
1463 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1464 : "r" (&v->counter)
1465 : "cc");
1466@@ -458,13 +851,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1467 " teq %0, %5\n"
1468 " teqeq %H0, %H5\n"
1469 " moveq %1, #0\n"
1470-" beq 2f\n"
1471+" beq 4f\n"
1472 " adds %0, %0, %6\n"
1473-" adc %H0, %H0, %H6\n"
1474+" adcs %H0, %H0, %H6\n"
1475+
1476+#ifdef CONFIG_PAX_REFCOUNT
1477+" bvc 3f\n"
1478+"2: bkpt 0xf103\n"
1479+"3:\n"
1480+#endif
1481+
1482 " strexd %2, %0, %H0, [%4]\n"
1483 " teq %2, #0\n"
1484 " bne 1b\n"
1485-"2:"
1486+"4:\n"
1487+
1488+#ifdef CONFIG_PAX_REFCOUNT
1489+ _ASM_EXTABLE(2b, 4b)
1490+#endif
1491+
1492 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1493 : "r" (&v->counter), "r" (u), "r" (a)
1494 : "cc");
1495@@ -477,10 +882,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1496
1497 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1498 #define atomic64_inc(v) atomic64_add(1LL, (v))
1499+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1500 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1501+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1502 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1503 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1504 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1505+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1506 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1507 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1508 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1509diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1510index 75fe66b..ba3dee4 100644
1511--- a/arch/arm/include/asm/cache.h
1512+++ b/arch/arm/include/asm/cache.h
1513@@ -4,8 +4,10 @@
1514 #ifndef __ASMARM_CACHE_H
1515 #define __ASMARM_CACHE_H
1516
1517+#include <linux/const.h>
1518+
1519 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1520-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1521+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1522
1523 /*
1524 * Memory returned by kmalloc() may be used for DMA, so we must make
1525@@ -24,5 +26,6 @@
1526 #endif
1527
1528 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1529+#define __read_only __attribute__ ((__section__(".data..read_only")))
1530
1531 #endif
1532diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1533index 17d0ae8..014e350 100644
1534--- a/arch/arm/include/asm/cacheflush.h
1535+++ b/arch/arm/include/asm/cacheflush.h
1536@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1537 void (*dma_unmap_area)(const void *, size_t, int);
1538
1539 void (*dma_flush_range)(const void *, const void *);
1540-};
1541+} __no_const;
1542
1543 /*
1544 * Select the calling method
1545diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1546index 6dcc164..b14d917 100644
1547--- a/arch/arm/include/asm/checksum.h
1548+++ b/arch/arm/include/asm/checksum.h
1549@@ -37,7 +37,19 @@ __wsum
1550 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1551
1552 __wsum
1553-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1554+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1555+
1556+static inline __wsum
1557+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1558+{
1559+ __wsum ret;
1560+ pax_open_userland();
1561+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1562+ pax_close_userland();
1563+ return ret;
1564+}
1565+
1566+
1567
1568 /*
1569 * Fold a partial checksum without adding pseudo headers
1570diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1571index 4f009c1..466c59b 100644
1572--- a/arch/arm/include/asm/cmpxchg.h
1573+++ b/arch/arm/include/asm/cmpxchg.h
1574@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1575
1576 #define xchg(ptr,x) \
1577 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1578+#define xchg_unchecked(ptr,x) \
1579+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1580
1581 #include <asm-generic/cmpxchg-local.h>
1582
1583diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1584index 6ddbe44..b5e38b1 100644
1585--- a/arch/arm/include/asm/domain.h
1586+++ b/arch/arm/include/asm/domain.h
1587@@ -48,18 +48,37 @@
1588 * Domain types
1589 */
1590 #define DOMAIN_NOACCESS 0
1591-#define DOMAIN_CLIENT 1
1592 #ifdef CONFIG_CPU_USE_DOMAINS
1593+#define DOMAIN_USERCLIENT 1
1594+#define DOMAIN_KERNELCLIENT 1
1595 #define DOMAIN_MANAGER 3
1596+#define DOMAIN_VECTORS DOMAIN_USER
1597 #else
1598+
1599+#ifdef CONFIG_PAX_KERNEXEC
1600 #define DOMAIN_MANAGER 1
1601+#define DOMAIN_KERNEXEC 3
1602+#else
1603+#define DOMAIN_MANAGER 1
1604+#endif
1605+
1606+#ifdef CONFIG_PAX_MEMORY_UDEREF
1607+#define DOMAIN_USERCLIENT 0
1608+#define DOMAIN_UDEREF 1
1609+#define DOMAIN_VECTORS DOMAIN_KERNEL
1610+#else
1611+#define DOMAIN_USERCLIENT 1
1612+#define DOMAIN_VECTORS DOMAIN_USER
1613+#endif
1614+#define DOMAIN_KERNELCLIENT 1
1615+
1616 #endif
1617
1618 #define domain_val(dom,type) ((type) << (2*(dom)))
1619
1620 #ifndef __ASSEMBLY__
1621
1622-#ifdef CONFIG_CPU_USE_DOMAINS
1623+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1624 static inline void set_domain(unsigned val)
1625 {
1626 asm volatile(
1627@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1628 isb();
1629 }
1630
1631-#define modify_domain(dom,type) \
1632- do { \
1633- struct thread_info *thread = current_thread_info(); \
1634- unsigned int domain = thread->cpu_domain; \
1635- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1636- thread->cpu_domain = domain | domain_val(dom, type); \
1637- set_domain(thread->cpu_domain); \
1638- } while (0)
1639-
1640+extern void modify_domain(unsigned int dom, unsigned int type);
1641 #else
1642 static inline void set_domain(unsigned val) { }
1643 static inline void modify_domain(unsigned dom, unsigned type) { }
1644diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1645index 56211f2..17e8a25 100644
1646--- a/arch/arm/include/asm/elf.h
1647+++ b/arch/arm/include/asm/elf.h
1648@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1649 the loader. We need to make sure that it is out of the way of the program
1650 that it will "exec", and that there is sufficient room for the brk. */
1651
1652-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1653+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1654+
1655+#ifdef CONFIG_PAX_ASLR
1656+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1657+
1658+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1659+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1660+#endif
1661
1662 /* When the program starts, a1 contains a pointer to a function to be
1663 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1664@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1665 extern void elf_set_personality(const struct elf32_hdr *);
1666 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1667
1668-struct mm_struct;
1669-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1670-#define arch_randomize_brk arch_randomize_brk
1671-
1672 #ifdef CONFIG_MMU
1673 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1674 struct linux_binprm;
1675diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1676index de53547..52b9a28 100644
1677--- a/arch/arm/include/asm/fncpy.h
1678+++ b/arch/arm/include/asm/fncpy.h
1679@@ -81,7 +81,9 @@
1680 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1681 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1682 \
1683+ pax_open_kernel(); \
1684 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1685+ pax_close_kernel(); \
1686 flush_icache_range((unsigned long)(dest_buf), \
1687 (unsigned long)(dest_buf) + (size)); \
1688 \
1689diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1690index e42cf59..7b94b8f 100644
1691--- a/arch/arm/include/asm/futex.h
1692+++ b/arch/arm/include/asm/futex.h
1693@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1694 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1695 return -EFAULT;
1696
1697+ pax_open_userland();
1698+
1699 smp_mb();
1700 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1701 "1: ldrex %1, [%4]\n"
1702@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1703 : "cc", "memory");
1704 smp_mb();
1705
1706+ pax_close_userland();
1707+
1708 *uval = val;
1709 return ret;
1710 }
1711@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1712 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1713 return -EFAULT;
1714
1715+ pax_open_userland();
1716+
1717 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1718 "1: " TUSER(ldr) " %1, [%4]\n"
1719 " teq %1, %2\n"
1720@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1721 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1722 : "cc", "memory");
1723
1724+ pax_close_userland();
1725+
1726 *uval = val;
1727 return ret;
1728 }
1729@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1730 return -EFAULT;
1731
1732 pagefault_disable(); /* implies preempt_disable() */
1733+ pax_open_userland();
1734
1735 switch (op) {
1736 case FUTEX_OP_SET:
1737@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1738 ret = -ENOSYS;
1739 }
1740
1741+ pax_close_userland();
1742 pagefault_enable(); /* subsumes preempt_enable() */
1743
1744 if (!ret) {
1745diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1746index 83eb2f7..ed77159 100644
1747--- a/arch/arm/include/asm/kmap_types.h
1748+++ b/arch/arm/include/asm/kmap_types.h
1749@@ -4,6 +4,6 @@
1750 /*
1751 * This is the "bare minimum". AIO seems to require this.
1752 */
1753-#define KM_TYPE_NR 16
1754+#define KM_TYPE_NR 17
1755
1756 #endif
1757diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1758index 9e614a1..3302cca 100644
1759--- a/arch/arm/include/asm/mach/dma.h
1760+++ b/arch/arm/include/asm/mach/dma.h
1761@@ -22,7 +22,7 @@ struct dma_ops {
1762 int (*residue)(unsigned int, dma_t *); /* optional */
1763 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1764 const char *type;
1765-};
1766+} __do_const;
1767
1768 struct dma_struct {
1769 void *addr; /* single DMA address */
1770diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1771index 2fe141f..192dc01 100644
1772--- a/arch/arm/include/asm/mach/map.h
1773+++ b/arch/arm/include/asm/mach/map.h
1774@@ -27,13 +27,16 @@ struct map_desc {
1775 #define MT_MINICLEAN 6
1776 #define MT_LOW_VECTORS 7
1777 #define MT_HIGH_VECTORS 8
1778-#define MT_MEMORY 9
1779+#define MT_MEMORY_RWX 9
1780 #define MT_ROM 10
1781-#define MT_MEMORY_NONCACHED 11
1782+#define MT_MEMORY_NONCACHED_RX 11
1783 #define MT_MEMORY_DTCM 12
1784 #define MT_MEMORY_ITCM 13
1785 #define MT_MEMORY_SO 14
1786 #define MT_MEMORY_DMA_READY 15
1787+#define MT_MEMORY_RW 16
1788+#define MT_MEMORY_RX 17
1789+#define MT_MEMORY_NONCACHED_RW 18
1790
1791 #ifdef CONFIG_MMU
1792 extern void iotable_init(struct map_desc *, int);
1793diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1794index 12f71a1..04e063c 100644
1795--- a/arch/arm/include/asm/outercache.h
1796+++ b/arch/arm/include/asm/outercache.h
1797@@ -35,7 +35,7 @@ struct outer_cache_fns {
1798 #endif
1799 void (*set_debug)(unsigned long);
1800 void (*resume)(void);
1801-};
1802+} __no_const;
1803
1804 #ifdef CONFIG_OUTER_CACHE
1805
1806diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1807index cbdc7a2..32f44fe 100644
1808--- a/arch/arm/include/asm/page.h
1809+++ b/arch/arm/include/asm/page.h
1810@@ -114,7 +114,7 @@ struct cpu_user_fns {
1811 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1812 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1813 unsigned long vaddr, struct vm_area_struct *vma);
1814-};
1815+} __no_const;
1816
1817 #ifdef MULTI_USER
1818 extern struct cpu_user_fns cpu_user;
1819diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1820index 943504f..c37a730 100644
1821--- a/arch/arm/include/asm/pgalloc.h
1822+++ b/arch/arm/include/asm/pgalloc.h
1823@@ -17,6 +17,7 @@
1824 #include <asm/processor.h>
1825 #include <asm/cacheflush.h>
1826 #include <asm/tlbflush.h>
1827+#include <asm/system_info.h>
1828
1829 #define check_pgt_cache() do { } while (0)
1830
1831@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1832 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1833 }
1834
1835+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1836+{
1837+ pud_populate(mm, pud, pmd);
1838+}
1839+
1840 #else /* !CONFIG_ARM_LPAE */
1841
1842 /*
1843@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1844 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1845 #define pmd_free(mm, pmd) do { } while (0)
1846 #define pud_populate(mm,pmd,pte) BUG()
1847+#define pud_populate_kernel(mm,pmd,pte) BUG()
1848
1849 #endif /* CONFIG_ARM_LPAE */
1850
1851@@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1852 __free_page(pte);
1853 }
1854
1855+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1856+{
1857+#ifdef CONFIG_ARM_LPAE
1858+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1859+#else
1860+ if (addr & SECTION_SIZE)
1861+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1862+ else
1863+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1864+#endif
1865+ flush_pmd_entry(pmdp);
1866+}
1867+
1868 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1869 pmdval_t prot)
1870 {
1871@@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1872 static inline void
1873 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1874 {
1875- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1876+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1877 }
1878 #define pmd_pgtable(pmd) pmd_page(pmd)
1879
1880diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1881index 5cfba15..f415e1a 100644
1882--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1883+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1884@@ -20,12 +20,15 @@
1885 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1886 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1887 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1888+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1889 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1890 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1891 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1892+
1893 /*
1894 * - section
1895 */
1896+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1897 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1898 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1899 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1900@@ -37,6 +40,7 @@
1901 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1902 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1903 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1904+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1905
1906 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1907 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1908@@ -66,6 +70,7 @@
1909 * - extended small page/tiny page
1910 */
1911 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1912+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1913 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1914 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1915 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1916diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1917index f97ee02..cc9fe9e 100644
1918--- a/arch/arm/include/asm/pgtable-2level.h
1919+++ b/arch/arm/include/asm/pgtable-2level.h
1920@@ -126,6 +126,9 @@
1921 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1922 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1923
1924+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1925+#define L_PTE_PXN (_AT(pteval_t, 0))
1926+
1927 /*
1928 * These are the memory types, defined to be compatible with
1929 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1930diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1931index 18f5cef..25b8f43 100644
1932--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1933+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1934@@ -41,6 +41,7 @@
1935 */
1936 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1937 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1938+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7)
1939 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1940 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1941 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1942@@ -71,6 +72,7 @@
1943 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1944 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1945 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1946+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1947 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1948
1949 /*
1950diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1951index 86b8fe3..e25f975 100644
1952--- a/arch/arm/include/asm/pgtable-3level.h
1953+++ b/arch/arm/include/asm/pgtable-3level.h
1954@@ -74,6 +74,7 @@
1955 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1956 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1957 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1958+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1959 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1960 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1961 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1962@@ -82,6 +83,7 @@
1963 /*
1964 * To be used in assembly code with the upper page attributes.
1965 */
1966+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1967 #define L_PTE_XN_HIGH (1 << (54 - 32))
1968 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1969
1970diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1971index 9bcd262..1ff999b 100644
1972--- a/arch/arm/include/asm/pgtable.h
1973+++ b/arch/arm/include/asm/pgtable.h
1974@@ -30,6 +30,9 @@
1975 #include <asm/pgtable-2level.h>
1976 #endif
1977
1978+#define ktla_ktva(addr) (addr)
1979+#define ktva_ktla(addr) (addr)
1980+
1981 /*
1982 * Just any arbitrary offset to the start of the vmalloc VM area: the
1983 * current 8MB value just means that there will be a 8MB "hole" after the
1984@@ -45,6 +48,9 @@
1985 #define LIBRARY_TEXT_START 0x0c000000
1986
1987 #ifndef __ASSEMBLY__
1988+extern pteval_t __supported_pte_mask;
1989+extern pmdval_t __supported_pmd_mask;
1990+
1991 extern void __pte_error(const char *file, int line, pte_t);
1992 extern void __pmd_error(const char *file, int line, pmd_t);
1993 extern void __pgd_error(const char *file, int line, pgd_t);
1994@@ -53,6 +59,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1995 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
1996 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
1997
1998+#define __HAVE_ARCH_PAX_OPEN_KERNEL
1999+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2000+
2001+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2002+#include <asm/domain.h>
2003+#include <linux/thread_info.h>
2004+#include <linux/preempt.h>
2005+
2006+static inline int test_domain(int domain, int domaintype)
2007+{
2008+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2009+}
2010+#endif
2011+
2012+#ifdef CONFIG_PAX_KERNEXEC
2013+static inline unsigned long pax_open_kernel(void) {
2014+#ifdef CONFIG_ARM_LPAE
2015+ /* TODO */
2016+#else
2017+ preempt_disable();
2018+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2019+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2020+#endif
2021+ return 0;
2022+}
2023+
2024+static inline unsigned long pax_close_kernel(void) {
2025+#ifdef CONFIG_ARM_LPAE
2026+ /* TODO */
2027+#else
2028+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2029+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2030+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2031+ preempt_enable_no_resched();
2032+#endif
2033+ return 0;
2034+}
2035+#else
2036+static inline unsigned long pax_open_kernel(void) { return 0; }
2037+static inline unsigned long pax_close_kernel(void) { return 0; }
2038+#endif
2039+
2040 /*
2041 * This is the lowest virtual address we can permit any user space
2042 * mapping to be mapped at. This is particularly important for
2043@@ -72,8 +120,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2044 /*
2045 * The pgprot_* and protection_map entries will be fixed up in runtime
2046 * to include the cachable and bufferable bits based on memory policy,
2047- * as well as any architecture dependent bits like global/ASID and SMP
2048- * shared mapping bits.
2049+ * as well as any architecture dependent bits like global/ASID, PXN,
2050+ * and SMP shared mapping bits.
2051 */
2052 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2053
2054@@ -257,7 +305,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2055 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2056 {
2057 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2058- L_PTE_NONE | L_PTE_VALID;
2059+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2060 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2061 return pte;
2062 }
2063diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2064index f3628fb..a0672dd 100644
2065--- a/arch/arm/include/asm/proc-fns.h
2066+++ b/arch/arm/include/asm/proc-fns.h
2067@@ -75,7 +75,7 @@ extern struct processor {
2068 unsigned int suspend_size;
2069 void (*do_suspend)(void *);
2070 void (*do_resume)(void *);
2071-} processor;
2072+} __do_const processor;
2073
2074 #ifndef MULTI_CPU
2075 extern void cpu_proc_init(void);
2076diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2077index ce0dbe7..c085b6f 100644
2078--- a/arch/arm/include/asm/psci.h
2079+++ b/arch/arm/include/asm/psci.h
2080@@ -29,7 +29,7 @@ struct psci_operations {
2081 int (*cpu_off)(struct psci_power_state state);
2082 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
2083 int (*migrate)(unsigned long cpuid);
2084-};
2085+} __no_const;
2086
2087 extern struct psci_operations psci_ops;
2088
2089diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2090index d3a22be..3a69ad5 100644
2091--- a/arch/arm/include/asm/smp.h
2092+++ b/arch/arm/include/asm/smp.h
2093@@ -107,7 +107,7 @@ struct smp_operations {
2094 int (*cpu_disable)(unsigned int cpu);
2095 #endif
2096 #endif
2097-};
2098+} __no_const;
2099
2100 /*
2101 * set platform specific SMP operations
2102diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2103index f00b569..aa5bb41 100644
2104--- a/arch/arm/include/asm/thread_info.h
2105+++ b/arch/arm/include/asm/thread_info.h
2106@@ -77,9 +77,9 @@ struct thread_info {
2107 .flags = 0, \
2108 .preempt_count = INIT_PREEMPT_COUNT, \
2109 .addr_limit = KERNEL_DS, \
2110- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2111- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2112- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2113+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2114+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2115+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2116 .restart_block = { \
2117 .fn = do_no_restart_syscall, \
2118 }, \
2119@@ -152,7 +152,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2120 #define TIF_SYSCALL_AUDIT 9
2121 #define TIF_SYSCALL_TRACEPOINT 10
2122 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2123-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2124+/* within 8 bits of TIF_SYSCALL_TRACE
2125+ * to meet flexible second operand requirements
2126+ */
2127+#define TIF_GRSEC_SETXID 12
2128+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2129 #define TIF_USING_IWMMXT 17
2130 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2131 #define TIF_RESTORE_SIGMASK 20
2132@@ -165,10 +169,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2133 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2134 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2135 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2136+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2137
2138 /* Checks for any syscall work in entry-common.S */
2139 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2140- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2141+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2142
2143 /*
2144 * Change these and you break ASM code in entry-common.S
2145diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2146index 7e1f760..de33b13 100644
2147--- a/arch/arm/include/asm/uaccess.h
2148+++ b/arch/arm/include/asm/uaccess.h
2149@@ -18,6 +18,7 @@
2150 #include <asm/domain.h>
2151 #include <asm/unified.h>
2152 #include <asm/compiler.h>
2153+#include <asm/pgtable.h>
2154
2155 #define VERIFY_READ 0
2156 #define VERIFY_WRITE 1
2157@@ -63,11 +64,38 @@ extern int __put_user_bad(void);
2158 static inline void set_fs(mm_segment_t fs)
2159 {
2160 current_thread_info()->addr_limit = fs;
2161- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2162+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2163 }
2164
2165 #define segment_eq(a,b) ((a) == (b))
2166
2167+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2168+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2169+
2170+static inline void pax_open_userland(void)
2171+{
2172+
2173+#ifdef CONFIG_PAX_MEMORY_UDEREF
2174+ if (segment_eq(get_fs(), USER_DS)) {
2175+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2176+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2177+ }
2178+#endif
2179+
2180+}
2181+
2182+static inline void pax_close_userland(void)
2183+{
2184+
2185+#ifdef CONFIG_PAX_MEMORY_UDEREF
2186+ if (segment_eq(get_fs(), USER_DS)) {
2187+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2188+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2189+ }
2190+#endif
2191+
2192+}
2193+
2194 #define __addr_ok(addr) ({ \
2195 unsigned long flag; \
2196 __asm__("cmp %2, %0; movlo %0, #0" \
2197@@ -143,8 +171,12 @@ extern int __get_user_4(void *);
2198
2199 #define get_user(x,p) \
2200 ({ \
2201+ int __e; \
2202 might_fault(); \
2203- __get_user_check(x,p); \
2204+ pax_open_userland(); \
2205+ __e = __get_user_check(x,p); \
2206+ pax_close_userland(); \
2207+ __e; \
2208 })
2209
2210 extern int __put_user_1(void *, unsigned int);
2211@@ -188,8 +220,12 @@ extern int __put_user_8(void *, unsigned long long);
2212
2213 #define put_user(x,p) \
2214 ({ \
2215+ int __e; \
2216 might_fault(); \
2217- __put_user_check(x,p); \
2218+ pax_open_userland(); \
2219+ __e = __put_user_check(x,p); \
2220+ pax_close_userland(); \
2221+ __e; \
2222 })
2223
2224 #else /* CONFIG_MMU */
2225@@ -230,13 +266,17 @@ static inline void set_fs(mm_segment_t fs)
2226 #define __get_user(x,ptr) \
2227 ({ \
2228 long __gu_err = 0; \
2229+ pax_open_userland(); \
2230 __get_user_err((x),(ptr),__gu_err); \
2231+ pax_close_userland(); \
2232 __gu_err; \
2233 })
2234
2235 #define __get_user_error(x,ptr,err) \
2236 ({ \
2237+ pax_open_userland(); \
2238 __get_user_err((x),(ptr),err); \
2239+ pax_close_userland(); \
2240 (void) 0; \
2241 })
2242
2243@@ -312,13 +352,17 @@ do { \
2244 #define __put_user(x,ptr) \
2245 ({ \
2246 long __pu_err = 0; \
2247+ pax_open_userland(); \
2248 __put_user_err((x),(ptr),__pu_err); \
2249+ pax_close_userland(); \
2250 __pu_err; \
2251 })
2252
2253 #define __put_user_error(x,ptr,err) \
2254 ({ \
2255+ pax_open_userland(); \
2256 __put_user_err((x),(ptr),err); \
2257+ pax_close_userland(); \
2258 (void) 0; \
2259 })
2260
2261@@ -418,11 +462,44 @@ do { \
2262
2263
2264 #ifdef CONFIG_MMU
2265-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2266-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2267+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2268+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2269+
2270+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2271+{
2272+ unsigned long ret;
2273+
2274+ check_object_size(to, n, false);
2275+ pax_open_userland();
2276+ ret = ___copy_from_user(to, from, n);
2277+ pax_close_userland();
2278+ return ret;
2279+}
2280+
2281+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2282+{
2283+ unsigned long ret;
2284+
2285+ check_object_size(from, n, true);
2286+ pax_open_userland();
2287+ ret = ___copy_to_user(to, from, n);
2288+ pax_close_userland();
2289+ return ret;
2290+}
2291+
2292 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2293-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2294+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2295 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2296+
2297+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2298+{
2299+ unsigned long ret;
2300+ pax_open_userland();
2301+ ret = ___clear_user(addr, n);
2302+ pax_close_userland();
2303+ return ret;
2304+}
2305+
2306 #else
2307 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2308 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2309@@ -431,6 +508,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2310
2311 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2312 {
2313+ if ((long)n < 0)
2314+ return n;
2315+
2316 if (access_ok(VERIFY_READ, from, n))
2317 n = __copy_from_user(to, from, n);
2318 else /* security hole - plug it */
2319@@ -440,6 +520,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2320
2321 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2322 {
2323+ if ((long)n < 0)
2324+ return n;
2325+
2326 if (access_ok(VERIFY_WRITE, to, n))
2327 n = __copy_to_user(to, from, n);
2328 return n;
2329diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2330index 96ee092..37f1844 100644
2331--- a/arch/arm/include/uapi/asm/ptrace.h
2332+++ b/arch/arm/include/uapi/asm/ptrace.h
2333@@ -73,7 +73,7 @@
2334 * ARMv7 groups of PSR bits
2335 */
2336 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2337-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2338+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2339 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2340 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2341
2342diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2343index 60d3b73..e5a0f22 100644
2344--- a/arch/arm/kernel/armksyms.c
2345+++ b/arch/arm/kernel/armksyms.c
2346@@ -53,7 +53,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2347
2348 /* networking */
2349 EXPORT_SYMBOL(csum_partial);
2350-EXPORT_SYMBOL(csum_partial_copy_from_user);
2351+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2352 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2353 EXPORT_SYMBOL(__csum_ipv6_magic);
2354
2355@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2356 #ifdef CONFIG_MMU
2357 EXPORT_SYMBOL(copy_page);
2358
2359-EXPORT_SYMBOL(__copy_from_user);
2360-EXPORT_SYMBOL(__copy_to_user);
2361-EXPORT_SYMBOL(__clear_user);
2362+EXPORT_SYMBOL(___copy_from_user);
2363+EXPORT_SYMBOL(___copy_to_user);
2364+EXPORT_SYMBOL(___clear_user);
2365
2366 EXPORT_SYMBOL(__get_user_1);
2367 EXPORT_SYMBOL(__get_user_2);
2368diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2369index d43c7e5..257c050 100644
2370--- a/arch/arm/kernel/entry-armv.S
2371+++ b/arch/arm/kernel/entry-armv.S
2372@@ -47,6 +47,87 @@
2373 9997:
2374 .endm
2375
2376+ .macro pax_enter_kernel
2377+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2378+ @ make aligned space for saved DACR
2379+ sub sp, sp, #8
2380+ @ save regs
2381+ stmdb sp!, {r1, r2}
2382+ @ read DACR from cpu_domain into r1
2383+ mov r2, sp
2384+ @ assume 8K pages, since we have to split the immediate in two
2385+ bic r2, r2, #(0x1fc0)
2386+ bic r2, r2, #(0x3f)
2387+ ldr r1, [r2, #TI_CPU_DOMAIN]
2388+ @ store old DACR on stack
2389+ str r1, [sp, #8]
2390+#ifdef CONFIG_PAX_KERNEXEC
2391+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2392+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2393+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2394+#endif
2395+#ifdef CONFIG_PAX_MEMORY_UDEREF
2396+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2397+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2398+#endif
2399+ @ write r1 to current_thread_info()->cpu_domain
2400+ str r1, [r2, #TI_CPU_DOMAIN]
2401+ @ write r1 to DACR
2402+ mcr p15, 0, r1, c3, c0, 0
2403+ @ instruction sync
2404+ instr_sync
2405+ @ restore regs
2406+ ldmia sp!, {r1, r2}
2407+#endif
2408+ .endm
2409+
2410+ .macro pax_open_userland
2411+#ifdef CONFIG_PAX_MEMORY_UDEREF
2412+ @ save regs
2413+ stmdb sp!, {r0, r1}
2414+ @ read DACR from cpu_domain into r1
2415+ mov r0, sp
2416+ @ assume 8K pages, since we have to split the immediate in two
2417+ bic r0, r0, #(0x1fc0)
2418+ bic r0, r0, #(0x3f)
2419+ ldr r1, [r0, #TI_CPU_DOMAIN]
2420+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2421+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2422+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2423+ @ write r1 to current_thread_info()->cpu_domain
2424+ str r1, [r0, #TI_CPU_DOMAIN]
2425+ @ write r1 to DACR
2426+ mcr p15, 0, r1, c3, c0, 0
2427+ @ instruction sync
2428+ instr_sync
2429+ @ restore regs
2430+ ldmia sp!, {r0, r1}
2431+#endif
2432+ .endm
2433+
2434+ .macro pax_close_userland
2435+#ifdef CONFIG_PAX_MEMORY_UDEREF
2436+ @ save regs
2437+ stmdb sp!, {r0, r1}
2438+ @ read DACR from cpu_domain into r1
2439+ mov r0, sp
2440+ @ assume 8K pages, since we have to split the immediate in two
2441+ bic r0, r0, #(0x1fc0)
2442+ bic r0, r0, #(0x3f)
2443+ ldr r1, [r0, #TI_CPU_DOMAIN]
2444+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2445+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2446+ @ write r1 to current_thread_info()->cpu_domain
2447+ str r1, [r0, #TI_CPU_DOMAIN]
2448+ @ write r1 to DACR
2449+ mcr p15, 0, r1, c3, c0, 0
2450+ @ instruction sync
2451+ instr_sync
2452+ @ restore regs
2453+ ldmia sp!, {r0, r1}
2454+#endif
2455+ .endm
2456+
2457 .macro pabt_helper
2458 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2459 #ifdef MULTI_PABORT
2460@@ -89,11 +170,15 @@
2461 * Invalid mode handlers
2462 */
2463 .macro inv_entry, reason
2464+
2465+ pax_enter_kernel
2466+
2467 sub sp, sp, #S_FRAME_SIZE
2468 ARM( stmib sp, {r1 - lr} )
2469 THUMB( stmia sp, {r0 - r12} )
2470 THUMB( str sp, [sp, #S_SP] )
2471 THUMB( str lr, [sp, #S_LR] )
2472+
2473 mov r1, #\reason
2474 .endm
2475
2476@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2477 .macro svc_entry, stack_hole=0
2478 UNWIND(.fnstart )
2479 UNWIND(.save {r0 - pc} )
2480+
2481+ pax_enter_kernel
2482+
2483 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2484+
2485 #ifdef CONFIG_THUMB2_KERNEL
2486 SPFIX( str r0, [sp] ) @ temporarily saved
2487 SPFIX( mov r0, sp )
2488@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2489 ldmia r0, {r3 - r5}
2490 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2491 mov r6, #-1 @ "" "" "" ""
2492+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2493+ @ offset sp by 8 as done in pax_enter_kernel
2494+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2495+#else
2496 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2497+#endif
2498 SPFIX( addeq r2, r2, #4 )
2499 str r3, [sp, #-4]! @ save the "real" r0 copied
2500 @ from the exception stack
2501@@ -316,6 +410,9 @@ ENDPROC(__pabt_svc)
2502 .macro usr_entry
2503 UNWIND(.fnstart )
2504 UNWIND(.cantunwind ) @ don't unwind the user space
2505+
2506+ pax_enter_kernel_user
2507+
2508 sub sp, sp, #S_FRAME_SIZE
2509 ARM( stmib sp, {r1 - r12} )
2510 THUMB( stmia sp, {r0 - r12} )
2511@@ -357,7 +454,8 @@ ENDPROC(__pabt_svc)
2512 .endm
2513
2514 .macro kuser_cmpxchg_check
2515-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
2516+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
2517+ !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
2518 #ifndef CONFIG_MMU
2519 #warning "NPTL on non MMU needs fixing"
2520 #else
2521@@ -414,7 +512,9 @@ __und_usr:
2522 tst r3, #PSR_T_BIT @ Thumb mode?
2523 bne __und_usr_thumb
2524 sub r4, r2, #4 @ ARM instr at LR - 4
2525+ pax_open_userland
2526 1: ldrt r0, [r4]
2527+ pax_close_userland
2528 #ifdef CONFIG_CPU_ENDIAN_BE8
2529 rev r0, r0 @ little endian instruction
2530 #endif
2531@@ -449,10 +549,14 @@ __und_usr_thumb:
2532 */
2533 .arch armv6t2
2534 #endif
2535+ pax_open_userland
2536 2: ldrht r5, [r4]
2537+ pax_close_userland
2538 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2539 blo __und_usr_fault_16 @ 16bit undefined instruction
2540+ pax_open_userland
2541 3: ldrht r0, [r2]
2542+ pax_close_userland
2543 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2544 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2545 orr r0, r0, r5, lsl #16
2546@@ -481,7 +585,8 @@ ENDPROC(__und_usr)
2547 */
2548 .pushsection .fixup, "ax"
2549 .align 2
2550-4: mov pc, r9
2551+4: pax_close_userland
2552+ mov pc, r9
2553 .popsection
2554 .pushsection __ex_table,"a"
2555 .long 1b, 4b
2556@@ -690,7 +795,7 @@ ENTRY(__switch_to)
2557 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
2558 THUMB( str sp, [ip], #4 )
2559 THUMB( str lr, [ip], #4 )
2560-#ifdef CONFIG_CPU_USE_DOMAINS
2561+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2562 ldr r6, [r2, #TI_CPU_DOMAIN]
2563 #endif
2564 set_tls r3, r4, r5
2565@@ -699,7 +804,7 @@ ENTRY(__switch_to)
2566 ldr r8, =__stack_chk_guard
2567 ldr r7, [r7, #TSK_STACK_CANARY]
2568 #endif
2569-#ifdef CONFIG_CPU_USE_DOMAINS
2570+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2571 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2572 #endif
2573 mov r5, r0
2574diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2575index bc5bc0a..d0998ca 100644
2576--- a/arch/arm/kernel/entry-common.S
2577+++ b/arch/arm/kernel/entry-common.S
2578@@ -10,18 +10,46 @@
2579
2580 #include <asm/unistd.h>
2581 #include <asm/ftrace.h>
2582+#include <asm/domain.h>
2583 #include <asm/unwind.h>
2584
2585+#include "entry-header.S"
2586+
2587 #ifdef CONFIG_NEED_RET_TO_USER
2588 #include <mach/entry-macro.S>
2589 #else
2590 .macro arch_ret_to_user, tmp1, tmp2
2591+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2592+ @ save regs
2593+ stmdb sp!, {r1, r2}
2594+ @ read DACR from cpu_domain into r1
2595+ mov r2, sp
2596+ @ assume 8K pages, since we have to split the immediate in two
2597+ bic r2, r2, #(0x1fc0)
2598+ bic r2, r2, #(0x3f)
2599+ ldr r1, [r2, #TI_CPU_DOMAIN]
2600+#ifdef CONFIG_PAX_KERNEXEC
2601+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2602+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2603+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2604+#endif
2605+#ifdef CONFIG_PAX_MEMORY_UDEREF
2606+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2607+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2608+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2609+#endif
2610+ @ write r1 to current_thread_info()->cpu_domain
2611+ str r1, [r2, #TI_CPU_DOMAIN]
2612+ @ write r1 to DACR
2613+ mcr p15, 0, r1, c3, c0, 0
2614+ @ instruction sync
2615+ instr_sync
2616+ @ restore regs
2617+ ldmia sp!, {r1, r2}
2618+#endif
2619 .endm
2620 #endif
2621
2622-#include "entry-header.S"
2623-
2624-
2625 .align 5
2626 /*
2627 * This is the fast syscall return path. We do as little as
2628@@ -350,6 +378,7 @@ ENDPROC(ftrace_stub)
2629
2630 .align 5
2631 ENTRY(vector_swi)
2632+
2633 sub sp, sp, #S_FRAME_SIZE
2634 stmia sp, {r0 - r12} @ Calling r0 - r12
2635 ARM( add r8, sp, #S_PC )
2636@@ -399,6 +428,12 @@ ENTRY(vector_swi)
2637 ldr scno, [lr, #-4] @ get SWI instruction
2638 #endif
2639
2640+ /*
2641+ * do this here to avoid a performance hit of wrapping the code above
2642+ * that directly dereferences userland to parse the SWI instruction
2643+ */
2644+ pax_enter_kernel_user
2645+
2646 #ifdef CONFIG_ALIGNMENT_TRAP
2647 ldr ip, __cr_alignment
2648 ldr ip, [ip]
2649diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2650index 160f337..db67ee4 100644
2651--- a/arch/arm/kernel/entry-header.S
2652+++ b/arch/arm/kernel/entry-header.S
2653@@ -73,6 +73,60 @@
2654 msr cpsr_c, \rtemp @ switch back to the SVC mode
2655 .endm
2656
2657+ .macro pax_enter_kernel_user
2658+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2659+ @ save regs
2660+ stmdb sp!, {r0, r1}
2661+ @ read DACR from cpu_domain into r1
2662+ mov r0, sp
2663+ @ assume 8K pages, since we have to split the immediate in two
2664+ bic r0, r0, #(0x1fc0)
2665+ bic r0, r0, #(0x3f)
2666+ ldr r1, [r0, #TI_CPU_DOMAIN]
2667+#ifdef CONFIG_PAX_MEMORY_UDEREF
2668+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2669+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2670+#endif
2671+#ifdef CONFIG_PAX_KERNEXEC
2672+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2673+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2674+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2675+#endif
2676+ @ write r1 to current_thread_info()->cpu_domain
2677+ str r1, [r0, #TI_CPU_DOMAIN]
2678+ @ write r1 to DACR
2679+ mcr p15, 0, r1, c3, c0, 0
2680+ @ instruction sync
2681+ instr_sync
2682+ @ restore regs
2683+ ldmia sp!, {r0, r1}
2684+#endif
2685+ .endm
2686+
2687+ .macro pax_exit_kernel
2688+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2689+ @ save regs
2690+ stmdb sp!, {r0, r1}
2691+ @ read old DACR from stack into r1
2692+ ldr r1, [sp, #(8 + S_SP)]
2693+ sub r1, r1, #8
2694+ ldr r1, [r1]
2695+
2696+ @ write r1 to current_thread_info()->cpu_domain
2697+ mov r0, sp
2698+ @ assume 8K pages, since we have to split the immediate in two
2699+ bic r0, r0, #(0x1fc0)
2700+ bic r0, r0, #(0x3f)
2701+ str r1, [r0, #TI_CPU_DOMAIN]
2702+ @ write r1 to DACR
2703+ mcr p15, 0, r1, c3, c0, 0
2704+ @ instruction sync
2705+ instr_sync
2706+ @ restore regs
2707+ ldmia sp!, {r0, r1}
2708+#endif
2709+ .endm
2710+
2711 #ifndef CONFIG_THUMB2_KERNEL
2712 .macro svc_exit, rpsr, irq = 0
2713 .if \irq != 0
2714@@ -92,6 +146,9 @@
2715 blne trace_hardirqs_off
2716 #endif
2717 .endif
2718+
2719+ pax_exit_kernel
2720+
2721 msr spsr_cxsf, \rpsr
2722 #if defined(CONFIG_CPU_V6)
2723 ldr r0, [sp]
2724@@ -155,6 +212,9 @@
2725 blne trace_hardirqs_off
2726 #endif
2727 .endif
2728+
2729+ pax_exit_kernel
2730+
2731 ldr lr, [sp, #S_SP] @ top of the stack
2732 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2733 clrex @ clear the exclusive monitor
2734diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2735index 25442f4..d4948fc 100644
2736--- a/arch/arm/kernel/fiq.c
2737+++ b/arch/arm/kernel/fiq.c
2738@@ -84,17 +84,16 @@ int show_fiq_list(struct seq_file *p, int prec)
2739
2740 void set_fiq_handler(void *start, unsigned int length)
2741 {
2742-#if defined(CONFIG_CPU_USE_DOMAINS)
2743- void *base = (void *)0xffff0000;
2744-#else
2745 void *base = vectors_page;
2746-#endif
2747 unsigned offset = FIQ_OFFSET;
2748
2749+ pax_open_kernel();
2750 memcpy(base + offset, start, length);
2751+ pax_close_kernel();
2752+
2753+ if (!cache_is_vipt_nonaliasing())
2754+ flush_icache_range(base + offset, offset + length);
2755 flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
2756- if (!vectors_high())
2757- flush_icache_range(offset, offset + length);
2758 }
2759
2760 int claim_fiq(struct fiq_handler *f)
2761diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2762index 8bac553..caee108 100644
2763--- a/arch/arm/kernel/head.S
2764+++ b/arch/arm/kernel/head.S
2765@@ -52,7 +52,9 @@
2766 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2767
2768 .macro pgtbl, rd, phys
2769- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2770+ mov \rd, #TEXT_OFFSET
2771+ sub \rd, #PG_DIR_SIZE
2772+ add \rd, \rd, \phys
2773 .endm
2774
2775 /*
2776@@ -434,7 +436,7 @@ __enable_mmu:
2777 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2778 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2779 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2780- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2781+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2782 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2783 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2784 #endif
2785diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
2786index 1fd749e..47adb08 100644
2787--- a/arch/arm/kernel/hw_breakpoint.c
2788+++ b/arch/arm/kernel/hw_breakpoint.c
2789@@ -1029,7 +1029,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
2790 return NOTIFY_OK;
2791 }
2792
2793-static struct notifier_block __cpuinitdata dbg_reset_nb = {
2794+static struct notifier_block dbg_reset_nb = {
2795 .notifier_call = dbg_reset_notify,
2796 };
2797
2798diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2799index 1e9be5d..03edbc2 100644
2800--- a/arch/arm/kernel/module.c
2801+++ b/arch/arm/kernel/module.c
2802@@ -37,12 +37,37 @@
2803 #endif
2804
2805 #ifdef CONFIG_MMU
2806-void *module_alloc(unsigned long size)
2807+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2808 {
2809+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2810+ return NULL;
2811 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2812- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2813+ GFP_KERNEL, prot, -1,
2814 __builtin_return_address(0));
2815 }
2816+
2817+void *module_alloc(unsigned long size)
2818+{
2819+
2820+#ifdef CONFIG_PAX_KERNEXEC
2821+ return __module_alloc(size, PAGE_KERNEL);
2822+#else
2823+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2824+#endif
2825+
2826+}
2827+
2828+#ifdef CONFIG_PAX_KERNEXEC
2829+void module_free_exec(struct module *mod, void *module_region)
2830+{
2831+ module_free(mod, module_region);
2832+}
2833+
2834+void *module_alloc_exec(unsigned long size)
2835+{
2836+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2837+}
2838+#endif
2839 #endif
2840
2841 int
2842diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2843index 07314af..c46655c 100644
2844--- a/arch/arm/kernel/patch.c
2845+++ b/arch/arm/kernel/patch.c
2846@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2847 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2848 int size;
2849
2850+ pax_open_kernel();
2851 if (thumb2 && __opcode_is_thumb16(insn)) {
2852 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2853 size = sizeof(u16);
2854@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2855 *(u32 *)addr = insn;
2856 size = sizeof(u32);
2857 }
2858+ pax_close_kernel();
2859
2860 flush_icache_range((uintptr_t)(addr),
2861 (uintptr_t)(addr) + size);
2862diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
2863index e19edc6..e186ee1 100644
2864--- a/arch/arm/kernel/perf_event.c
2865+++ b/arch/arm/kernel/perf_event.c
2866@@ -56,7 +56,7 @@ armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
2867 int mapping;
2868
2869 if (config >= PERF_COUNT_HW_MAX)
2870- return -ENOENT;
2871+ return -EINVAL;
2872
2873 mapping = (*event_map)[config];
2874 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
2875diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
2876index 1f2740e..b36e225 100644
2877--- a/arch/arm/kernel/perf_event_cpu.c
2878+++ b/arch/arm/kernel/perf_event_cpu.c
2879@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
2880 return NOTIFY_OK;
2881 }
2882
2883-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
2884+static struct notifier_block cpu_pmu_hotplug_notifier = {
2885 .notifier_call = cpu_pmu_notify,
2886 };
2887
2888diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2889index 5bc2615..dcd439f 100644
2890--- a/arch/arm/kernel/process.c
2891+++ b/arch/arm/kernel/process.c
2892@@ -223,6 +223,7 @@ void machine_power_off(void)
2893
2894 if (pm_power_off)
2895 pm_power_off();
2896+ BUG();
2897 }
2898
2899 /*
2900@@ -236,7 +237,7 @@ void machine_power_off(void)
2901 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2902 * to use. Implementing such co-ordination would be essentially impossible.
2903 */
2904-void machine_restart(char *cmd)
2905+__noreturn void machine_restart(char *cmd)
2906 {
2907 smp_send_stop();
2908
2909@@ -258,8 +259,8 @@ void __show_regs(struct pt_regs *regs)
2910
2911 show_regs_print_info(KERN_DEFAULT);
2912
2913- print_symbol("PC is at %s\n", instruction_pointer(regs));
2914- print_symbol("LR is at %s\n", regs->ARM_lr);
2915+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2916+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2917 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2918 "sp : %08lx ip : %08lx fp : %08lx\n",
2919 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2920@@ -426,12 +427,6 @@ unsigned long get_wchan(struct task_struct *p)
2921 return 0;
2922 }
2923
2924-unsigned long arch_randomize_brk(struct mm_struct *mm)
2925-{
2926- unsigned long range_end = mm->brk + 0x02000000;
2927- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2928-}
2929-
2930 #ifdef CONFIG_MMU
2931 #ifdef CONFIG_KUSER_HELPERS
2932 /*
2933@@ -447,7 +442,7 @@ static struct vm_area_struct gate_vma = {
2934
2935 static int __init gate_vma_init(void)
2936 {
2937- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2938+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2939 return 0;
2940 }
2941 arch_initcall(gate_vma_init);
2942@@ -466,48 +461,23 @@ int in_gate_area_no_mm(unsigned long addr)
2943 {
2944 return in_gate_area(NULL, addr);
2945 }
2946-#define is_gate_vma(vma) ((vma) = &gate_vma)
2947+#define is_gate_vma(vma) ((vma) == &gate_vma)
2948 #else
2949 #define is_gate_vma(vma) 0
2950 #endif
2951
2952 const char *arch_vma_name(struct vm_area_struct *vma)
2953 {
2954- return is_gate_vma(vma) ? "[vectors]" :
2955- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
2956- "[sigpage]" : NULL;
2957+ return is_gate_vma(vma) ? "[vectors]" : NULL;
2958 }
2959
2960-static struct page *signal_page;
2961-extern struct page *get_signal_page(void);
2962-
2963 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2964 {
2965 struct mm_struct *mm = current->mm;
2966- unsigned long addr;
2967- int ret;
2968-
2969- if (!signal_page)
2970- signal_page = get_signal_page();
2971- if (!signal_page)
2972- return -ENOMEM;
2973
2974 down_write(&mm->mmap_sem);
2975- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
2976- if (IS_ERR_VALUE(addr)) {
2977- ret = addr;
2978- goto up_fail;
2979- }
2980-
2981- ret = install_special_mapping(mm, addr, PAGE_SIZE,
2982- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2983- &signal_page);
2984-
2985- if (ret == 0)
2986- mm->context.sigpage = addr;
2987-
2988- up_fail:
2989+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2990 up_write(&mm->mmap_sem);
2991- return ret;
2992+ return 0;
2993 }
2994 #endif
2995diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2996index 3653164..d83e55d 100644
2997--- a/arch/arm/kernel/psci.c
2998+++ b/arch/arm/kernel/psci.c
2999@@ -24,7 +24,7 @@
3000 #include <asm/opcodes-virt.h>
3001 #include <asm/psci.h>
3002
3003-struct psci_operations psci_ops;
3004+struct psci_operations psci_ops __read_only;
3005
3006 static int (*invoke_psci_fn)(u32, u32, u32, u32);
3007
3008diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3009index 03deeff..741ce88 100644
3010--- a/arch/arm/kernel/ptrace.c
3011+++ b/arch/arm/kernel/ptrace.c
3012@@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
3013 return current_thread_info()->syscall;
3014 }
3015
3016+#ifdef CONFIG_GRKERNSEC_SETXID
3017+extern void gr_delayed_cred_worker(void);
3018+#endif
3019+
3020 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3021 {
3022 current_thread_info()->syscall = scno;
3023
3024+#ifdef CONFIG_GRKERNSEC_SETXID
3025+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3026+ gr_delayed_cred_worker();
3027+#endif
3028+
3029 /* Do the secure computing check first; failures should be fast. */
3030 if (secure_computing(scno) == -1)
3031 return -1;
3032diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3033index b4b1d39..efdc9be 100644
3034--- a/arch/arm/kernel/setup.c
3035+++ b/arch/arm/kernel/setup.c
3036@@ -97,21 +97,23 @@ EXPORT_SYMBOL(system_serial_high);
3037 unsigned int elf_hwcap __read_mostly;
3038 EXPORT_SYMBOL(elf_hwcap);
3039
3040+pteval_t __supported_pte_mask __read_only;
3041+pmdval_t __supported_pmd_mask __read_only;
3042
3043 #ifdef MULTI_CPU
3044-struct processor processor __read_mostly;
3045+struct processor processor;
3046 #endif
3047 #ifdef MULTI_TLB
3048-struct cpu_tlb_fns cpu_tlb __read_mostly;
3049+struct cpu_tlb_fns cpu_tlb __read_only;
3050 #endif
3051 #ifdef MULTI_USER
3052-struct cpu_user_fns cpu_user __read_mostly;
3053+struct cpu_user_fns cpu_user __read_only;
3054 #endif
3055 #ifdef MULTI_CACHE
3056-struct cpu_cache_fns cpu_cache __read_mostly;
3057+struct cpu_cache_fns cpu_cache __read_only;
3058 #endif
3059 #ifdef CONFIG_OUTER_CACHE
3060-struct outer_cache_fns outer_cache __read_mostly;
3061+struct outer_cache_fns outer_cache __read_only;
3062 EXPORT_SYMBOL(outer_cache);
3063 #endif
3064
3065@@ -236,9 +238,13 @@ static int __get_cpu_architecture(void)
3066 asm("mrc p15, 0, %0, c0, c1, 4"
3067 : "=r" (mmfr0));
3068 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3069- (mmfr0 & 0x000000f0) >= 0x00000030)
3070+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3071 cpu_arch = CPU_ARCH_ARMv7;
3072- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3073+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3074+ __supported_pte_mask |= L_PTE_PXN;
3075+ __supported_pmd_mask |= PMD_PXNTABLE;
3076+ }
3077+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3078 (mmfr0 & 0x000000f0) == 0x00000020)
3079 cpu_arch = CPU_ARCH_ARMv6;
3080 else
3081@@ -479,7 +485,7 @@ static void __init setup_processor(void)
3082 __cpu_architecture = __get_cpu_architecture();
3083
3084 #ifdef MULTI_CPU
3085- processor = *list->proc;
3086+ memcpy((void *)&processor, list->proc, sizeof processor);
3087 #endif
3088 #ifdef MULTI_TLB
3089 cpu_tlb = *list->tlb;
3090diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3091index 5a42c12..a2bb7c6 100644
3092--- a/arch/arm/kernel/signal.c
3093+++ b/arch/arm/kernel/signal.c
3094@@ -45,8 +45,6 @@ static const unsigned long sigreturn_codes[7] = {
3095 MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
3096 };
3097
3098-static unsigned long signal_return_offset;
3099-
3100 #ifdef CONFIG_CRUNCH
3101 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3102 {
3103@@ -406,8 +404,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3104 * except when the MPU has protected the vectors
3105 * page from PL0
3106 */
3107- retcode = mm->context.sigpage + signal_return_offset +
3108- (idx << 2) + thumb;
3109+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3110 } else
3111 #endif
3112 {
3113@@ -611,33 +608,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3114 } while (thread_flags & _TIF_WORK_MASK);
3115 return 0;
3116 }
3117-
3118-struct page *get_signal_page(void)
3119-{
3120- unsigned long ptr;
3121- unsigned offset;
3122- struct page *page;
3123- void *addr;
3124-
3125- page = alloc_pages(GFP_KERNEL, 0);
3126-
3127- if (!page)
3128- return NULL;
3129-
3130- addr = page_address(page);
3131-
3132- /* Give the signal return code some randomness */
3133- offset = 0x200 + (get_random_int() & 0x7fc);
3134- signal_return_offset = offset;
3135-
3136- /*
3137- * Copy signal return handlers into the vector page, and
3138- * set sigreturn to be a pointer to these.
3139- */
3140- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3141-
3142- ptr = (unsigned long)addr + offset;
3143- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3144-
3145- return page;
3146-}
3147diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3148index 5919eb4..b5d6dfe 100644
3149--- a/arch/arm/kernel/smp.c
3150+++ b/arch/arm/kernel/smp.c
3151@@ -70,7 +70,7 @@ enum ipi_msg_type {
3152
3153 static DECLARE_COMPLETION(cpu_running);
3154
3155-static struct smp_operations smp_ops;
3156+static struct smp_operations smp_ops __read_only;
3157
3158 void __init smp_set_ops(struct smp_operations *ops)
3159 {
3160diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3161index 6b9567e..b8af2d6 100644
3162--- a/arch/arm/kernel/traps.c
3163+++ b/arch/arm/kernel/traps.c
3164@@ -55,7 +55,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3165 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3166 {
3167 #ifdef CONFIG_KALLSYMS
3168- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3169+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3170 #else
3171 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3172 #endif
3173@@ -257,6 +257,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3174 static int die_owner = -1;
3175 static unsigned int die_nest_count;
3176
3177+extern void gr_handle_kernel_exploit(void);
3178+
3179 static unsigned long oops_begin(void)
3180 {
3181 int cpu;
3182@@ -299,6 +301,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3183 panic("Fatal exception in interrupt");
3184 if (panic_on_oops)
3185 panic("Fatal exception");
3186+
3187+ gr_handle_kernel_exploit();
3188+
3189 if (signr)
3190 do_exit(signr);
3191 }
3192@@ -592,7 +597,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3193 * The user helper at 0xffff0fe0 must be used instead.
3194 * (see entry-armv.S for details)
3195 */
3196+ pax_open_kernel();
3197 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3198+ pax_close_kernel();
3199 }
3200 return 0;
3201
3202@@ -848,5 +855,9 @@ void __init early_trap_init(void *vectors_base)
3203 kuser_init(vectors_base);
3204
3205 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3206- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3207+
3208+#ifndef CONFIG_PAX_MEMORY_UDEREF
3209+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3210+#endif
3211+
3212 }
3213diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3214index 33f2ea3..0b91824 100644
3215--- a/arch/arm/kernel/vmlinux.lds.S
3216+++ b/arch/arm/kernel/vmlinux.lds.S
3217@@ -8,7 +8,11 @@
3218 #include <asm/thread_info.h>
3219 #include <asm/memory.h>
3220 #include <asm/page.h>
3221-
3222+
3223+#ifdef CONFIG_PAX_KERNEXEC
3224+#include <asm/pgtable.h>
3225+#endif
3226+
3227 #define PROC_INFO \
3228 . = ALIGN(4); \
3229 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3230@@ -94,6 +98,11 @@ SECTIONS
3231 _text = .;
3232 HEAD_TEXT
3233 }
3234+
3235+#ifdef CONFIG_PAX_KERNEXEC
3236+ . = ALIGN(1<<SECTION_SHIFT);
3237+#endif
3238+
3239 .text : { /* Real text segment */
3240 _stext = .; /* Text and read-only data */
3241 __exception_text_start = .;
3242@@ -116,6 +125,8 @@ SECTIONS
3243 ARM_CPU_KEEP(PROC_INFO)
3244 }
3245
3246+ _etext = .; /* End of text section */
3247+
3248 RO_DATA(PAGE_SIZE)
3249
3250 . = ALIGN(4);
3251@@ -146,7 +157,9 @@ SECTIONS
3252
3253 NOTES
3254
3255- _etext = .; /* End of text and rodata section */
3256+#ifdef CONFIG_PAX_KERNEXEC
3257+ . = ALIGN(1<<SECTION_SHIFT);
3258+#endif
3259
3260 #ifndef CONFIG_XIP_KERNEL
3261 . = ALIGN(PAGE_SIZE);
3262@@ -224,6 +237,11 @@ SECTIONS
3263 . = PAGE_OFFSET + TEXT_OFFSET;
3264 #else
3265 __init_end = .;
3266+
3267+#ifdef CONFIG_PAX_KERNEXEC
3268+ . = ALIGN(1<<SECTION_SHIFT);
3269+#endif
3270+
3271 . = ALIGN(THREAD_SIZE);
3272 __data_loc = .;
3273 #endif
3274diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3275index 14a0d98..7771a7d 100644
3276--- a/arch/arm/lib/clear_user.S
3277+++ b/arch/arm/lib/clear_user.S
3278@@ -12,14 +12,14 @@
3279
3280 .text
3281
3282-/* Prototype: int __clear_user(void *addr, size_t sz)
3283+/* Prototype: int ___clear_user(void *addr, size_t sz)
3284 * Purpose : clear some user memory
3285 * Params : addr - user memory address to clear
3286 * : sz - number of bytes to clear
3287 * Returns : number of bytes NOT cleared
3288 */
3289 ENTRY(__clear_user_std)
3290-WEAK(__clear_user)
3291+WEAK(___clear_user)
3292 stmfd sp!, {r1, lr}
3293 mov r2, #0
3294 cmp r1, #4
3295@@ -44,7 +44,7 @@ WEAK(__clear_user)
3296 USER( strnebt r2, [r0])
3297 mov r0, #0
3298 ldmfd sp!, {r1, pc}
3299-ENDPROC(__clear_user)
3300+ENDPROC(___clear_user)
3301 ENDPROC(__clear_user_std)
3302
3303 .pushsection .fixup,"ax"
3304diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3305index 66a477a..bee61d3 100644
3306--- a/arch/arm/lib/copy_from_user.S
3307+++ b/arch/arm/lib/copy_from_user.S
3308@@ -16,7 +16,7 @@
3309 /*
3310 * Prototype:
3311 *
3312- * size_t __copy_from_user(void *to, const void *from, size_t n)
3313+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3314 *
3315 * Purpose:
3316 *
3317@@ -84,11 +84,11 @@
3318
3319 .text
3320
3321-ENTRY(__copy_from_user)
3322+ENTRY(___copy_from_user)
3323
3324 #include "copy_template.S"
3325
3326-ENDPROC(__copy_from_user)
3327+ENDPROC(___copy_from_user)
3328
3329 .pushsection .fixup,"ax"
3330 .align 0
3331diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3332index 6ee2f67..d1cce76 100644
3333--- a/arch/arm/lib/copy_page.S
3334+++ b/arch/arm/lib/copy_page.S
3335@@ -10,6 +10,7 @@
3336 * ASM optimised string functions
3337 */
3338 #include <linux/linkage.h>
3339+#include <linux/const.h>
3340 #include <asm/assembler.h>
3341 #include <asm/asm-offsets.h>
3342 #include <asm/cache.h>
3343diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3344index d066df6..df28194 100644
3345--- a/arch/arm/lib/copy_to_user.S
3346+++ b/arch/arm/lib/copy_to_user.S
3347@@ -16,7 +16,7 @@
3348 /*
3349 * Prototype:
3350 *
3351- * size_t __copy_to_user(void *to, const void *from, size_t n)
3352+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3353 *
3354 * Purpose:
3355 *
3356@@ -88,11 +88,11 @@
3357 .text
3358
3359 ENTRY(__copy_to_user_std)
3360-WEAK(__copy_to_user)
3361+WEAK(___copy_to_user)
3362
3363 #include "copy_template.S"
3364
3365-ENDPROC(__copy_to_user)
3366+ENDPROC(___copy_to_user)
3367 ENDPROC(__copy_to_user_std)
3368
3369 .pushsection .fixup,"ax"
3370diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3371index 7d08b43..f7ca7ea 100644
3372--- a/arch/arm/lib/csumpartialcopyuser.S
3373+++ b/arch/arm/lib/csumpartialcopyuser.S
3374@@ -57,8 +57,8 @@
3375 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3376 */
3377
3378-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3379-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3380+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3381+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3382
3383 #include "csumpartialcopygeneric.S"
3384
3385diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3386index 64dbfa5..84a3fd9 100644
3387--- a/arch/arm/lib/delay.c
3388+++ b/arch/arm/lib/delay.c
3389@@ -28,7 +28,7 @@
3390 /*
3391 * Default to the loop-based delay implementation.
3392 */
3393-struct arm_delay_ops arm_delay_ops = {
3394+struct arm_delay_ops arm_delay_ops __read_only = {
3395 .delay = __loop_delay,
3396 .const_udelay = __loop_const_udelay,
3397 .udelay = __loop_udelay,
3398diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3399index 025f742..8432b08 100644
3400--- a/arch/arm/lib/uaccess_with_memcpy.c
3401+++ b/arch/arm/lib/uaccess_with_memcpy.c
3402@@ -104,7 +104,7 @@ out:
3403 }
3404
3405 unsigned long
3406-__copy_to_user(void __user *to, const void *from, unsigned long n)
3407+___copy_to_user(void __user *to, const void *from, unsigned long n)
3408 {
3409 /*
3410 * This test is stubbed out of the main function above to keep
3411diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3412index f389228..592ef66 100644
3413--- a/arch/arm/mach-kirkwood/common.c
3414+++ b/arch/arm/mach-kirkwood/common.c
3415@@ -149,7 +149,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3416 clk_gate_ops.disable(hw);
3417 }
3418
3419-static struct clk_ops clk_gate_fn_ops;
3420+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3421+{
3422+ return clk_gate_ops.is_enabled(hw);
3423+}
3424+
3425+static struct clk_ops clk_gate_fn_ops = {
3426+ .enable = clk_gate_fn_enable,
3427+ .disable = clk_gate_fn_disable,
3428+ .is_enabled = clk_gate_fn_is_enabled,
3429+};
3430
3431 static struct clk __init *clk_register_gate_fn(struct device *dev,
3432 const char *name,
3433@@ -183,14 +192,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3434 gate_fn->fn_en = fn_en;
3435 gate_fn->fn_dis = fn_dis;
3436
3437- /* ops is the gate ops, but with our enable/disable functions */
3438- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3439- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3440- clk_gate_fn_ops = clk_gate_ops;
3441- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3442- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3443- }
3444-
3445 clk = clk_register(dev, &gate_fn->gate.hw);
3446
3447 if (IS_ERR(clk))
3448diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3449index f6eeb87..cc90868 100644
3450--- a/arch/arm/mach-omap2/board-n8x0.c
3451+++ b/arch/arm/mach-omap2/board-n8x0.c
3452@@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3453 }
3454 #endif
3455
3456-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3457+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3458 .late_init = n8x0_menelaus_late_init,
3459 };
3460
3461diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3462index 6c4da12..d9ca72d 100644
3463--- a/arch/arm/mach-omap2/gpmc.c
3464+++ b/arch/arm/mach-omap2/gpmc.c
3465@@ -147,7 +147,6 @@ struct omap3_gpmc_regs {
3466 };
3467
3468 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3469-static struct irq_chip gpmc_irq_chip;
3470 static unsigned gpmc_irq_start;
3471
3472 static struct resource gpmc_mem_root;
3473@@ -711,6 +710,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3474
3475 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3476
3477+static struct irq_chip gpmc_irq_chip = {
3478+ .name = "gpmc",
3479+ .irq_startup = gpmc_irq_noop_ret,
3480+ .irq_enable = gpmc_irq_enable,
3481+ .irq_disable = gpmc_irq_disable,
3482+ .irq_shutdown = gpmc_irq_noop,
3483+ .irq_ack = gpmc_irq_noop,
3484+ .irq_mask = gpmc_irq_noop,
3485+ .irq_unmask = gpmc_irq_noop,
3486+
3487+};
3488+
3489 static int gpmc_setup_irq(void)
3490 {
3491 int i;
3492@@ -725,15 +736,6 @@ static int gpmc_setup_irq(void)
3493 return gpmc_irq_start;
3494 }
3495
3496- gpmc_irq_chip.name = "gpmc";
3497- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3498- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3499- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3500- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3501- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3502- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3503- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3504-
3505 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3506 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3507
3508diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3509index f8bb3b9..831e7b8 100644
3510--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3511+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3512@@ -339,7 +339,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
3513 return NOTIFY_OK;
3514 }
3515
3516-static struct notifier_block __refdata irq_hotplug_notifier = {
3517+static struct notifier_block irq_hotplug_notifier = {
3518 .notifier_call = irq_cpu_hotplug_notify,
3519 };
3520
3521diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3522index e6d2307..d057195 100644
3523--- a/arch/arm/mach-omap2/omap_device.c
3524+++ b/arch/arm/mach-omap2/omap_device.c
3525@@ -499,7 +499,7 @@ void omap_device_delete(struct omap_device *od)
3526 struct platform_device __init *omap_device_build(const char *pdev_name,
3527 int pdev_id,
3528 struct omap_hwmod *oh,
3529- void *pdata, int pdata_len)
3530+ const void *pdata, int pdata_len)
3531 {
3532 struct omap_hwmod *ohs[] = { oh };
3533
3534@@ -527,7 +527,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3535 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3536 int pdev_id,
3537 struct omap_hwmod **ohs,
3538- int oh_cnt, void *pdata,
3539+ int oh_cnt, const void *pdata,
3540 int pdata_len)
3541 {
3542 int ret = -ENOMEM;
3543diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3544index 044c31d..2ee0861 100644
3545--- a/arch/arm/mach-omap2/omap_device.h
3546+++ b/arch/arm/mach-omap2/omap_device.h
3547@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3548 /* Core code interface */
3549
3550 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3551- struct omap_hwmod *oh, void *pdata,
3552+ struct omap_hwmod *oh, const void *pdata,
3553 int pdata_len);
3554
3555 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3556 struct omap_hwmod **oh, int oh_cnt,
3557- void *pdata, int pdata_len);
3558+ const void *pdata, int pdata_len);
3559
3560 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3561 struct omap_hwmod **ohs, int oh_cnt);
3562diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3563index 7341eff..fd75e34 100644
3564--- a/arch/arm/mach-omap2/omap_hwmod.c
3565+++ b/arch/arm/mach-omap2/omap_hwmod.c
3566@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
3567 int (*init_clkdm)(struct omap_hwmod *oh);
3568 void (*update_context_lost)(struct omap_hwmod *oh);
3569 int (*get_context_lost)(struct omap_hwmod *oh);
3570-};
3571+} __no_const;
3572
3573 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3574-static struct omap_hwmod_soc_ops soc_ops;
3575+static struct omap_hwmod_soc_ops soc_ops __read_only;
3576
3577 /* omap_hwmod_list contains all registered struct omap_hwmods */
3578 static LIST_HEAD(omap_hwmod_list);
3579diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3580index d15c7bb..b2d1f0c 100644
3581--- a/arch/arm/mach-omap2/wd_timer.c
3582+++ b/arch/arm/mach-omap2/wd_timer.c
3583@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3584 struct omap_hwmod *oh;
3585 char *oh_name = "wd_timer2";
3586 char *dev_name = "omap_wdt";
3587- struct omap_wd_timer_platform_data pdata;
3588+ static struct omap_wd_timer_platform_data pdata = {
3589+ .read_reset_sources = prm_read_reset_sources
3590+ };
3591
3592 if (!cpu_class_is_omap2() || of_have_populated_dt())
3593 return 0;
3594@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3595 return -EINVAL;
3596 }
3597
3598- pdata.read_reset_sources = prm_read_reset_sources;
3599-
3600 pdev = omap_device_build(dev_name, id, oh, &pdata,
3601 sizeof(struct omap_wd_timer_platform_data));
3602 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3603diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3604index 0cdba8d..297993e 100644
3605--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3606+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3607@@ -181,7 +181,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3608 bool entered_lp2 = false;
3609
3610 if (tegra_pending_sgi())
3611- ACCESS_ONCE(abort_flag) = true;
3612+ ACCESS_ONCE_RW(abort_flag) = true;
3613
3614 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3615
3616diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3617index cad3ca86..1d79e0f 100644
3618--- a/arch/arm/mach-ux500/setup.h
3619+++ b/arch/arm/mach-ux500/setup.h
3620@@ -37,13 +37,6 @@ extern void ux500_timer_init(void);
3621 .type = MT_DEVICE, \
3622 }
3623
3624-#define __MEM_DEV_DESC(x, sz) { \
3625- .virtual = IO_ADDRESS(x), \
3626- .pfn = __phys_to_pfn(x), \
3627- .length = sz, \
3628- .type = MT_MEMORY, \
3629-}
3630-
3631 extern struct smp_operations ux500_smp_ops;
3632 extern void ux500_cpu_die(unsigned int cpu);
3633
3634diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3635index 08c9fe9..191320c 100644
3636--- a/arch/arm/mm/Kconfig
3637+++ b/arch/arm/mm/Kconfig
3638@@ -436,7 +436,7 @@ config CPU_32v5
3639
3640 config CPU_32v6
3641 bool
3642- select CPU_USE_DOMAINS if CPU_V6 && MMU
3643+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3644 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3645
3646 config CPU_32v6K
3647@@ -585,6 +585,7 @@ config CPU_CP15_MPU
3648
3649 config CPU_USE_DOMAINS
3650 bool
3651+ depends on !ARM_LPAE && !PAX_KERNEXEC
3652 help
3653 This option enables or disables the use of domain switching
3654 via the set_fs() function.
3655@@ -780,6 +781,7 @@ config NEED_KUSER_HELPERS
3656 config KUSER_HELPERS
3657 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3658 default y
3659+ depends on !(CPU_V6 || CPU_V6K || CPU_V7)
3660 help
3661 Warning: disabling this option may break user programs.
3662
3663@@ -792,7 +794,7 @@ config KUSER_HELPERS
3664 See Documentation/arm/kernel_user_helpers.txt for details.
3665
3666 However, the fixed address nature of these helpers can be used
3667- by ROP (return orientated programming) authors when creating
3668+ by ROP (Return Oriented Programming) authors when creating
3669 exploits.
3670
3671 If all of the binaries and libraries which run on your platform
3672diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3673index 6f4585b..7b6f52b 100644
3674--- a/arch/arm/mm/alignment.c
3675+++ b/arch/arm/mm/alignment.c
3676@@ -211,10 +211,12 @@ union offset_union {
3677 #define __get16_unaligned_check(ins,val,addr) \
3678 do { \
3679 unsigned int err = 0, v, a = addr; \
3680+ pax_open_userland(); \
3681 __get8_unaligned_check(ins,v,a,err); \
3682 val = v << ((BE) ? 8 : 0); \
3683 __get8_unaligned_check(ins,v,a,err); \
3684 val |= v << ((BE) ? 0 : 8); \
3685+ pax_close_userland(); \
3686 if (err) \
3687 goto fault; \
3688 } while (0)
3689@@ -228,6 +230,7 @@ union offset_union {
3690 #define __get32_unaligned_check(ins,val,addr) \
3691 do { \
3692 unsigned int err = 0, v, a = addr; \
3693+ pax_open_userland(); \
3694 __get8_unaligned_check(ins,v,a,err); \
3695 val = v << ((BE) ? 24 : 0); \
3696 __get8_unaligned_check(ins,v,a,err); \
3697@@ -236,6 +239,7 @@ union offset_union {
3698 val |= v << ((BE) ? 8 : 16); \
3699 __get8_unaligned_check(ins,v,a,err); \
3700 val |= v << ((BE) ? 0 : 24); \
3701+ pax_close_userland(); \
3702 if (err) \
3703 goto fault; \
3704 } while (0)
3705@@ -249,6 +253,7 @@ union offset_union {
3706 #define __put16_unaligned_check(ins,val,addr) \
3707 do { \
3708 unsigned int err = 0, v = val, a = addr; \
3709+ pax_open_userland(); \
3710 __asm__( FIRST_BYTE_16 \
3711 ARM( "1: "ins" %1, [%2], #1\n" ) \
3712 THUMB( "1: "ins" %1, [%2]\n" ) \
3713@@ -268,6 +273,7 @@ union offset_union {
3714 " .popsection\n" \
3715 : "=r" (err), "=&r" (v), "=&r" (a) \
3716 : "0" (err), "1" (v), "2" (a)); \
3717+ pax_close_userland(); \
3718 if (err) \
3719 goto fault; \
3720 } while (0)
3721@@ -281,6 +287,7 @@ union offset_union {
3722 #define __put32_unaligned_check(ins,val,addr) \
3723 do { \
3724 unsigned int err = 0, v = val, a = addr; \
3725+ pax_open_userland(); \
3726 __asm__( FIRST_BYTE_32 \
3727 ARM( "1: "ins" %1, [%2], #1\n" ) \
3728 THUMB( "1: "ins" %1, [%2]\n" ) \
3729@@ -310,6 +317,7 @@ union offset_union {
3730 " .popsection\n" \
3731 : "=r" (err), "=&r" (v), "=&r" (a) \
3732 : "0" (err), "1" (v), "2" (a)); \
3733+ pax_close_userland(); \
3734 if (err) \
3735 goto fault; \
3736 } while (0)
3737diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3738index 5dbf13f..a2d1876 100644
3739--- a/arch/arm/mm/fault.c
3740+++ b/arch/arm/mm/fault.c
3741@@ -25,6 +25,7 @@
3742 #include <asm/system_misc.h>
3743 #include <asm/system_info.h>
3744 #include <asm/tlbflush.h>
3745+#include <asm/sections.h>
3746
3747 #include "fault.h"
3748
3749@@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3750 if (fixup_exception(regs))
3751 return;
3752
3753+#ifdef CONFIG_PAX_KERNEXEC
3754+ if ((fsr & FSR_WRITE) &&
3755+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3756+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3757+ {
3758+ if (current->signal->curr_ip)
3759+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3760+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3761+ else
3762+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3763+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3764+ }
3765+#endif
3766+
3767 /*
3768 * No handler, we'll have to terminate things with extreme prejudice.
3769 */
3770@@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3771 }
3772 #endif
3773
3774+#ifdef CONFIG_PAX_PAGEEXEC
3775+ if (fsr & FSR_LNX_PF) {
3776+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3777+ do_group_exit(SIGKILL);
3778+ }
3779+#endif
3780+
3781 tsk->thread.address = addr;
3782 tsk->thread.error_code = fsr;
3783 tsk->thread.trap_no = 14;
3784@@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3785 }
3786 #endif /* CONFIG_MMU */
3787
3788+#ifdef CONFIG_PAX_PAGEEXEC
3789+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3790+{
3791+ long i;
3792+
3793+ printk(KERN_ERR "PAX: bytes at PC: ");
3794+ for (i = 0; i < 20; i++) {
3795+ unsigned char c;
3796+ if (get_user(c, (__force unsigned char __user *)pc+i))
3797+ printk(KERN_CONT "?? ");
3798+ else
3799+ printk(KERN_CONT "%02x ", c);
3800+ }
3801+ printk("\n");
3802+
3803+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3804+ for (i = -1; i < 20; i++) {
3805+ unsigned long c;
3806+ if (get_user(c, (__force unsigned long __user *)sp+i))
3807+ printk(KERN_CONT "???????? ");
3808+ else
3809+ printk(KERN_CONT "%08lx ", c);
3810+ }
3811+ printk("\n");
3812+}
3813+#endif
3814+
3815 /*
3816 * First Level Translation Fault Handler
3817 *
3818@@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3819 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3820 struct siginfo info;
3821
3822+#ifdef CONFIG_PAX_MEMORY_UDEREF
3823+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3824+ if (current->signal->curr_ip)
3825+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3826+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3827+ else
3828+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3829+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3830+ goto die;
3831+ }
3832+#endif
3833+
3834 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3835 return;
3836
3837+die:
3838 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3839 inf->name, fsr, addr);
3840
3841@@ -569,15 +631,68 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
3842 ifsr_info[nr].name = name;
3843 }
3844
3845+asmlinkage int sys_sigreturn(struct pt_regs *regs);
3846+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
3847+
3848 asmlinkage void __exception
3849 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3850 {
3851 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3852 struct siginfo info;
3853+ unsigned long pc = instruction_pointer(regs);
3854+
3855+ if (user_mode(regs)) {
3856+ unsigned long sigpage = current->mm->context.sigpage;
3857+
3858+ if (sigpage <= pc && pc < sigpage + 7*4) {
3859+ if (pc < sigpage + 3*4)
3860+ sys_sigreturn(regs);
3861+ else
3862+ sys_rt_sigreturn(regs);
3863+ return;
3864+ }
3865+ if (pc == 0xffff0fe0UL) {
3866+ /*
3867+ * PaX: __kuser_get_tls emulation
3868+ */
3869+ regs->ARM_r0 = current_thread_info()->tp_value;
3870+ regs->ARM_pc = regs->ARM_lr;
3871+ return;
3872+ }
3873+ }
3874+
3875+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3876+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
3877+ if (current->signal->curr_ip)
3878+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3879+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3880+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
3881+ else
3882+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
3883+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3884+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
3885+ goto die;
3886+ }
3887+#endif
3888+
3889+#ifdef CONFIG_PAX_REFCOUNT
3890+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
3891+ unsigned int bkpt;
3892+
3893+ if (!probe_kernel_address((unsigned int *)pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
3894+ current->thread.error_code = ifsr;
3895+ current->thread.trap_no = 0;
3896+ pax_report_refcount_overflow(regs);
3897+ fixup_exception(regs);
3898+ return;
3899+ }
3900+ }
3901+#endif
3902
3903 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
3904 return;
3905
3906+die:
3907 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
3908 inf->name, ifsr, addr);
3909
3910diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
3911index cf08bdf..772656c 100644
3912--- a/arch/arm/mm/fault.h
3913+++ b/arch/arm/mm/fault.h
3914@@ -3,6 +3,7 @@
3915
3916 /*
3917 * Fault status register encodings. We steal bit 31 for our own purposes.
3918+ * Set when the FSR value is from an instruction fault.
3919 */
3920 #define FSR_LNX_PF (1 << 31)
3921 #define FSR_WRITE (1 << 11)
3922@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
3923 }
3924 #endif
3925
3926+/* valid for LPAE and !LPAE */
3927+static inline int is_xn_fault(unsigned int fsr)
3928+{
3929+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
3930+}
3931+
3932+static inline int is_domain_fault(unsigned int fsr)
3933+{
3934+ return ((fsr_fs(fsr) & 0xD) == 0x9);
3935+}
3936+
3937 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
3938 unsigned long search_exception_table(unsigned long addr);
3939
3940diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
3941index 0ecc43f..190b956 100644
3942--- a/arch/arm/mm/init.c
3943+++ b/arch/arm/mm/init.c
3944@@ -30,6 +30,8 @@
3945 #include <asm/setup.h>
3946 #include <asm/tlb.h>
3947 #include <asm/fixmap.h>
3948+#include <asm/system_info.h>
3949+#include <asm/cp15.h>
3950
3951 #include <asm/mach/arch.h>
3952 #include <asm/mach/map.h>
3953@@ -726,7 +728,46 @@ void free_initmem(void)
3954 {
3955 #ifdef CONFIG_HAVE_TCM
3956 extern char __tcm_start, __tcm_end;
3957+#endif
3958
3959+#ifdef CONFIG_PAX_KERNEXEC
3960+ unsigned long addr;
3961+ pgd_t *pgd;
3962+ pud_t *pud;
3963+ pmd_t *pmd;
3964+ int cpu_arch = cpu_architecture();
3965+ unsigned int cr = get_cr();
3966+
3967+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
3968+ /* make pages tables, etc before .text NX */
3969+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
3970+ pgd = pgd_offset_k(addr);
3971+ pud = pud_offset(pgd, addr);
3972+ pmd = pmd_offset(pud, addr);
3973+ __section_update(pmd, addr, PMD_SECT_XN);
3974+ }
3975+ /* make init NX */
3976+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
3977+ pgd = pgd_offset_k(addr);
3978+ pud = pud_offset(pgd, addr);
3979+ pmd = pmd_offset(pud, addr);
3980+ __section_update(pmd, addr, PMD_SECT_XN);
3981+ }
3982+ /* make kernel code/rodata RX */
3983+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
3984+ pgd = pgd_offset_k(addr);
3985+ pud = pud_offset(pgd, addr);
3986+ pmd = pmd_offset(pud, addr);
3987+#ifdef CONFIG_ARM_LPAE
3988+ __section_update(pmd, addr, PMD_SECT_RDONLY);
3989+#else
3990+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
3991+#endif
3992+ }
3993+ }
3994+#endif
3995+
3996+#ifdef CONFIG_HAVE_TCM
3997 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
3998 free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link");
3999 #endif
4000diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4001index 04d9006..c547d85 100644
4002--- a/arch/arm/mm/ioremap.c
4003+++ b/arch/arm/mm/ioremap.c
4004@@ -392,9 +392,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
4005 unsigned int mtype;
4006
4007 if (cached)
4008- mtype = MT_MEMORY;
4009+ mtype = MT_MEMORY_RX;
4010 else
4011- mtype = MT_MEMORY_NONCACHED;
4012+ mtype = MT_MEMORY_NONCACHED_RX;
4013
4014 return __arm_ioremap_caller(phys_addr, size, mtype,
4015 __builtin_return_address(0));
4016diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4017index 10062ce..8695745 100644
4018--- a/arch/arm/mm/mmap.c
4019+++ b/arch/arm/mm/mmap.c
4020@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4021 struct vm_area_struct *vma;
4022 int do_align = 0;
4023 int aliasing = cache_is_vipt_aliasing();
4024+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4025 struct vm_unmapped_area_info info;
4026
4027 /*
4028@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4029 if (len > TASK_SIZE)
4030 return -ENOMEM;
4031
4032+#ifdef CONFIG_PAX_RANDMMAP
4033+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4034+#endif
4035+
4036 if (addr) {
4037 if (do_align)
4038 addr = COLOUR_ALIGN(addr, pgoff);
4039@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4040 addr = PAGE_ALIGN(addr);
4041
4042 vma = find_vma(mm, addr);
4043- if (TASK_SIZE - len >= addr &&
4044- (!vma || addr + len <= vma->vm_start))
4045+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4046 return addr;
4047 }
4048
4049@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4050 info.high_limit = TASK_SIZE;
4051 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4052 info.align_offset = pgoff << PAGE_SHIFT;
4053+ info.threadstack_offset = offset;
4054 return vm_unmapped_area(&info);
4055 }
4056
4057@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4058 unsigned long addr = addr0;
4059 int do_align = 0;
4060 int aliasing = cache_is_vipt_aliasing();
4061+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4062 struct vm_unmapped_area_info info;
4063
4064 /*
4065@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4066 return addr;
4067 }
4068
4069+#ifdef CONFIG_PAX_RANDMMAP
4070+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4071+#endif
4072+
4073 /* requesting a specific address */
4074 if (addr) {
4075 if (do_align)
4076@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4077 else
4078 addr = PAGE_ALIGN(addr);
4079 vma = find_vma(mm, addr);
4080- if (TASK_SIZE - len >= addr &&
4081- (!vma || addr + len <= vma->vm_start))
4082+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4083 return addr;
4084 }
4085
4086@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4087 info.high_limit = mm->mmap_base;
4088 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4089 info.align_offset = pgoff << PAGE_SHIFT;
4090+ info.threadstack_offset = offset;
4091 addr = vm_unmapped_area(&info);
4092
4093 /*
4094@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4095 {
4096 unsigned long random_factor = 0UL;
4097
4098+#ifdef CONFIG_PAX_RANDMMAP
4099+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4100+#endif
4101+
4102 /* 8 bits of randomness in 20 address space bits */
4103 if ((current->flags & PF_RANDOMIZE) &&
4104 !(current->personality & ADDR_NO_RANDOMIZE))
4105@@ -180,10 +194,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4106
4107 if (mmap_is_legacy()) {
4108 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4109+
4110+#ifdef CONFIG_PAX_RANDMMAP
4111+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4112+ mm->mmap_base += mm->delta_mmap;
4113+#endif
4114+
4115 mm->get_unmapped_area = arch_get_unmapped_area;
4116 mm->unmap_area = arch_unmap_area;
4117 } else {
4118 mm->mmap_base = mmap_base(random_factor);
4119+
4120+#ifdef CONFIG_PAX_RANDMMAP
4121+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4122+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4123+#endif
4124+
4125 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4126 mm->unmap_area = arch_unmap_area_topdown;
4127 }
4128diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4129index daf336f..4e6392c 100644
4130--- a/arch/arm/mm/mmu.c
4131+++ b/arch/arm/mm/mmu.c
4132@@ -36,6 +36,22 @@
4133 #include "mm.h"
4134 #include "tcm.h"
4135
4136+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4137+void modify_domain(unsigned int dom, unsigned int type)
4138+{
4139+ struct thread_info *thread = current_thread_info();
4140+ unsigned int domain = thread->cpu_domain;
4141+ /*
4142+ * DOMAIN_MANAGER might be defined to some other value,
4143+ * use the arch-defined constant
4144+ */
4145+ domain &= ~domain_val(dom, 3);
4146+ thread->cpu_domain = domain | domain_val(dom, type);
4147+ set_domain(thread->cpu_domain);
4148+}
4149+EXPORT_SYMBOL(modify_domain);
4150+#endif
4151+
4152 /*
4153 * empty_zero_page is a special page that is used for
4154 * zero-initialized data and COW.
4155@@ -228,10 +244,18 @@ __setup("noalign", noalign_setup);
4156
4157 #endif /* ifdef CONFIG_CPU_CP15 / else */
4158
4159-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
4160+#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
4161 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4162
4163-static struct mem_type mem_types[] = {
4164+#ifdef CONFIG_PAX_KERNEXEC
4165+#define L_PTE_KERNEXEC L_PTE_RDONLY
4166+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4167+#else
4168+#define L_PTE_KERNEXEC L_PTE_DIRTY
4169+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4170+#endif
4171+
4172+static struct mem_type mem_types[] __read_only = {
4173 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4174 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4175 L_PTE_SHARED,
4176@@ -260,16 +284,16 @@ static struct mem_type mem_types[] = {
4177 [MT_UNCACHED] = {
4178 .prot_pte = PROT_PTE_DEVICE,
4179 .prot_l1 = PMD_TYPE_TABLE,
4180- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4181+ .prot_sect = PROT_SECT_DEVICE,
4182 .domain = DOMAIN_IO,
4183 },
4184 [MT_CACHECLEAN] = {
4185- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4186+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4187 .domain = DOMAIN_KERNEL,
4188 },
4189 #ifndef CONFIG_ARM_LPAE
4190 [MT_MINICLEAN] = {
4191- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4192+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
4193 .domain = DOMAIN_KERNEL,
4194 },
4195 #endif
4196@@ -277,36 +301,54 @@ static struct mem_type mem_types[] = {
4197 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4198 L_PTE_RDONLY,
4199 .prot_l1 = PMD_TYPE_TABLE,
4200- .domain = DOMAIN_USER,
4201+ .domain = DOMAIN_VECTORS,
4202 },
4203 [MT_HIGH_VECTORS] = {
4204 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4205 L_PTE_USER | L_PTE_RDONLY,
4206 .prot_l1 = PMD_TYPE_TABLE,
4207- .domain = DOMAIN_USER,
4208+ .domain = DOMAIN_VECTORS,
4209 },
4210- [MT_MEMORY] = {
4211+ [MT_MEMORY_RWX] = {
4212 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4213 .prot_l1 = PMD_TYPE_TABLE,
4214 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4215 .domain = DOMAIN_KERNEL,
4216 },
4217+ [MT_MEMORY_RW] = {
4218+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4219+ .prot_l1 = PMD_TYPE_TABLE,
4220+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4221+ .domain = DOMAIN_KERNEL,
4222+ },
4223+ [MT_MEMORY_RX] = {
4224+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4225+ .prot_l1 = PMD_TYPE_TABLE,
4226+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4227+ .domain = DOMAIN_KERNEL,
4228+ },
4229 [MT_ROM] = {
4230- .prot_sect = PMD_TYPE_SECT,
4231+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4232 .domain = DOMAIN_KERNEL,
4233 },
4234- [MT_MEMORY_NONCACHED] = {
4235+ [MT_MEMORY_NONCACHED_RW] = {
4236 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4237 L_PTE_MT_BUFFERABLE,
4238 .prot_l1 = PMD_TYPE_TABLE,
4239 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4240 .domain = DOMAIN_KERNEL,
4241 },
4242+ [MT_MEMORY_NONCACHED_RX] = {
4243+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4244+ L_PTE_MT_BUFFERABLE,
4245+ .prot_l1 = PMD_TYPE_TABLE,
4246+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4247+ .domain = DOMAIN_KERNEL,
4248+ },
4249 [MT_MEMORY_DTCM] = {
4250- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4251- L_PTE_XN,
4252+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4253 .prot_l1 = PMD_TYPE_TABLE,
4254- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4255+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4256 .domain = DOMAIN_KERNEL,
4257 },
4258 [MT_MEMORY_ITCM] = {
4259@@ -316,10 +358,10 @@ static struct mem_type mem_types[] = {
4260 },
4261 [MT_MEMORY_SO] = {
4262 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4263- L_PTE_MT_UNCACHED | L_PTE_XN,
4264+ L_PTE_MT_UNCACHED,
4265 .prot_l1 = PMD_TYPE_TABLE,
4266 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
4267- PMD_SECT_UNCACHED | PMD_SECT_XN,
4268+ PMD_SECT_UNCACHED,
4269 .domain = DOMAIN_KERNEL,
4270 },
4271 [MT_MEMORY_DMA_READY] = {
4272@@ -405,9 +447,35 @@ static void __init build_mem_type_table(void)
4273 * to prevent speculative instruction fetches.
4274 */
4275 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
4276+ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
4277 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
4278+ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
4279 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
4280+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
4281 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
4282+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
4283+
4284+ /* Mark other regions on ARMv6+ as execute-never */
4285+
4286+#ifdef CONFIG_PAX_KERNEXEC
4287+ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
4288+ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
4289+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
4290+ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
4291+#ifndef CONFIG_ARM_LPAE
4292+ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
4293+ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
4294+#endif
4295+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
4296+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
4297+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
4298+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= PMD_SECT_XN;
4299+ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
4300+ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
4301+#endif
4302+
4303+ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
4304+ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
4305 }
4306 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4307 /*
4308@@ -468,6 +536,9 @@ static void __init build_mem_type_table(void)
4309 * from SVC mode and no access from userspace.
4310 */
4311 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4312+#ifdef CONFIG_PAX_KERNEXEC
4313+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4314+#endif
4315 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4316 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4317 #endif
4318@@ -485,11 +556,17 @@ static void __init build_mem_type_table(void)
4319 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4320 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4321 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4322- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
4323- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
4324+ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4325+ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4326+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4327+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4328+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4329+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4330 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4331- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
4332- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
4333+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
4334+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
4335+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
4336+ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
4337 }
4338 }
4339
4340@@ -500,15 +577,20 @@ static void __init build_mem_type_table(void)
4341 if (cpu_arch >= CPU_ARCH_ARMv6) {
4342 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4343 /* Non-cacheable Normal is XCB = 001 */
4344- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4345+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4346+ PMD_SECT_BUFFERED;
4347+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4348 PMD_SECT_BUFFERED;
4349 } else {
4350 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4351- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4352+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4353+ PMD_SECT_TEX(1);
4354+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4355 PMD_SECT_TEX(1);
4356 }
4357 } else {
4358- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4359+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
4360+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
4361 }
4362
4363 #ifdef CONFIG_ARM_LPAE
4364@@ -524,6 +606,8 @@ static void __init build_mem_type_table(void)
4365 vecs_pgprot |= PTE_EXT_AF;
4366 #endif
4367
4368+ user_pgprot |= __supported_pte_mask;
4369+
4370 for (i = 0; i < 16; i++) {
4371 pteval_t v = pgprot_val(protection_map[i]);
4372 protection_map[i] = __pgprot(v | user_pgprot);
4373@@ -541,10 +625,15 @@ static void __init build_mem_type_table(void)
4374
4375 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4376 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4377- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4378- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4379+ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4380+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4381+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4382+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4383+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4384+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4385 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4386- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4387+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4388+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4389 mem_types[MT_ROM].prot_sect |= cp->pmd;
4390
4391 switch (cp->pmd) {
4392@@ -1166,18 +1255,15 @@ void __init arm_mm_memblock_reserve(void)
4393 * called function. This means you can't use any function or debugging
4394 * method which may touch any device, otherwise the kernel _will_ crash.
4395 */
4396+
4397+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4398+
4399 static void __init devicemaps_init(struct machine_desc *mdesc)
4400 {
4401 struct map_desc map;
4402 unsigned long addr;
4403- void *vectors;
4404
4405- /*
4406- * Allocate the vector page early.
4407- */
4408- vectors = early_alloc(PAGE_SIZE * 2);
4409-
4410- early_trap_init(vectors);
4411+ early_trap_init(&vectors);
4412
4413 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4414 pmd_clear(pmd_off_k(addr));
4415@@ -1217,7 +1303,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
4416 * location (0xffff0000). If we aren't using high-vectors, also
4417 * create a mapping at the low-vectors virtual address.
4418 */
4419- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4420+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4421 map.virtual = 0xffff0000;
4422 map.length = PAGE_SIZE;
4423 #ifdef CONFIG_KUSER_HELPERS
4424@@ -1287,8 +1373,39 @@ static void __init map_lowmem(void)
4425 map.pfn = __phys_to_pfn(start);
4426 map.virtual = __phys_to_virt(start);
4427 map.length = end - start;
4428- map.type = MT_MEMORY;
4429
4430+#ifdef CONFIG_PAX_KERNEXEC
4431+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4432+ struct map_desc kernel;
4433+ struct map_desc initmap;
4434+
4435+ /* when freeing initmem we will make this RW */
4436+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4437+ initmap.virtual = (unsigned long)__init_begin;
4438+ initmap.length = _sdata - __init_begin;
4439+ initmap.type = MT_MEMORY_RWX;
4440+ create_mapping(&initmap);
4441+
4442+ /* when freeing initmem we will make this RX */
4443+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4444+ kernel.virtual = (unsigned long)_stext;
4445+ kernel.length = __init_begin - _stext;
4446+ kernel.type = MT_MEMORY_RWX;
4447+ create_mapping(&kernel);
4448+
4449+ if (map.virtual < (unsigned long)_stext) {
4450+ map.length = (unsigned long)_stext - map.virtual;
4451+ map.type = MT_MEMORY_RWX;
4452+ create_mapping(&map);
4453+ }
4454+
4455+ map.pfn = __phys_to_pfn(__pa(_sdata));
4456+ map.virtual = (unsigned long)_sdata;
4457+ map.length = end - __pa(_sdata);
4458+ }
4459+#endif
4460+
4461+ map.type = MT_MEMORY_RW;
4462 create_mapping(&map);
4463 }
4464 }
4465diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4466index a5bc92d..0bb4730 100644
4467--- a/arch/arm/plat-omap/sram.c
4468+++ b/arch/arm/plat-omap/sram.c
4469@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4470 * Looks like we need to preserve some bootloader code at the
4471 * beginning of SRAM for jumping to flash for reboot to work...
4472 */
4473+ pax_open_kernel();
4474 memset_io(omap_sram_base + omap_sram_skip, 0,
4475 omap_sram_size - omap_sram_skip);
4476+ pax_close_kernel();
4477 }
4478diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4479index ce6d763..cfea917 100644
4480--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4481+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4482@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4483 int (*started)(unsigned ch);
4484 int (*flush)(unsigned ch);
4485 int (*stop)(unsigned ch);
4486-};
4487+} __no_const;
4488
4489 extern void *samsung_dmadev_get_ops(void);
4490 extern void *s3c_dma_get_ops(void);
4491diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
4492index f4726dc..39ed646 100644
4493--- a/arch/arm64/kernel/debug-monitors.c
4494+++ b/arch/arm64/kernel/debug-monitors.c
4495@@ -149,7 +149,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
4496 return NOTIFY_OK;
4497 }
4498
4499-static struct notifier_block __cpuinitdata os_lock_nb = {
4500+static struct notifier_block os_lock_nb = {
4501 .notifier_call = os_lock_notify,
4502 };
4503
4504diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
4505index 5ab825c..96aaec8 100644
4506--- a/arch/arm64/kernel/hw_breakpoint.c
4507+++ b/arch/arm64/kernel/hw_breakpoint.c
4508@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
4509 return NOTIFY_OK;
4510 }
4511
4512-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
4513+static struct notifier_block hw_breakpoint_reset_nb = {
4514 .notifier_call = hw_breakpoint_reset_notify,
4515 };
4516
4517diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4518index c3a58a1..78fbf54 100644
4519--- a/arch/avr32/include/asm/cache.h
4520+++ b/arch/avr32/include/asm/cache.h
4521@@ -1,8 +1,10 @@
4522 #ifndef __ASM_AVR32_CACHE_H
4523 #define __ASM_AVR32_CACHE_H
4524
4525+#include <linux/const.h>
4526+
4527 #define L1_CACHE_SHIFT 5
4528-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4529+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4530
4531 /*
4532 * Memory returned by kmalloc() may be used for DMA, so we must make
4533diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4534index d232888..87c8df1 100644
4535--- a/arch/avr32/include/asm/elf.h
4536+++ b/arch/avr32/include/asm/elf.h
4537@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4538 the loader. We need to make sure that it is out of the way of the program
4539 that it will "exec", and that there is sufficient room for the brk. */
4540
4541-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4542+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4543
4544+#ifdef CONFIG_PAX_ASLR
4545+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4546+
4547+#define PAX_DELTA_MMAP_LEN 15
4548+#define PAX_DELTA_STACK_LEN 15
4549+#endif
4550
4551 /* This yields a mask that user programs can use to figure out what
4552 instruction set this CPU supports. This could be done in user space,
4553diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4554index 479330b..53717a8 100644
4555--- a/arch/avr32/include/asm/kmap_types.h
4556+++ b/arch/avr32/include/asm/kmap_types.h
4557@@ -2,9 +2,9 @@
4558 #define __ASM_AVR32_KMAP_TYPES_H
4559
4560 #ifdef CONFIG_DEBUG_HIGHMEM
4561-# define KM_TYPE_NR 29
4562+# define KM_TYPE_NR 30
4563 #else
4564-# define KM_TYPE_NR 14
4565+# define KM_TYPE_NR 15
4566 #endif
4567
4568 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4569diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4570index b2f2d2d..d1c85cb 100644
4571--- a/arch/avr32/mm/fault.c
4572+++ b/arch/avr32/mm/fault.c
4573@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4574
4575 int exception_trace = 1;
4576
4577+#ifdef CONFIG_PAX_PAGEEXEC
4578+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4579+{
4580+ unsigned long i;
4581+
4582+ printk(KERN_ERR "PAX: bytes at PC: ");
4583+ for (i = 0; i < 20; i++) {
4584+ unsigned char c;
4585+ if (get_user(c, (unsigned char *)pc+i))
4586+ printk(KERN_CONT "???????? ");
4587+ else
4588+ printk(KERN_CONT "%02x ", c);
4589+ }
4590+ printk("\n");
4591+}
4592+#endif
4593+
4594 /*
4595 * This routine handles page faults. It determines the address and the
4596 * problem, and then passes it off to one of the appropriate routines.
4597@@ -174,6 +191,16 @@ bad_area:
4598 up_read(&mm->mmap_sem);
4599
4600 if (user_mode(regs)) {
4601+
4602+#ifdef CONFIG_PAX_PAGEEXEC
4603+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4604+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4605+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4606+ do_group_exit(SIGKILL);
4607+ }
4608+ }
4609+#endif
4610+
4611 if (exception_trace && printk_ratelimit())
4612 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4613 "sp %08lx ecr %lu\n",
4614diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4615index 568885a..f8008df 100644
4616--- a/arch/blackfin/include/asm/cache.h
4617+++ b/arch/blackfin/include/asm/cache.h
4618@@ -7,6 +7,7 @@
4619 #ifndef __ARCH_BLACKFIN_CACHE_H
4620 #define __ARCH_BLACKFIN_CACHE_H
4621
4622+#include <linux/const.h>
4623 #include <linux/linkage.h> /* for asmlinkage */
4624
4625 /*
4626@@ -14,7 +15,7 @@
4627 * Blackfin loads 32 bytes for cache
4628 */
4629 #define L1_CACHE_SHIFT 5
4630-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4631+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4632 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4633
4634 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4635diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4636index aea2718..3639a60 100644
4637--- a/arch/cris/include/arch-v10/arch/cache.h
4638+++ b/arch/cris/include/arch-v10/arch/cache.h
4639@@ -1,8 +1,9 @@
4640 #ifndef _ASM_ARCH_CACHE_H
4641 #define _ASM_ARCH_CACHE_H
4642
4643+#include <linux/const.h>
4644 /* Etrax 100LX have 32-byte cache-lines. */
4645-#define L1_CACHE_BYTES 32
4646 #define L1_CACHE_SHIFT 5
4647+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4648
4649 #endif /* _ASM_ARCH_CACHE_H */
4650diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4651index 7caf25d..ee65ac5 100644
4652--- a/arch/cris/include/arch-v32/arch/cache.h
4653+++ b/arch/cris/include/arch-v32/arch/cache.h
4654@@ -1,11 +1,12 @@
4655 #ifndef _ASM_CRIS_ARCH_CACHE_H
4656 #define _ASM_CRIS_ARCH_CACHE_H
4657
4658+#include <linux/const.h>
4659 #include <arch/hwregs/dma.h>
4660
4661 /* A cache-line is 32 bytes. */
4662-#define L1_CACHE_BYTES 32
4663 #define L1_CACHE_SHIFT 5
4664+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4665
4666 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4667
4668diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4669index b86329d..6709906 100644
4670--- a/arch/frv/include/asm/atomic.h
4671+++ b/arch/frv/include/asm/atomic.h
4672@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4673 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4674 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4675
4676+#define atomic64_read_unchecked(v) atomic64_read(v)
4677+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4678+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4679+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4680+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4681+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4682+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4683+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4684+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4685+
4686 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4687 {
4688 int c, old;
4689diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4690index 2797163..c2a401d 100644
4691--- a/arch/frv/include/asm/cache.h
4692+++ b/arch/frv/include/asm/cache.h
4693@@ -12,10 +12,11 @@
4694 #ifndef __ASM_CACHE_H
4695 #define __ASM_CACHE_H
4696
4697+#include <linux/const.h>
4698
4699 /* bytes per L1 cache line */
4700 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4701-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4702+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4703
4704 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4705 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4706diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4707index 43901f2..0d8b865 100644
4708--- a/arch/frv/include/asm/kmap_types.h
4709+++ b/arch/frv/include/asm/kmap_types.h
4710@@ -2,6 +2,6 @@
4711 #ifndef _ASM_KMAP_TYPES_H
4712 #define _ASM_KMAP_TYPES_H
4713
4714-#define KM_TYPE_NR 17
4715+#define KM_TYPE_NR 18
4716
4717 #endif
4718diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4719index 836f147..4cf23f5 100644
4720--- a/arch/frv/mm/elf-fdpic.c
4721+++ b/arch/frv/mm/elf-fdpic.c
4722@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4723 {
4724 struct vm_area_struct *vma;
4725 struct vm_unmapped_area_info info;
4726+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4727
4728 if (len > TASK_SIZE)
4729 return -ENOMEM;
4730@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4731 if (addr) {
4732 addr = PAGE_ALIGN(addr);
4733 vma = find_vma(current->mm, addr);
4734- if (TASK_SIZE - len >= addr &&
4735- (!vma || addr + len <= vma->vm_start))
4736+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4737 goto success;
4738 }
4739
4740@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4741 info.high_limit = (current->mm->start_stack - 0x00200000);
4742 info.align_mask = 0;
4743 info.align_offset = 0;
4744+ info.threadstack_offset = offset;
4745 addr = vm_unmapped_area(&info);
4746 if (!(addr & ~PAGE_MASK))
4747 goto success;
4748diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4749index f4ca594..adc72fd6 100644
4750--- a/arch/hexagon/include/asm/cache.h
4751+++ b/arch/hexagon/include/asm/cache.h
4752@@ -21,9 +21,11 @@
4753 #ifndef __ASM_CACHE_H
4754 #define __ASM_CACHE_H
4755
4756+#include <linux/const.h>
4757+
4758 /* Bytes per L1 cache line */
4759-#define L1_CACHE_SHIFT (5)
4760-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4761+#define L1_CACHE_SHIFT 5
4762+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4763
4764 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4765 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4766diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4767index 6e6fe18..a6ae668 100644
4768--- a/arch/ia64/include/asm/atomic.h
4769+++ b/arch/ia64/include/asm/atomic.h
4770@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4771 #define atomic64_inc(v) atomic64_add(1, (v))
4772 #define atomic64_dec(v) atomic64_sub(1, (v))
4773
4774+#define atomic64_read_unchecked(v) atomic64_read(v)
4775+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4776+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4777+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4778+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4779+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4780+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4781+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4782+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4783+
4784 /* Atomic operations are already serializing */
4785 #define smp_mb__before_atomic_dec() barrier()
4786 #define smp_mb__after_atomic_dec() barrier()
4787diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4788index 988254a..e1ee885 100644
4789--- a/arch/ia64/include/asm/cache.h
4790+++ b/arch/ia64/include/asm/cache.h
4791@@ -1,6 +1,7 @@
4792 #ifndef _ASM_IA64_CACHE_H
4793 #define _ASM_IA64_CACHE_H
4794
4795+#include <linux/const.h>
4796
4797 /*
4798 * Copyright (C) 1998-2000 Hewlett-Packard Co
4799@@ -9,7 +10,7 @@
4800
4801 /* Bytes per L1 (data) cache line. */
4802 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4803-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4804+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4805
4806 #ifdef CONFIG_SMP
4807 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4808diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4809index 5a83c5c..4d7f553 100644
4810--- a/arch/ia64/include/asm/elf.h
4811+++ b/arch/ia64/include/asm/elf.h
4812@@ -42,6 +42,13 @@
4813 */
4814 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4815
4816+#ifdef CONFIG_PAX_ASLR
4817+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4818+
4819+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4820+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4821+#endif
4822+
4823 #define PT_IA_64_UNWIND 0x70000001
4824
4825 /* IA-64 relocations: */
4826diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4827index 96a8d92..617a1cf 100644
4828--- a/arch/ia64/include/asm/pgalloc.h
4829+++ b/arch/ia64/include/asm/pgalloc.h
4830@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4831 pgd_val(*pgd_entry) = __pa(pud);
4832 }
4833
4834+static inline void
4835+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4836+{
4837+ pgd_populate(mm, pgd_entry, pud);
4838+}
4839+
4840 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4841 {
4842 return quicklist_alloc(0, GFP_KERNEL, NULL);
4843@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4844 pud_val(*pud_entry) = __pa(pmd);
4845 }
4846
4847+static inline void
4848+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4849+{
4850+ pud_populate(mm, pud_entry, pmd);
4851+}
4852+
4853 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
4854 {
4855 return quicklist_alloc(0, GFP_KERNEL, NULL);
4856diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
4857index 815810c..d60bd4c 100644
4858--- a/arch/ia64/include/asm/pgtable.h
4859+++ b/arch/ia64/include/asm/pgtable.h
4860@@ -12,7 +12,7 @@
4861 * David Mosberger-Tang <davidm@hpl.hp.com>
4862 */
4863
4864-
4865+#include <linux/const.h>
4866 #include <asm/mman.h>
4867 #include <asm/page.h>
4868 #include <asm/processor.h>
4869@@ -142,6 +142,17 @@
4870 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4871 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4872 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
4873+
4874+#ifdef CONFIG_PAX_PAGEEXEC
4875+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
4876+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4877+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4878+#else
4879+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4880+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4881+# define PAGE_COPY_NOEXEC PAGE_COPY
4882+#endif
4883+
4884 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
4885 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
4886 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
4887diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
4888index 54ff557..70c88b7 100644
4889--- a/arch/ia64/include/asm/spinlock.h
4890+++ b/arch/ia64/include/asm/spinlock.h
4891@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
4892 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
4893
4894 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
4895- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
4896+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
4897 }
4898
4899 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
4900diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
4901index 449c8c0..18965fb 100644
4902--- a/arch/ia64/include/asm/uaccess.h
4903+++ b/arch/ia64/include/asm/uaccess.h
4904@@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
4905 static inline unsigned long
4906 __copy_to_user (void __user *to, const void *from, unsigned long count)
4907 {
4908+ if (count > INT_MAX)
4909+ return count;
4910+
4911+ if (!__builtin_constant_p(count))
4912+ check_object_size(from, count, true);
4913+
4914 return __copy_user(to, (__force void __user *) from, count);
4915 }
4916
4917 static inline unsigned long
4918 __copy_from_user (void *to, const void __user *from, unsigned long count)
4919 {
4920+ if (count > INT_MAX)
4921+ return count;
4922+
4923+ if (!__builtin_constant_p(count))
4924+ check_object_size(to, count, false);
4925+
4926 return __copy_user((__force void __user *) to, from, count);
4927 }
4928
4929@@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4930 ({ \
4931 void __user *__cu_to = (to); \
4932 const void *__cu_from = (from); \
4933- long __cu_len = (n); \
4934+ unsigned long __cu_len = (n); \
4935 \
4936- if (__access_ok(__cu_to, __cu_len, get_fs())) \
4937+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
4938+ if (!__builtin_constant_p(n)) \
4939+ check_object_size(__cu_from, __cu_len, true); \
4940 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
4941+ } \
4942 __cu_len; \
4943 })
4944
4945@@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4946 ({ \
4947 void *__cu_to = (to); \
4948 const void __user *__cu_from = (from); \
4949- long __cu_len = (n); \
4950+ unsigned long __cu_len = (n); \
4951 \
4952 __chk_user_ptr(__cu_from); \
4953- if (__access_ok(__cu_from, __cu_len, get_fs())) \
4954+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
4955+ if (!__builtin_constant_p(n)) \
4956+ check_object_size(__cu_to, __cu_len, false); \
4957 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
4958+ } \
4959 __cu_len; \
4960 })
4961
4962diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
4963index 2d67317..07d8bfa 100644
4964--- a/arch/ia64/kernel/err_inject.c
4965+++ b/arch/ia64/kernel/err_inject.c
4966@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
4967 return NOTIFY_OK;
4968 }
4969
4970-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
4971+static struct notifier_block err_inject_cpu_notifier =
4972 {
4973 .notifier_call = err_inject_cpu_callback,
4974 };
4975diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
4976index d7396db..b33e873 100644
4977--- a/arch/ia64/kernel/mca.c
4978+++ b/arch/ia64/kernel/mca.c
4979@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
4980 return NOTIFY_OK;
4981 }
4982
4983-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
4984+static struct notifier_block mca_cpu_notifier = {
4985 .notifier_call = mca_cpu_callback
4986 };
4987
4988diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
4989index 24603be..948052d 100644
4990--- a/arch/ia64/kernel/module.c
4991+++ b/arch/ia64/kernel/module.c
4992@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
4993 void
4994 module_free (struct module *mod, void *module_region)
4995 {
4996- if (mod && mod->arch.init_unw_table &&
4997- module_region == mod->module_init) {
4998+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
4999 unw_remove_unwind_table(mod->arch.init_unw_table);
5000 mod->arch.init_unw_table = NULL;
5001 }
5002@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5003 }
5004
5005 static inline int
5006+in_init_rx (const struct module *mod, uint64_t addr)
5007+{
5008+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5009+}
5010+
5011+static inline int
5012+in_init_rw (const struct module *mod, uint64_t addr)
5013+{
5014+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5015+}
5016+
5017+static inline int
5018 in_init (const struct module *mod, uint64_t addr)
5019 {
5020- return addr - (uint64_t) mod->module_init < mod->init_size;
5021+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5022+}
5023+
5024+static inline int
5025+in_core_rx (const struct module *mod, uint64_t addr)
5026+{
5027+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5028+}
5029+
5030+static inline int
5031+in_core_rw (const struct module *mod, uint64_t addr)
5032+{
5033+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5034 }
5035
5036 static inline int
5037 in_core (const struct module *mod, uint64_t addr)
5038 {
5039- return addr - (uint64_t) mod->module_core < mod->core_size;
5040+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5041 }
5042
5043 static inline int
5044@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5045 break;
5046
5047 case RV_BDREL:
5048- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5049+ if (in_init_rx(mod, val))
5050+ val -= (uint64_t) mod->module_init_rx;
5051+ else if (in_init_rw(mod, val))
5052+ val -= (uint64_t) mod->module_init_rw;
5053+ else if (in_core_rx(mod, val))
5054+ val -= (uint64_t) mod->module_core_rx;
5055+ else if (in_core_rw(mod, val))
5056+ val -= (uint64_t) mod->module_core_rw;
5057 break;
5058
5059 case RV_LTV:
5060@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5061 * addresses have been selected...
5062 */
5063 uint64_t gp;
5064- if (mod->core_size > MAX_LTOFF)
5065+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5066 /*
5067 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5068 * at the end of the module.
5069 */
5070- gp = mod->core_size - MAX_LTOFF / 2;
5071+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5072 else
5073- gp = mod->core_size / 2;
5074- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5075+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5076+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5077 mod->arch.gp = gp;
5078 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5079 }
5080diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5081index 2b3c2d7..a318d84 100644
5082--- a/arch/ia64/kernel/palinfo.c
5083+++ b/arch/ia64/kernel/palinfo.c
5084@@ -980,7 +980,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
5085 return NOTIFY_OK;
5086 }
5087
5088-static struct notifier_block __refdata palinfo_cpu_notifier =
5089+static struct notifier_block palinfo_cpu_notifier =
5090 {
5091 .notifier_call = palinfo_cpu_callback,
5092 .priority = 0,
5093diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
5094index 4bc580a..7767f24 100644
5095--- a/arch/ia64/kernel/salinfo.c
5096+++ b/arch/ia64/kernel/salinfo.c
5097@@ -609,7 +609,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
5098 return NOTIFY_OK;
5099 }
5100
5101-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
5102+static struct notifier_block salinfo_cpu_notifier =
5103 {
5104 .notifier_call = salinfo_cpu_callback,
5105 .priority = 0,
5106diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5107index 41e33f8..65180b2 100644
5108--- a/arch/ia64/kernel/sys_ia64.c
5109+++ b/arch/ia64/kernel/sys_ia64.c
5110@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5111 unsigned long align_mask = 0;
5112 struct mm_struct *mm = current->mm;
5113 struct vm_unmapped_area_info info;
5114+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5115
5116 if (len > RGN_MAP_LIMIT)
5117 return -ENOMEM;
5118@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5119 if (REGION_NUMBER(addr) == RGN_HPAGE)
5120 addr = 0;
5121 #endif
5122+
5123+#ifdef CONFIG_PAX_RANDMMAP
5124+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5125+ addr = mm->free_area_cache;
5126+ else
5127+#endif
5128+
5129 if (!addr)
5130 addr = TASK_UNMAPPED_BASE;
5131
5132@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5133 info.high_limit = TASK_SIZE;
5134 info.align_mask = align_mask;
5135 info.align_offset = 0;
5136+ info.threadstack_offset = offset;
5137 return vm_unmapped_area(&info);
5138 }
5139
5140diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
5141index dc00b2c..cce53c2 100644
5142--- a/arch/ia64/kernel/topology.c
5143+++ b/arch/ia64/kernel/topology.c
5144@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
5145 return NOTIFY_OK;
5146 }
5147
5148-static struct notifier_block __cpuinitdata cache_cpu_notifier =
5149+static struct notifier_block cache_cpu_notifier =
5150 {
5151 .notifier_call = cache_cpu_callback
5152 };
5153diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5154index 0ccb28f..8992469 100644
5155--- a/arch/ia64/kernel/vmlinux.lds.S
5156+++ b/arch/ia64/kernel/vmlinux.lds.S
5157@@ -198,7 +198,7 @@ SECTIONS {
5158 /* Per-cpu data: */
5159 . = ALIGN(PERCPU_PAGE_SIZE);
5160 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5161- __phys_per_cpu_start = __per_cpu_load;
5162+ __phys_per_cpu_start = per_cpu_load;
5163 /*
5164 * ensure percpu data fits
5165 * into percpu page size
5166diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5167index 6cf0341..d352594 100644
5168--- a/arch/ia64/mm/fault.c
5169+++ b/arch/ia64/mm/fault.c
5170@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5171 return pte_present(pte);
5172 }
5173
5174+#ifdef CONFIG_PAX_PAGEEXEC
5175+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5176+{
5177+ unsigned long i;
5178+
5179+ printk(KERN_ERR "PAX: bytes at PC: ");
5180+ for (i = 0; i < 8; i++) {
5181+ unsigned int c;
5182+ if (get_user(c, (unsigned int *)pc+i))
5183+ printk(KERN_CONT "???????? ");
5184+ else
5185+ printk(KERN_CONT "%08x ", c);
5186+ }
5187+ printk("\n");
5188+}
5189+#endif
5190+
5191 # define VM_READ_BIT 0
5192 # define VM_WRITE_BIT 1
5193 # define VM_EXEC_BIT 2
5194@@ -149,8 +166,21 @@ retry:
5195 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5196 goto bad_area;
5197
5198- if ((vma->vm_flags & mask) != mask)
5199+ if ((vma->vm_flags & mask) != mask) {
5200+
5201+#ifdef CONFIG_PAX_PAGEEXEC
5202+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5203+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5204+ goto bad_area;
5205+
5206+ up_read(&mm->mmap_sem);
5207+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5208+ do_group_exit(SIGKILL);
5209+ }
5210+#endif
5211+
5212 goto bad_area;
5213+ }
5214
5215 /*
5216 * If for any reason at all we couldn't handle the fault, make
5217diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5218index 76069c1..c2aa816 100644
5219--- a/arch/ia64/mm/hugetlbpage.c
5220+++ b/arch/ia64/mm/hugetlbpage.c
5221@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5222 unsigned long pgoff, unsigned long flags)
5223 {
5224 struct vm_unmapped_area_info info;
5225+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5226
5227 if (len > RGN_MAP_LIMIT)
5228 return -ENOMEM;
5229@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5230 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5231 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5232 info.align_offset = 0;
5233+ info.threadstack_offset = offset;
5234 return vm_unmapped_area(&info);
5235 }
5236
5237diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5238index d1fe4b4..2628f37 100644
5239--- a/arch/ia64/mm/init.c
5240+++ b/arch/ia64/mm/init.c
5241@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5242 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5243 vma->vm_end = vma->vm_start + PAGE_SIZE;
5244 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5245+
5246+#ifdef CONFIG_PAX_PAGEEXEC
5247+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5248+ vma->vm_flags &= ~VM_EXEC;
5249+
5250+#ifdef CONFIG_PAX_MPROTECT
5251+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5252+ vma->vm_flags &= ~VM_MAYEXEC;
5253+#endif
5254+
5255+ }
5256+#endif
5257+
5258 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5259 down_write(&current->mm->mmap_sem);
5260 if (insert_vm_struct(current->mm, vma)) {
5261diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5262index 40b3ee9..8c2c112 100644
5263--- a/arch/m32r/include/asm/cache.h
5264+++ b/arch/m32r/include/asm/cache.h
5265@@ -1,8 +1,10 @@
5266 #ifndef _ASM_M32R_CACHE_H
5267 #define _ASM_M32R_CACHE_H
5268
5269+#include <linux/const.h>
5270+
5271 /* L1 cache line size */
5272 #define L1_CACHE_SHIFT 4
5273-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5274+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5275
5276 #endif /* _ASM_M32R_CACHE_H */
5277diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5278index 82abd15..d95ae5d 100644
5279--- a/arch/m32r/lib/usercopy.c
5280+++ b/arch/m32r/lib/usercopy.c
5281@@ -14,6 +14,9 @@
5282 unsigned long
5283 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5284 {
5285+ if ((long)n < 0)
5286+ return n;
5287+
5288 prefetch(from);
5289 if (access_ok(VERIFY_WRITE, to, n))
5290 __copy_user(to,from,n);
5291@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5292 unsigned long
5293 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5294 {
5295+ if ((long)n < 0)
5296+ return n;
5297+
5298 prefetchw(to);
5299 if (access_ok(VERIFY_READ, from, n))
5300 __copy_user_zeroing(to,from,n);
5301diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5302index 0395c51..5f26031 100644
5303--- a/arch/m68k/include/asm/cache.h
5304+++ b/arch/m68k/include/asm/cache.h
5305@@ -4,9 +4,11 @@
5306 #ifndef __ARCH_M68K_CACHE_H
5307 #define __ARCH_M68K_CACHE_H
5308
5309+#include <linux/const.h>
5310+
5311 /* bytes per L1 cache line */
5312 #define L1_CACHE_SHIFT 4
5313-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5314+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5315
5316 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5317
5318diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5319index 3c52fa6..11b2ad8 100644
5320--- a/arch/metag/mm/hugetlbpage.c
5321+++ b/arch/metag/mm/hugetlbpage.c
5322@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5323 info.high_limit = TASK_SIZE;
5324 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5325 info.align_offset = 0;
5326+ info.threadstack_offset = 0;
5327 return vm_unmapped_area(&info);
5328 }
5329
5330diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5331index 4efe96a..60e8699 100644
5332--- a/arch/microblaze/include/asm/cache.h
5333+++ b/arch/microblaze/include/asm/cache.h
5334@@ -13,11 +13,12 @@
5335 #ifndef _ASM_MICROBLAZE_CACHE_H
5336 #define _ASM_MICROBLAZE_CACHE_H
5337
5338+#include <linux/const.h>
5339 #include <asm/registers.h>
5340
5341 #define L1_CACHE_SHIFT 5
5342 /* word-granular cache in microblaze */
5343-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5344+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5345
5346 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5347
5348diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5349index 08b6079..8b554d2 100644
5350--- a/arch/mips/include/asm/atomic.h
5351+++ b/arch/mips/include/asm/atomic.h
5352@@ -21,15 +21,39 @@
5353 #include <asm/cmpxchg.h>
5354 #include <asm/war.h>
5355
5356+#ifdef CONFIG_GENERIC_ATOMIC64
5357+#include <asm-generic/atomic64.h>
5358+#endif
5359+
5360 #define ATOMIC_INIT(i) { (i) }
5361
5362+#ifdef CONFIG_64BIT
5363+#define _ASM_EXTABLE(from, to) \
5364+" .section __ex_table,\"a\"\n" \
5365+" .dword " #from ", " #to"\n" \
5366+" .previous\n"
5367+#else
5368+#define _ASM_EXTABLE(from, to) \
5369+" .section __ex_table,\"a\"\n" \
5370+" .word " #from ", " #to"\n" \
5371+" .previous\n"
5372+#endif
5373+
5374 /*
5375 * atomic_read - read atomic variable
5376 * @v: pointer of type atomic_t
5377 *
5378 * Atomically reads the value of @v.
5379 */
5380-#define atomic_read(v) (*(volatile int *)&(v)->counter)
5381+static inline int atomic_read(const atomic_t *v)
5382+{
5383+ return (*(volatile const int *) &v->counter);
5384+}
5385+
5386+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5387+{
5388+ return (*(volatile const int *) &v->counter);
5389+}
5390
5391 /*
5392 * atomic_set - set atomic variable
5393@@ -38,7 +62,15 @@
5394 *
5395 * Atomically sets the value of @v to @i.
5396 */
5397-#define atomic_set(v, i) ((v)->counter = (i))
5398+static inline void atomic_set(atomic_t *v, int i)
5399+{
5400+ v->counter = i;
5401+}
5402+
5403+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5404+{
5405+ v->counter = i;
5406+}
5407
5408 /*
5409 * atomic_add - add integer to atomic variable
5410@@ -47,7 +79,67 @@
5411 *
5412 * Atomically adds @i to @v.
5413 */
5414-static __inline__ void atomic_add(int i, atomic_t * v)
5415+static __inline__ void atomic_add(int i, atomic_t *v)
5416+{
5417+ int temp;
5418+
5419+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5420+ __asm__ __volatile__(
5421+ " .set mips3 \n"
5422+ "1: ll %0, %1 # atomic_add \n"
5423+#ifdef CONFIG_PAX_REFCOUNT
5424+ /* Exception on overflow. */
5425+ "2: add %0, %2 \n"
5426+#else
5427+ " addu %0, %2 \n"
5428+#endif
5429+ " sc %0, %1 \n"
5430+ " beqzl %0, 1b \n"
5431+#ifdef CONFIG_PAX_REFCOUNT
5432+ "3: \n"
5433+ _ASM_EXTABLE(2b, 3b)
5434+#endif
5435+ " .set mips0 \n"
5436+ : "=&r" (temp), "+m" (v->counter)
5437+ : "Ir" (i));
5438+ } else if (kernel_uses_llsc) {
5439+ __asm__ __volatile__(
5440+ " .set mips3 \n"
5441+ "1: ll %0, %1 # atomic_add \n"
5442+#ifdef CONFIG_PAX_REFCOUNT
5443+ /* Exception on overflow. */
5444+ "2: add %0, %2 \n"
5445+#else
5446+ " addu %0, %2 \n"
5447+#endif
5448+ " sc %0, %1 \n"
5449+ " beqz %0, 1b \n"
5450+#ifdef CONFIG_PAX_REFCOUNT
5451+ "3: \n"
5452+ _ASM_EXTABLE(2b, 3b)
5453+#endif
5454+ " .set mips0 \n"
5455+ : "=&r" (temp), "+m" (v->counter)
5456+ : "Ir" (i));
5457+ } else {
5458+ unsigned long flags;
5459+
5460+ raw_local_irq_save(flags);
5461+ __asm__ __volatile__(
5462+#ifdef CONFIG_PAX_REFCOUNT
5463+ /* Exception on overflow. */
5464+ "1: add %0, %1 \n"
5465+ "2: \n"
5466+ _ASM_EXTABLE(1b, 2b)
5467+#else
5468+ " addu %0, %1 \n"
5469+#endif
5470+ : "+r" (v->counter) : "Ir" (i));
5471+ raw_local_irq_restore(flags);
5472+ }
5473+}
5474+
5475+static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
5476 {
5477 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5478 int temp;
5479@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
5480 *
5481 * Atomically subtracts @i from @v.
5482 */
5483-static __inline__ void atomic_sub(int i, atomic_t * v)
5484+static __inline__ void atomic_sub(int i, atomic_t *v)
5485+{
5486+ int temp;
5487+
5488+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5489+ __asm__ __volatile__(
5490+ " .set mips3 \n"
5491+ "1: ll %0, %1 # atomic64_sub \n"
5492+#ifdef CONFIG_PAX_REFCOUNT
5493+ /* Exception on overflow. */
5494+ "2: sub %0, %2 \n"
5495+#else
5496+ " subu %0, %2 \n"
5497+#endif
5498+ " sc %0, %1 \n"
5499+ " beqzl %0, 1b \n"
5500+#ifdef CONFIG_PAX_REFCOUNT
5501+ "3: \n"
5502+ _ASM_EXTABLE(2b, 3b)
5503+#endif
5504+ " .set mips0 \n"
5505+ : "=&r" (temp), "+m" (v->counter)
5506+ : "Ir" (i));
5507+ } else if (kernel_uses_llsc) {
5508+ __asm__ __volatile__(
5509+ " .set mips3 \n"
5510+ "1: ll %0, %1 # atomic64_sub \n"
5511+#ifdef CONFIG_PAX_REFCOUNT
5512+ /* Exception on overflow. */
5513+ "2: sub %0, %2 \n"
5514+#else
5515+ " subu %0, %2 \n"
5516+#endif
5517+ " sc %0, %1 \n"
5518+ " beqz %0, 1b \n"
5519+#ifdef CONFIG_PAX_REFCOUNT
5520+ "3: \n"
5521+ _ASM_EXTABLE(2b, 3b)
5522+#endif
5523+ " .set mips0 \n"
5524+ : "=&r" (temp), "+m" (v->counter)
5525+ : "Ir" (i));
5526+ } else {
5527+ unsigned long flags;
5528+
5529+ raw_local_irq_save(flags);
5530+ __asm__ __volatile__(
5531+#ifdef CONFIG_PAX_REFCOUNT
5532+ /* Exception on overflow. */
5533+ "1: sub %0, %1 \n"
5534+ "2: \n"
5535+ _ASM_EXTABLE(1b, 2b)
5536+#else
5537+ " subu %0, %1 \n"
5538+#endif
5539+ : "+r" (v->counter) : "Ir" (i));
5540+ raw_local_irq_restore(flags);
5541+ }
5542+}
5543+
5544+static __inline__ void atomic_sub_unchecked(long i, atomic_unchecked_t *v)
5545 {
5546 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5547 int temp;
5548@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
5549 /*
5550 * Same as above, but return the result value
5551 */
5552-static __inline__ int atomic_add_return(int i, atomic_t * v)
5553+static __inline__ int atomic_add_return(int i, atomic_t *v)
5554+{
5555+ int result;
5556+ int temp;
5557+
5558+ smp_mb__before_llsc();
5559+
5560+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5561+ __asm__ __volatile__(
5562+ " .set mips3 \n"
5563+ "1: ll %1, %2 # atomic_add_return \n"
5564+#ifdef CONFIG_PAX_REFCOUNT
5565+ "2: add %0, %1, %3 \n"
5566+#else
5567+ " addu %0, %1, %3 \n"
5568+#endif
5569+ " sc %0, %2 \n"
5570+ " beqzl %0, 1b \n"
5571+#ifdef CONFIG_PAX_REFCOUNT
5572+ " b 4f \n"
5573+ " .set noreorder \n"
5574+ "3: b 5f \n"
5575+ " move %0, %1 \n"
5576+ " .set reorder \n"
5577+ _ASM_EXTABLE(2b, 3b)
5578+#endif
5579+ "4: addu %0, %1, %3 \n"
5580+#ifdef CONFIG_PAX_REFCOUNT
5581+ "5: \n"
5582+#endif
5583+ " .set mips0 \n"
5584+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5585+ : "Ir" (i));
5586+ } else if (kernel_uses_llsc) {
5587+ __asm__ __volatile__(
5588+ " .set mips3 \n"
5589+ "1: ll %1, %2 # atomic_add_return \n"
5590+#ifdef CONFIG_PAX_REFCOUNT
5591+ "2: add %0, %1, %3 \n"
5592+#else
5593+ " addu %0, %1, %3 \n"
5594+#endif
5595+ " sc %0, %2 \n"
5596+ " bnez %0, 4f \n"
5597+ " b 1b \n"
5598+#ifdef CONFIG_PAX_REFCOUNT
5599+ " .set noreorder \n"
5600+ "3: b 5f \n"
5601+ " move %0, %1 \n"
5602+ " .set reorder \n"
5603+ _ASM_EXTABLE(2b, 3b)
5604+#endif
5605+ "4: addu %0, %1, %3 \n"
5606+#ifdef CONFIG_PAX_REFCOUNT
5607+ "5: \n"
5608+#endif
5609+ " .set mips0 \n"
5610+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5611+ : "Ir" (i));
5612+ } else {
5613+ unsigned long flags;
5614+
5615+ raw_local_irq_save(flags);
5616+ __asm__ __volatile__(
5617+ " lw %0, %1 \n"
5618+#ifdef CONFIG_PAX_REFCOUNT
5619+ /* Exception on overflow. */
5620+ "1: add %0, %2 \n"
5621+#else
5622+ " addu %0, %2 \n"
5623+#endif
5624+ " sw %0, %1 \n"
5625+#ifdef CONFIG_PAX_REFCOUNT
5626+ /* Note: Dest reg is not modified on overflow */
5627+ "2: \n"
5628+ _ASM_EXTABLE(1b, 2b)
5629+#endif
5630+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5631+ raw_local_irq_restore(flags);
5632+ }
5633+
5634+ smp_llsc_mb();
5635+
5636+ return result;
5637+}
5638+
5639+static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5640 {
5641 int result;
5642
5643@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
5644 return result;
5645 }
5646
5647-static __inline__ int atomic_sub_return(int i, atomic_t * v)
5648+static __inline__ int atomic_sub_return(int i, atomic_t *v)
5649+{
5650+ int result;
5651+ int temp;
5652+
5653+ smp_mb__before_llsc();
5654+
5655+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5656+ __asm__ __volatile__(
5657+ " .set mips3 \n"
5658+ "1: ll %1, %2 # atomic_sub_return \n"
5659+#ifdef CONFIG_PAX_REFCOUNT
5660+ "2: sub %0, %1, %3 \n"
5661+#else
5662+ " subu %0, %1, %3 \n"
5663+#endif
5664+ " sc %0, %2 \n"
5665+ " beqzl %0, 1b \n"
5666+#ifdef CONFIG_PAX_REFCOUNT
5667+ " b 4f \n"
5668+ " .set noreorder \n"
5669+ "3: b 5f \n"
5670+ " move %0, %1 \n"
5671+ " .set reorder \n"
5672+ _ASM_EXTABLE(2b, 3b)
5673+#endif
5674+ "4: subu %0, %1, %3 \n"
5675+#ifdef CONFIG_PAX_REFCOUNT
5676+ "5: \n"
5677+#endif
5678+ " .set mips0 \n"
5679+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
5680+ : "Ir" (i), "m" (v->counter)
5681+ : "memory");
5682+ } else if (kernel_uses_llsc) {
5683+ __asm__ __volatile__(
5684+ " .set mips3 \n"
5685+ "1: ll %1, %2 # atomic_sub_return \n"
5686+#ifdef CONFIG_PAX_REFCOUNT
5687+ "2: sub %0, %1, %3 \n"
5688+#else
5689+ " subu %0, %1, %3 \n"
5690+#endif
5691+ " sc %0, %2 \n"
5692+ " bnez %0, 4f \n"
5693+ " b 1b \n"
5694+#ifdef CONFIG_PAX_REFCOUNT
5695+ " .set noreorder \n"
5696+ "3: b 5f \n"
5697+ " move %0, %1 \n"
5698+ " .set reorder \n"
5699+ _ASM_EXTABLE(2b, 3b)
5700+#endif
5701+ "4: subu %0, %1, %3 \n"
5702+#ifdef CONFIG_PAX_REFCOUNT
5703+ "5: \n"
5704+#endif
5705+ " .set mips0 \n"
5706+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5707+ : "Ir" (i));
5708+ } else {
5709+ unsigned long flags;
5710+
5711+ raw_local_irq_save(flags);
5712+ __asm__ __volatile__(
5713+ " lw %0, %1 \n"
5714+#ifdef CONFIG_PAX_REFCOUNT
5715+ /* Exception on overflow. */
5716+ "1: sub %0, %2 \n"
5717+#else
5718+ " subu %0, %2 \n"
5719+#endif
5720+ " sw %0, %1 \n"
5721+#ifdef CONFIG_PAX_REFCOUNT
5722+ /* Note: Dest reg is not modified on overflow */
5723+ "2: \n"
5724+ _ASM_EXTABLE(1b, 2b)
5725+#endif
5726+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5727+ raw_local_irq_restore(flags);
5728+ }
5729+
5730+ smp_llsc_mb();
5731+
5732+ return result;
5733+}
5734+static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
5735 {
5736 int result;
5737
5738@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
5739 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5740 * The function returns the old value of @v minus @i.
5741 */
5742-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5743+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5744 {
5745 int result;
5746
5747@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5748 return result;
5749 }
5750
5751-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5752-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5753+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5754+{
5755+ return cmpxchg(&v->counter, old, new);
5756+}
5757+
5758+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5759+ int new)
5760+{
5761+ return cmpxchg(&(v->counter), old, new);
5762+}
5763+
5764+static inline int atomic_xchg(atomic_t *v, int new)
5765+{
5766+ return xchg(&v->counter, new);
5767+}
5768+
5769+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5770+{
5771+ return xchg(&(v->counter), new);
5772+}
5773
5774 /**
5775 * __atomic_add_unless - add unless the number is a given value
5776@@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5777
5778 #define atomic_dec_return(v) atomic_sub_return(1, (v))
5779 #define atomic_inc_return(v) atomic_add_return(1, (v))
5780+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5781+{
5782+ return atomic_add_return_unchecked(1, v);
5783+}
5784
5785 /*
5786 * atomic_sub_and_test - subtract value from variable and test result
5787@@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5788 * other cases.
5789 */
5790 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5791+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5792+{
5793+ return atomic_add_return_unchecked(1, v) == 0;
5794+}
5795
5796 /*
5797 * atomic_dec_and_test - decrement by 1 and test
5798@@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5799 * Atomically increments @v by 1.
5800 */
5801 #define atomic_inc(v) atomic_add(1, (v))
5802+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
5803+{
5804+ atomic_add_unchecked(1, v);
5805+}
5806
5807 /*
5808 * atomic_dec - decrement and test
5809@@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5810 * Atomically decrements @v by 1.
5811 */
5812 #define atomic_dec(v) atomic_sub(1, (v))
5813+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
5814+{
5815+ atomic_sub_unchecked(1, v);
5816+}
5817
5818 /*
5819 * atomic_add_negative - add and test if negative
5820@@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5821 * @v: pointer of type atomic64_t
5822 *
5823 */
5824-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
5825+static inline long atomic64_read(const atomic64_t *v)
5826+{
5827+ return (*(volatile const long *) &v->counter);
5828+}
5829+
5830+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5831+{
5832+ return (*(volatile const long *) &v->counter);
5833+}
5834
5835 /*
5836 * atomic64_set - set atomic variable
5837 * @v: pointer of type atomic64_t
5838 * @i: required value
5839 */
5840-#define atomic64_set(v, i) ((v)->counter = (i))
5841+static inline void atomic64_set(atomic64_t *v, long i)
5842+{
5843+ v->counter = i;
5844+}
5845+
5846+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5847+{
5848+ v->counter = i;
5849+}
5850
5851 /*
5852 * atomic64_add - add integer to atomic variable
5853@@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5854 *
5855 * Atomically adds @i to @v.
5856 */
5857-static __inline__ void atomic64_add(long i, atomic64_t * v)
5858+static __inline__ void atomic64_add(long i, atomic64_t *v)
5859+{
5860+ long temp;
5861+
5862+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5863+ __asm__ __volatile__(
5864+ " .set mips3 \n"
5865+ "1: lld %0, %1 # atomic64_add \n"
5866+#ifdef CONFIG_PAX_REFCOUNT
5867+ /* Exception on overflow. */
5868+ "2: dadd %0, %2 \n"
5869+#else
5870+ " daddu %0, %2 \n"
5871+#endif
5872+ " scd %0, %1 \n"
5873+ " beqzl %0, 1b \n"
5874+#ifdef CONFIG_PAX_REFCOUNT
5875+ "3: \n"
5876+ _ASM_EXTABLE(2b, 3b)
5877+#endif
5878+ " .set mips0 \n"
5879+ : "=&r" (temp), "+m" (v->counter)
5880+ : "Ir" (i));
5881+ } else if (kernel_uses_llsc) {
5882+ __asm__ __volatile__(
5883+ " .set mips3 \n"
5884+ "1: lld %0, %1 # atomic64_add \n"
5885+#ifdef CONFIG_PAX_REFCOUNT
5886+ /* Exception on overflow. */
5887+ "2: dadd %0, %2 \n"
5888+#else
5889+ " daddu %0, %2 \n"
5890+#endif
5891+ " scd %0, %1 \n"
5892+ " beqz %0, 1b \n"
5893+#ifdef CONFIG_PAX_REFCOUNT
5894+ "3: \n"
5895+ _ASM_EXTABLE(2b, 3b)
5896+#endif
5897+ " .set mips0 \n"
5898+ : "=&r" (temp), "+m" (v->counter)
5899+ : "Ir" (i));
5900+ } else {
5901+ unsigned long flags;
5902+
5903+ raw_local_irq_save(flags);
5904+ __asm__ __volatile__(
5905+#ifdef CONFIG_PAX_REFCOUNT
5906+ /* Exception on overflow. */
5907+ "1: dadd %0, %1 \n"
5908+ "2: \n"
5909+ _ASM_EXTABLE(1b, 2b)
5910+#else
5911+ " daddu %0, %1 \n"
5912+#endif
5913+ : "+r" (v->counter) : "Ir" (i));
5914+ raw_local_irq_restore(flags);
5915+ }
5916+}
5917+static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
5918 {
5919 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5920 long temp;
5921@@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
5922 *
5923 * Atomically subtracts @i from @v.
5924 */
5925-static __inline__ void atomic64_sub(long i, atomic64_t * v)
5926+static __inline__ void atomic64_sub(long i, atomic64_t *v)
5927+{
5928+ long temp;
5929+
5930+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5931+ __asm__ __volatile__(
5932+ " .set mips3 \n"
5933+ "1: lld %0, %1 # atomic64_sub \n"
5934+#ifdef CONFIG_PAX_REFCOUNT
5935+ /* Exception on overflow. */
5936+ "2: dsub %0, %2 \n"
5937+#else
5938+ " dsubu %0, %2 \n"
5939+#endif
5940+ " scd %0, %1 \n"
5941+ " beqzl %0, 1b \n"
5942+#ifdef CONFIG_PAX_REFCOUNT
5943+ "3: \n"
5944+ _ASM_EXTABLE(2b, 3b)
5945+#endif
5946+ " .set mips0 \n"
5947+ : "=&r" (temp), "+m" (v->counter)
5948+ : "Ir" (i));
5949+ } else if (kernel_uses_llsc) {
5950+ __asm__ __volatile__(
5951+ " .set mips3 \n"
5952+ "1: lld %0, %1 # atomic64_sub \n"
5953+#ifdef CONFIG_PAX_REFCOUNT
5954+ /* Exception on overflow. */
5955+ "2: dsub %0, %2 \n"
5956+#else
5957+ " dsubu %0, %2 \n"
5958+#endif
5959+ " scd %0, %1 \n"
5960+ " beqz %0, 1b \n"
5961+#ifdef CONFIG_PAX_REFCOUNT
5962+ "3: \n"
5963+ _ASM_EXTABLE(2b, 3b)
5964+#endif
5965+ " .set mips0 \n"
5966+ : "=&r" (temp), "+m" (v->counter)
5967+ : "Ir" (i));
5968+ } else {
5969+ unsigned long flags;
5970+
5971+ raw_local_irq_save(flags);
5972+ __asm__ __volatile__(
5973+#ifdef CONFIG_PAX_REFCOUNT
5974+ /* Exception on overflow. */
5975+ "1: dsub %0, %1 \n"
5976+ "2: \n"
5977+ _ASM_EXTABLE(1b, 2b)
5978+#else
5979+ " dsubu %0, %1 \n"
5980+#endif
5981+ : "+r" (v->counter) : "Ir" (i));
5982+ raw_local_irq_restore(flags);
5983+ }
5984+}
5985+
5986+static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
5987 {
5988 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5989 long temp;
5990@@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
5991 /*
5992 * Same as above, but return the result value
5993 */
5994-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
5995+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
5996+{
5997+ long result;
5998+ long temp;
5999+
6000+ smp_mb__before_llsc();
6001+
6002+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6003+ __asm__ __volatile__(
6004+ " .set mips3 \n"
6005+ "1: lld %1, %2 # atomic64_add_return \n"
6006+#ifdef CONFIG_PAX_REFCOUNT
6007+ "2: dadd %0, %1, %3 \n"
6008+#else
6009+ " daddu %0, %1, %3 \n"
6010+#endif
6011+ " scd %0, %2 \n"
6012+ " beqzl %0, 1b \n"
6013+#ifdef CONFIG_PAX_REFCOUNT
6014+ " b 4f \n"
6015+ " .set noreorder \n"
6016+ "3: b 5f \n"
6017+ " move %0, %1 \n"
6018+ " .set reorder \n"
6019+ _ASM_EXTABLE(2b, 3b)
6020+#endif
6021+ "4: daddu %0, %1, %3 \n"
6022+#ifdef CONFIG_PAX_REFCOUNT
6023+ "5: \n"
6024+#endif
6025+ " .set mips0 \n"
6026+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6027+ : "Ir" (i));
6028+ } else if (kernel_uses_llsc) {
6029+ __asm__ __volatile__(
6030+ " .set mips3 \n"
6031+ "1: lld %1, %2 # atomic64_add_return \n"
6032+#ifdef CONFIG_PAX_REFCOUNT
6033+ "2: dadd %0, %1, %3 \n"
6034+#else
6035+ " daddu %0, %1, %3 \n"
6036+#endif
6037+ " scd %0, %2 \n"
6038+ " bnez %0, 4f \n"
6039+ " b 1b \n"
6040+#ifdef CONFIG_PAX_REFCOUNT
6041+ " .set noreorder \n"
6042+ "3: b 5f \n"
6043+ " move %0, %1 \n"
6044+ " .set reorder \n"
6045+ _ASM_EXTABLE(2b, 3b)
6046+#endif
6047+ "4: daddu %0, %1, %3 \n"
6048+#ifdef CONFIG_PAX_REFCOUNT
6049+ "5: \n"
6050+#endif
6051+ " .set mips0 \n"
6052+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6053+ : "Ir" (i), "m" (v->counter)
6054+ : "memory");
6055+ } else {
6056+ unsigned long flags;
6057+
6058+ raw_local_irq_save(flags);
6059+ __asm__ __volatile__(
6060+ " ld %0, %1 \n"
6061+#ifdef CONFIG_PAX_REFCOUNT
6062+ /* Exception on overflow. */
6063+ "1: dadd %0, %2 \n"
6064+#else
6065+ " daddu %0, %2 \n"
6066+#endif
6067+ " sd %0, %1 \n"
6068+#ifdef CONFIG_PAX_REFCOUNT
6069+ /* Note: Dest reg is not modified on overflow */
6070+ "2: \n"
6071+ _ASM_EXTABLE(1b, 2b)
6072+#endif
6073+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6074+ raw_local_irq_restore(flags);
6075+ }
6076+
6077+ smp_llsc_mb();
6078+
6079+ return result;
6080+}
6081+static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6082 {
6083 long result;
6084
6085@@ -546,7 +1125,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6086 return result;
6087 }
6088
6089-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6090+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
6091+{
6092+ long result;
6093+ long temp;
6094+
6095+ smp_mb__before_llsc();
6096+
6097+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6098+ long temp;
6099+
6100+ __asm__ __volatile__(
6101+ " .set mips3 \n"
6102+ "1: lld %1, %2 # atomic64_sub_return \n"
6103+#ifdef CONFIG_PAX_REFCOUNT
6104+ "2: dsub %0, %1, %3 \n"
6105+#else
6106+ " dsubu %0, %1, %3 \n"
6107+#endif
6108+ " scd %0, %2 \n"
6109+ " beqzl %0, 1b \n"
6110+#ifdef CONFIG_PAX_REFCOUNT
6111+ " b 4f \n"
6112+ " .set noreorder \n"
6113+ "3: b 5f \n"
6114+ " move %0, %1 \n"
6115+ " .set reorder \n"
6116+ _ASM_EXTABLE(2b, 3b)
6117+#endif
6118+ "4: dsubu %0, %1, %3 \n"
6119+#ifdef CONFIG_PAX_REFCOUNT
6120+ "5: \n"
6121+#endif
6122+ " .set mips0 \n"
6123+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6124+ : "Ir" (i), "m" (v->counter)
6125+ : "memory");
6126+ } else if (kernel_uses_llsc) {
6127+ __asm__ __volatile__(
6128+ " .set mips3 \n"
6129+ "1: lld %1, %2 # atomic64_sub_return \n"
6130+#ifdef CONFIG_PAX_REFCOUNT
6131+ "2: dsub %0, %1, %3 \n"
6132+#else
6133+ " dsubu %0, %1, %3 \n"
6134+#endif
6135+ " scd %0, %2 \n"
6136+ " bnez %0, 4f \n"
6137+ " b 1b \n"
6138+#ifdef CONFIG_PAX_REFCOUNT
6139+ " .set noreorder \n"
6140+ "3: b 5f \n"
6141+ " move %0, %1 \n"
6142+ " .set reorder \n"
6143+ _ASM_EXTABLE(2b, 3b)
6144+#endif
6145+ "4: dsubu %0, %1, %3 \n"
6146+#ifdef CONFIG_PAX_REFCOUNT
6147+ "5: \n"
6148+#endif
6149+ " .set mips0 \n"
6150+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6151+ : "Ir" (i), "m" (v->counter)
6152+ : "memory");
6153+ } else {
6154+ unsigned long flags;
6155+
6156+ raw_local_irq_save(flags);
6157+ __asm__ __volatile__(
6158+ " ld %0, %1 \n"
6159+#ifdef CONFIG_PAX_REFCOUNT
6160+ /* Exception on overflow. */
6161+ "1: dsub %0, %2 \n"
6162+#else
6163+ " dsubu %0, %2 \n"
6164+#endif
6165+ " sd %0, %1 \n"
6166+#ifdef CONFIG_PAX_REFCOUNT
6167+ /* Note: Dest reg is not modified on overflow */
6168+ "2: \n"
6169+ _ASM_EXTABLE(1b, 2b)
6170+#endif
6171+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6172+ raw_local_irq_restore(flags);
6173+ }
6174+
6175+ smp_llsc_mb();
6176+
6177+ return result;
6178+}
6179+
6180+static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
6181 {
6182 long result;
6183
6184@@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6185 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6186 * The function returns the old value of @v minus @i.
6187 */
6188-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6189+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6190 {
6191 long result;
6192
6193@@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6194 return result;
6195 }
6196
6197-#define atomic64_cmpxchg(v, o, n) \
6198- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6199-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6200+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6201+{
6202+ return cmpxchg(&v->counter, old, new);
6203+}
6204+
6205+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6206+ long new)
6207+{
6208+ return cmpxchg(&(v->counter), old, new);
6209+}
6210+
6211+static inline long atomic64_xchg(atomic64_t *v, long new)
6212+{
6213+ return xchg(&v->counter, new);
6214+}
6215+
6216+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6217+{
6218+ return xchg(&(v->counter), new);
6219+}
6220
6221 /**
6222 * atomic64_add_unless - add unless the number is a given value
6223@@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6224
6225 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6226 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6227+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6228
6229 /*
6230 * atomic64_sub_and_test - subtract value from variable and test result
6231@@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6232 * other cases.
6233 */
6234 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6235+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6236
6237 /*
6238 * atomic64_dec_and_test - decrement by 1 and test
6239@@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6240 * Atomically increments @v by 1.
6241 */
6242 #define atomic64_inc(v) atomic64_add(1, (v))
6243+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6244
6245 /*
6246 * atomic64_dec - decrement and test
6247@@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6248 * Atomically decrements @v by 1.
6249 */
6250 #define atomic64_dec(v) atomic64_sub(1, (v))
6251+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6252
6253 /*
6254 * atomic64_add_negative - add and test if negative
6255diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6256index b4db69f..8f3b093 100644
6257--- a/arch/mips/include/asm/cache.h
6258+++ b/arch/mips/include/asm/cache.h
6259@@ -9,10 +9,11 @@
6260 #ifndef _ASM_CACHE_H
6261 #define _ASM_CACHE_H
6262
6263+#include <linux/const.h>
6264 #include <kmalloc.h>
6265
6266 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6267-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6268+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6269
6270 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6271 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6272diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6273index cf3ae24..238d22f 100644
6274--- a/arch/mips/include/asm/elf.h
6275+++ b/arch/mips/include/asm/elf.h
6276@@ -372,13 +372,16 @@ extern const char *__elf_platform;
6277 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6278 #endif
6279
6280+#ifdef CONFIG_PAX_ASLR
6281+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6282+
6283+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6284+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6285+#endif
6286+
6287 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6288 struct linux_binprm;
6289 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6290 int uses_interp);
6291
6292-struct mm_struct;
6293-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6294-#define arch_randomize_brk arch_randomize_brk
6295-
6296 #endif /* _ASM_ELF_H */
6297diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6298index c1f6afa..38cc6e9 100644
6299--- a/arch/mips/include/asm/exec.h
6300+++ b/arch/mips/include/asm/exec.h
6301@@ -12,6 +12,6 @@
6302 #ifndef _ASM_EXEC_H
6303 #define _ASM_EXEC_H
6304
6305-extern unsigned long arch_align_stack(unsigned long sp);
6306+#define arch_align_stack(x) ((x) & ~0xfUL)
6307
6308 #endif /* _ASM_EXEC_H */
6309diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6310index d44622c..64990d2 100644
6311--- a/arch/mips/include/asm/local.h
6312+++ b/arch/mips/include/asm/local.h
6313@@ -12,15 +12,25 @@ typedef struct
6314 atomic_long_t a;
6315 } local_t;
6316
6317+typedef struct {
6318+ atomic_long_unchecked_t a;
6319+} local_unchecked_t;
6320+
6321 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6322
6323 #define local_read(l) atomic_long_read(&(l)->a)
6324+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6325 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6326+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6327
6328 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6329+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6330 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6331+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6332 #define local_inc(l) atomic_long_inc(&(l)->a)
6333+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6334 #define local_dec(l) atomic_long_dec(&(l)->a)
6335+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6336
6337 /*
6338 * Same as above, but return the result value
6339@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6340 return result;
6341 }
6342
6343+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6344+{
6345+ unsigned long result;
6346+
6347+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6348+ unsigned long temp;
6349+
6350+ __asm__ __volatile__(
6351+ " .set mips3 \n"
6352+ "1:" __LL "%1, %2 # local_add_return \n"
6353+ " addu %0, %1, %3 \n"
6354+ __SC "%0, %2 \n"
6355+ " beqzl %0, 1b \n"
6356+ " addu %0, %1, %3 \n"
6357+ " .set mips0 \n"
6358+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6359+ : "Ir" (i), "m" (l->a.counter)
6360+ : "memory");
6361+ } else if (kernel_uses_llsc) {
6362+ unsigned long temp;
6363+
6364+ __asm__ __volatile__(
6365+ " .set mips3 \n"
6366+ "1:" __LL "%1, %2 # local_add_return \n"
6367+ " addu %0, %1, %3 \n"
6368+ __SC "%0, %2 \n"
6369+ " beqz %0, 1b \n"
6370+ " addu %0, %1, %3 \n"
6371+ " .set mips0 \n"
6372+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6373+ : "Ir" (i), "m" (l->a.counter)
6374+ : "memory");
6375+ } else {
6376+ unsigned long flags;
6377+
6378+ local_irq_save(flags);
6379+ result = l->a.counter;
6380+ result += i;
6381+ l->a.counter = result;
6382+ local_irq_restore(flags);
6383+ }
6384+
6385+ return result;
6386+}
6387+
6388 static __inline__ long local_sub_return(long i, local_t * l)
6389 {
6390 unsigned long result;
6391@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6392
6393 #define local_cmpxchg(l, o, n) \
6394 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6395+#define local_cmpxchg_unchecked(l, o, n) \
6396+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6397 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6398
6399 /**
6400diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6401index f59552f..3abe9b9 100644
6402--- a/arch/mips/include/asm/page.h
6403+++ b/arch/mips/include/asm/page.h
6404@@ -95,7 +95,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6405 #ifdef CONFIG_CPU_MIPS32
6406 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6407 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6408- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6409+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6410 #else
6411 typedef struct { unsigned long long pte; } pte_t;
6412 #define pte_val(x) ((x).pte)
6413diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6414index 881d18b..cea38bc 100644
6415--- a/arch/mips/include/asm/pgalloc.h
6416+++ b/arch/mips/include/asm/pgalloc.h
6417@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6418 {
6419 set_pud(pud, __pud((unsigned long)pmd));
6420 }
6421+
6422+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6423+{
6424+ pud_populate(mm, pud, pmd);
6425+}
6426 #endif
6427
6428 /*
6429diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6430index 895320e..bf63e10 100644
6431--- a/arch/mips/include/asm/thread_info.h
6432+++ b/arch/mips/include/asm/thread_info.h
6433@@ -115,6 +115,8 @@ static inline struct thread_info *current_thread_info(void)
6434 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
6435 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
6436 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
6437+/* li takes a 32bit immediate */
6438+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
6439 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
6440
6441 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6442@@ -130,15 +132,18 @@ static inline struct thread_info *current_thread_info(void)
6443 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
6444 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
6445 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
6446+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6447+
6448+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
6449
6450 /* work to do in syscall_trace_leave() */
6451-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
6452+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
6453
6454 /* work to do on interrupt/exception return */
6455 #define _TIF_WORK_MASK \
6456 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
6457 /* work to do on any return to u-space */
6458-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
6459+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
6460
6461 #endif /* __KERNEL__ */
6462
6463diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6464index 1188e00..41cf144 100644
6465--- a/arch/mips/kernel/binfmt_elfn32.c
6466+++ b/arch/mips/kernel/binfmt_elfn32.c
6467@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6468 #undef ELF_ET_DYN_BASE
6469 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6470
6471+#ifdef CONFIG_PAX_ASLR
6472+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6473+
6474+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6475+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6476+#endif
6477+
6478 #include <asm/processor.h>
6479 #include <linux/module.h>
6480 #include <linux/elfcore.h>
6481diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6482index 202e581..689ca79 100644
6483--- a/arch/mips/kernel/binfmt_elfo32.c
6484+++ b/arch/mips/kernel/binfmt_elfo32.c
6485@@ -56,6 +56,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6486 #undef ELF_ET_DYN_BASE
6487 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6488
6489+#ifdef CONFIG_PAX_ASLR
6490+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6491+
6492+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6493+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6494+#endif
6495+
6496 #include <asm/processor.h>
6497
6498 /*
6499diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6500index c6a041d..b3e7318 100644
6501--- a/arch/mips/kernel/process.c
6502+++ b/arch/mips/kernel/process.c
6503@@ -563,15 +563,3 @@ unsigned long get_wchan(struct task_struct *task)
6504 out:
6505 return pc;
6506 }
6507-
6508-/*
6509- * Don't forget that the stack pointer must be aligned on a 8 bytes
6510- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6511- */
6512-unsigned long arch_align_stack(unsigned long sp)
6513-{
6514- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6515- sp -= get_random_int() & ~PAGE_MASK;
6516-
6517- return sp & ALMASK;
6518-}
6519diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6520index 9c6299c..2fb4c22 100644
6521--- a/arch/mips/kernel/ptrace.c
6522+++ b/arch/mips/kernel/ptrace.c
6523@@ -528,6 +528,10 @@ static inline int audit_arch(void)
6524 return arch;
6525 }
6526
6527+#ifdef CONFIG_GRKERNSEC_SETXID
6528+extern void gr_delayed_cred_worker(void);
6529+#endif
6530+
6531 /*
6532 * Notification of system call entry/exit
6533 * - triggered by current->work.syscall_trace
6534@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
6535 /* do the secure computing check first */
6536 secure_computing_strict(regs->regs[2]);
6537
6538+#ifdef CONFIG_GRKERNSEC_SETXID
6539+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6540+ gr_delayed_cred_worker();
6541+#endif
6542+
6543 if (!(current->ptrace & PT_PTRACED))
6544 goto out;
6545
6546diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
6547index 9b36424..e7f4154 100644
6548--- a/arch/mips/kernel/scall32-o32.S
6549+++ b/arch/mips/kernel/scall32-o32.S
6550@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
6551
6552 stack_done:
6553 lw t0, TI_FLAGS($28) # syscall tracing enabled?
6554- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
6555+ li t1, _TIF_SYSCALL_WORK
6556 and t0, t1
6557 bnez t0, syscall_trace_entry # -> yes
6558
6559diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
6560index 97a5909..59622f8 100644
6561--- a/arch/mips/kernel/scall64-64.S
6562+++ b/arch/mips/kernel/scall64-64.S
6563@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
6564
6565 sd a3, PT_R26(sp) # save a3 for syscall restarting
6566
6567- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
6568+ li t1, _TIF_SYSCALL_WORK
6569 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
6570 and t0, t1, t0
6571 bnez t0, syscall_trace_entry
6572diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
6573index edcb659..fb2ab09 100644
6574--- a/arch/mips/kernel/scall64-n32.S
6575+++ b/arch/mips/kernel/scall64-n32.S
6576@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
6577
6578 sd a3, PT_R26(sp) # save a3 for syscall restarting
6579
6580- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
6581+ li t1, _TIF_SYSCALL_WORK
6582 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
6583 and t0, t1, t0
6584 bnez t0, n32_syscall_trace_entry
6585diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
6586index 74f485d..47d2c38 100644
6587--- a/arch/mips/kernel/scall64-o32.S
6588+++ b/arch/mips/kernel/scall64-o32.S
6589@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
6590 PTR 4b, bad_stack
6591 .previous
6592
6593- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
6594+ li t1, _TIF_SYSCALL_WORK
6595 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
6596 and t0, t1, t0
6597 bnez t0, trace_a_syscall
6598diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6599index a75ae40..0d0f56a 100644
6600--- a/arch/mips/kernel/traps.c
6601+++ b/arch/mips/kernel/traps.c
6602@@ -675,7 +675,17 @@ asmlinkage void do_ov(struct pt_regs *regs)
6603 {
6604 siginfo_t info;
6605
6606- die_if_kernel("Integer overflow", regs);
6607+ if (unlikely(!user_mode(regs))) {
6608+
6609+#ifdef CONFIG_PAX_REFCOUNT
6610+ if (fixup_exception(regs)) {
6611+ pax_report_refcount_overflow(regs);
6612+ return;
6613+ }
6614+#endif
6615+
6616+ die("Integer overflow", regs);
6617+ }
6618
6619 info.si_code = FPE_INTOVF;
6620 info.si_signo = SIGFPE;
6621diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6622index 0fead53..eeb00a6 100644
6623--- a/arch/mips/mm/fault.c
6624+++ b/arch/mips/mm/fault.c
6625@@ -27,6 +27,23 @@
6626 #include <asm/highmem.h> /* For VMALLOC_END */
6627 #include <linux/kdebug.h>
6628
6629+#ifdef CONFIG_PAX_PAGEEXEC
6630+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6631+{
6632+ unsigned long i;
6633+
6634+ printk(KERN_ERR "PAX: bytes at PC: ");
6635+ for (i = 0; i < 5; i++) {
6636+ unsigned int c;
6637+ if (get_user(c, (unsigned int *)pc+i))
6638+ printk(KERN_CONT "???????? ");
6639+ else
6640+ printk(KERN_CONT "%08x ", c);
6641+ }
6642+ printk("\n");
6643+}
6644+#endif
6645+
6646 /*
6647 * This routine handles page faults. It determines the address,
6648 * and the problem, and then passes it off to one of the appropriate
6649@@ -196,6 +213,14 @@ bad_area:
6650 bad_area_nosemaphore:
6651 /* User mode accesses just cause a SIGSEGV */
6652 if (user_mode(regs)) {
6653+
6654+#ifdef CONFIG_PAX_PAGEEXEC
6655+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6656+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6657+ do_group_exit(SIGKILL);
6658+ }
6659+#endif
6660+
6661 tsk->thread.cp0_badvaddr = address;
6662 tsk->thread.error_code = write;
6663 #if 0
6664diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6665index 7e5fe27..9656513 100644
6666--- a/arch/mips/mm/mmap.c
6667+++ b/arch/mips/mm/mmap.c
6668@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6669 struct vm_area_struct *vma;
6670 unsigned long addr = addr0;
6671 int do_color_align;
6672+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6673 struct vm_unmapped_area_info info;
6674
6675 if (unlikely(len > TASK_SIZE))
6676@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6677 do_color_align = 1;
6678
6679 /* requesting a specific address */
6680+
6681+#ifdef CONFIG_PAX_RANDMMAP
6682+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6683+#endif
6684+
6685 if (addr) {
6686 if (do_color_align)
6687 addr = COLOUR_ALIGN(addr, pgoff);
6688@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6689 addr = PAGE_ALIGN(addr);
6690
6691 vma = find_vma(mm, addr);
6692- if (TASK_SIZE - len >= addr &&
6693- (!vma || addr + len <= vma->vm_start))
6694+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6695 return addr;
6696 }
6697
6698 info.length = len;
6699 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6700 info.align_offset = pgoff << PAGE_SHIFT;
6701+ info.threadstack_offset = offset;
6702
6703 if (dir == DOWN) {
6704 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6705@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6706 {
6707 unsigned long random_factor = 0UL;
6708
6709+#ifdef CONFIG_PAX_RANDMMAP
6710+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6711+#endif
6712+
6713 if (current->flags & PF_RANDOMIZE) {
6714 random_factor = get_random_int();
6715 random_factor = random_factor << PAGE_SHIFT;
6716@@ -157,42 +167,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6717
6718 if (mmap_is_legacy()) {
6719 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6720+
6721+#ifdef CONFIG_PAX_RANDMMAP
6722+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6723+ mm->mmap_base += mm->delta_mmap;
6724+#endif
6725+
6726 mm->get_unmapped_area = arch_get_unmapped_area;
6727 mm->unmap_area = arch_unmap_area;
6728 } else {
6729 mm->mmap_base = mmap_base(random_factor);
6730+
6731+#ifdef CONFIG_PAX_RANDMMAP
6732+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6733+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6734+#endif
6735+
6736 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6737 mm->unmap_area = arch_unmap_area_topdown;
6738 }
6739 }
6740
6741-static inline unsigned long brk_rnd(void)
6742-{
6743- unsigned long rnd = get_random_int();
6744-
6745- rnd = rnd << PAGE_SHIFT;
6746- /* 8MB for 32bit, 256MB for 64bit */
6747- if (TASK_IS_32BIT_ADDR)
6748- rnd = rnd & 0x7ffffful;
6749- else
6750- rnd = rnd & 0xffffffful;
6751-
6752- return rnd;
6753-}
6754-
6755-unsigned long arch_randomize_brk(struct mm_struct *mm)
6756-{
6757- unsigned long base = mm->brk;
6758- unsigned long ret;
6759-
6760- ret = PAGE_ALIGN(base + brk_rnd());
6761-
6762- if (ret < mm->brk)
6763- return mm->brk;
6764-
6765- return ret;
6766-}
6767-
6768 int __virt_addr_valid(const volatile void *kaddr)
6769 {
6770 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
6771diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
6772index 967d144..db12197 100644
6773--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
6774+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
6775@@ -11,12 +11,14 @@
6776 #ifndef _ASM_PROC_CACHE_H
6777 #define _ASM_PROC_CACHE_H
6778
6779+#include <linux/const.h>
6780+
6781 /* L1 cache */
6782
6783 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
6784 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
6785-#define L1_CACHE_BYTES 16 /* bytes per entry */
6786 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
6787+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
6788 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
6789
6790 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
6791diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6792index bcb5df2..84fabd2 100644
6793--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6794+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6795@@ -16,13 +16,15 @@
6796 #ifndef _ASM_PROC_CACHE_H
6797 #define _ASM_PROC_CACHE_H
6798
6799+#include <linux/const.h>
6800+
6801 /*
6802 * L1 cache
6803 */
6804 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
6805 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
6806-#define L1_CACHE_BYTES 32 /* bytes per entry */
6807 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
6808+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
6809 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
6810
6811 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
6812diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
6813index 4ce7a01..449202a 100644
6814--- a/arch/openrisc/include/asm/cache.h
6815+++ b/arch/openrisc/include/asm/cache.h
6816@@ -19,11 +19,13 @@
6817 #ifndef __ASM_OPENRISC_CACHE_H
6818 #define __ASM_OPENRISC_CACHE_H
6819
6820+#include <linux/const.h>
6821+
6822 /* FIXME: How can we replace these with values from the CPU...
6823 * they shouldn't be hard-coded!
6824 */
6825
6826-#define L1_CACHE_BYTES 16
6827 #define L1_CACHE_SHIFT 4
6828+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6829
6830 #endif /* __ASM_OPENRISC_CACHE_H */
6831diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
6832index 472886c..00e7df9 100644
6833--- a/arch/parisc/include/asm/atomic.h
6834+++ b/arch/parisc/include/asm/atomic.h
6835@@ -252,6 +252,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
6836 return dec;
6837 }
6838
6839+#define atomic64_read_unchecked(v) atomic64_read(v)
6840+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6841+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6842+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6843+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6844+#define atomic64_inc_unchecked(v) atomic64_inc(v)
6845+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6846+#define atomic64_dec_unchecked(v) atomic64_dec(v)
6847+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6848+
6849 #endif /* !CONFIG_64BIT */
6850
6851
6852diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
6853index 47f11c7..3420df2 100644
6854--- a/arch/parisc/include/asm/cache.h
6855+++ b/arch/parisc/include/asm/cache.h
6856@@ -5,6 +5,7 @@
6857 #ifndef __ARCH_PARISC_CACHE_H
6858 #define __ARCH_PARISC_CACHE_H
6859
6860+#include <linux/const.h>
6861
6862 /*
6863 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
6864@@ -15,13 +16,13 @@
6865 * just ruin performance.
6866 */
6867 #ifdef CONFIG_PA20
6868-#define L1_CACHE_BYTES 64
6869 #define L1_CACHE_SHIFT 6
6870 #else
6871-#define L1_CACHE_BYTES 32
6872 #define L1_CACHE_SHIFT 5
6873 #endif
6874
6875+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6876+
6877 #ifndef __ASSEMBLY__
6878
6879 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6880diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
6881index ad2b503..bdf1651 100644
6882--- a/arch/parisc/include/asm/elf.h
6883+++ b/arch/parisc/include/asm/elf.h
6884@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
6885
6886 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
6887
6888+#ifdef CONFIG_PAX_ASLR
6889+#define PAX_ELF_ET_DYN_BASE 0x10000UL
6890+
6891+#define PAX_DELTA_MMAP_LEN 16
6892+#define PAX_DELTA_STACK_LEN 16
6893+#endif
6894+
6895 /* This yields a mask that user programs can use to figure out what
6896 instruction set this CPU supports. This could be done in user space,
6897 but it's not easy, and we've already done it here. */
6898diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
6899index fc987a1..6e068ef 100644
6900--- a/arch/parisc/include/asm/pgalloc.h
6901+++ b/arch/parisc/include/asm/pgalloc.h
6902@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
6903 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
6904 }
6905
6906+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
6907+{
6908+ pgd_populate(mm, pgd, pmd);
6909+}
6910+
6911 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
6912 {
6913 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
6914@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
6915 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
6916 #define pmd_free(mm, x) do { } while (0)
6917 #define pgd_populate(mm, pmd, pte) BUG()
6918+#define pgd_populate_kernel(mm, pmd, pte) BUG()
6919
6920 #endif
6921
6922diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
6923index 1e40d7f..a3eb445 100644
6924--- a/arch/parisc/include/asm/pgtable.h
6925+++ b/arch/parisc/include/asm/pgtable.h
6926@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
6927 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
6928 #define PAGE_COPY PAGE_EXECREAD
6929 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
6930+
6931+#ifdef CONFIG_PAX_PAGEEXEC
6932+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
6933+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
6934+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
6935+#else
6936+# define PAGE_SHARED_NOEXEC PAGE_SHARED
6937+# define PAGE_COPY_NOEXEC PAGE_COPY
6938+# define PAGE_READONLY_NOEXEC PAGE_READONLY
6939+#endif
6940+
6941 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
6942 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
6943 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
6944diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
6945index e0a8235..ce2f1e1 100644
6946--- a/arch/parisc/include/asm/uaccess.h
6947+++ b/arch/parisc/include/asm/uaccess.h
6948@@ -245,10 +245,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
6949 const void __user *from,
6950 unsigned long n)
6951 {
6952- int sz = __compiletime_object_size(to);
6953+ size_t sz = __compiletime_object_size(to);
6954 int ret = -EFAULT;
6955
6956- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
6957+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
6958 ret = __copy_from_user(to, from, n);
6959 else
6960 copy_from_user_overflow();
6961diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
6962index 2a625fb..9908930 100644
6963--- a/arch/parisc/kernel/module.c
6964+++ b/arch/parisc/kernel/module.c
6965@@ -98,16 +98,38 @@
6966
6967 /* three functions to determine where in the module core
6968 * or init pieces the location is */
6969+static inline int in_init_rx(struct module *me, void *loc)
6970+{
6971+ return (loc >= me->module_init_rx &&
6972+ loc < (me->module_init_rx + me->init_size_rx));
6973+}
6974+
6975+static inline int in_init_rw(struct module *me, void *loc)
6976+{
6977+ return (loc >= me->module_init_rw &&
6978+ loc < (me->module_init_rw + me->init_size_rw));
6979+}
6980+
6981 static inline int in_init(struct module *me, void *loc)
6982 {
6983- return (loc >= me->module_init &&
6984- loc <= (me->module_init + me->init_size));
6985+ return in_init_rx(me, loc) || in_init_rw(me, loc);
6986+}
6987+
6988+static inline int in_core_rx(struct module *me, void *loc)
6989+{
6990+ return (loc >= me->module_core_rx &&
6991+ loc < (me->module_core_rx + me->core_size_rx));
6992+}
6993+
6994+static inline int in_core_rw(struct module *me, void *loc)
6995+{
6996+ return (loc >= me->module_core_rw &&
6997+ loc < (me->module_core_rw + me->core_size_rw));
6998 }
6999
7000 static inline int in_core(struct module *me, void *loc)
7001 {
7002- return (loc >= me->module_core &&
7003- loc <= (me->module_core + me->core_size));
7004+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7005 }
7006
7007 static inline int in_local(struct module *me, void *loc)
7008@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7009 }
7010
7011 /* align things a bit */
7012- me->core_size = ALIGN(me->core_size, 16);
7013- me->arch.got_offset = me->core_size;
7014- me->core_size += gots * sizeof(struct got_entry);
7015+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7016+ me->arch.got_offset = me->core_size_rw;
7017+ me->core_size_rw += gots * sizeof(struct got_entry);
7018
7019- me->core_size = ALIGN(me->core_size, 16);
7020- me->arch.fdesc_offset = me->core_size;
7021- me->core_size += fdescs * sizeof(Elf_Fdesc);
7022+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7023+ me->arch.fdesc_offset = me->core_size_rw;
7024+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7025
7026 me->arch.got_max = gots;
7027 me->arch.fdesc_max = fdescs;
7028@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7029
7030 BUG_ON(value == 0);
7031
7032- got = me->module_core + me->arch.got_offset;
7033+ got = me->module_core_rw + me->arch.got_offset;
7034 for (i = 0; got[i].addr; i++)
7035 if (got[i].addr == value)
7036 goto out;
7037@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7038 #ifdef CONFIG_64BIT
7039 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7040 {
7041- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7042+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7043
7044 if (!value) {
7045 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7046@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7047
7048 /* Create new one */
7049 fdesc->addr = value;
7050- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7051+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7052 return (Elf_Addr)fdesc;
7053 }
7054 #endif /* CONFIG_64BIT */
7055@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
7056
7057 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7058 end = table + sechdrs[me->arch.unwind_section].sh_size;
7059- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7060+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7061
7062 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7063 me->arch.unwind_section, table, end, gp);
7064diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7065index 5dfd248..64914ac 100644
7066--- a/arch/parisc/kernel/sys_parisc.c
7067+++ b/arch/parisc/kernel/sys_parisc.c
7068@@ -33,9 +33,11 @@
7069 #include <linux/utsname.h>
7070 #include <linux/personality.h>
7071
7072-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
7073+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
7074+ unsigned long flags)
7075 {
7076 struct vm_unmapped_area_info info;
7077+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7078
7079 info.flags = 0;
7080 info.length = len;
7081@@ -43,6 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
7082 info.high_limit = TASK_SIZE;
7083 info.align_mask = 0;
7084 info.align_offset = 0;
7085+ info.threadstack_offset = offset;
7086 return vm_unmapped_area(&info);
7087 }
7088
7089@@ -61,10 +64,11 @@ static int get_offset(struct address_space *mapping)
7090 return (unsigned long) mapping >> 8;
7091 }
7092
7093-static unsigned long get_shared_area(struct address_space *mapping,
7094- unsigned long addr, unsigned long len, unsigned long pgoff)
7095+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
7096+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
7097 {
7098 struct vm_unmapped_area_info info;
7099+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7100
7101 info.flags = 0;
7102 info.length = len;
7103@@ -72,6 +76,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
7104 info.high_limit = TASK_SIZE;
7105 info.align_mask = PAGE_MASK & (SHMLBA - 1);
7106 info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT;
7107+ info.threadstack_offset = offset;
7108 return vm_unmapped_area(&info);
7109 }
7110
7111@@ -86,15 +91,22 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7112 return -EINVAL;
7113 return addr;
7114 }
7115- if (!addr)
7116+ if (!addr) {
7117 addr = TASK_UNMAPPED_BASE;
7118
7119+#ifdef CONFIG_PAX_RANDMMAP
7120+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
7121+ addr += current->mm->delta_mmap;
7122+#endif
7123+
7124+ }
7125+
7126 if (filp) {
7127- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
7128+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
7129 } else if(flags & MAP_SHARED) {
7130- addr = get_shared_area(NULL, addr, len, pgoff);
7131+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
7132 } else {
7133- addr = get_unshared_area(addr, len);
7134+ addr = get_unshared_area(filp, addr, len, flags);
7135 }
7136 return addr;
7137 }
7138diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7139index 04e47c6..7a8faf6 100644
7140--- a/arch/parisc/kernel/traps.c
7141+++ b/arch/parisc/kernel/traps.c
7142@@ -727,9 +727,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7143
7144 down_read(&current->mm->mmap_sem);
7145 vma = find_vma(current->mm,regs->iaoq[0]);
7146- if (vma && (regs->iaoq[0] >= vma->vm_start)
7147- && (vma->vm_flags & VM_EXEC)) {
7148-
7149+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7150 fault_address = regs->iaoq[0];
7151 fault_space = regs->iasq[0];
7152
7153diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7154index f247a34..dc0f219 100644
7155--- a/arch/parisc/mm/fault.c
7156+++ b/arch/parisc/mm/fault.c
7157@@ -15,6 +15,7 @@
7158 #include <linux/sched.h>
7159 #include <linux/interrupt.h>
7160 #include <linux/module.h>
7161+#include <linux/unistd.h>
7162
7163 #include <asm/uaccess.h>
7164 #include <asm/traps.h>
7165@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
7166 static unsigned long
7167 parisc_acctyp(unsigned long code, unsigned int inst)
7168 {
7169- if (code == 6 || code == 16)
7170+ if (code == 6 || code == 7 || code == 16)
7171 return VM_EXEC;
7172
7173 switch (inst & 0xf0000000) {
7174@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7175 }
7176 #endif
7177
7178+#ifdef CONFIG_PAX_PAGEEXEC
7179+/*
7180+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7181+ *
7182+ * returns 1 when task should be killed
7183+ * 2 when rt_sigreturn trampoline was detected
7184+ * 3 when unpatched PLT trampoline was detected
7185+ */
7186+static int pax_handle_fetch_fault(struct pt_regs *regs)
7187+{
7188+
7189+#ifdef CONFIG_PAX_EMUPLT
7190+ int err;
7191+
7192+ do { /* PaX: unpatched PLT emulation */
7193+ unsigned int bl, depwi;
7194+
7195+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7196+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7197+
7198+ if (err)
7199+ break;
7200+
7201+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7202+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7203+
7204+ err = get_user(ldw, (unsigned int *)addr);
7205+ err |= get_user(bv, (unsigned int *)(addr+4));
7206+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7207+
7208+ if (err)
7209+ break;
7210+
7211+ if (ldw == 0x0E801096U &&
7212+ bv == 0xEAC0C000U &&
7213+ ldw2 == 0x0E881095U)
7214+ {
7215+ unsigned int resolver, map;
7216+
7217+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7218+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7219+ if (err)
7220+ break;
7221+
7222+ regs->gr[20] = instruction_pointer(regs)+8;
7223+ regs->gr[21] = map;
7224+ regs->gr[22] = resolver;
7225+ regs->iaoq[0] = resolver | 3UL;
7226+ regs->iaoq[1] = regs->iaoq[0] + 4;
7227+ return 3;
7228+ }
7229+ }
7230+ } while (0);
7231+#endif
7232+
7233+#ifdef CONFIG_PAX_EMUTRAMP
7234+
7235+#ifndef CONFIG_PAX_EMUSIGRT
7236+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7237+ return 1;
7238+#endif
7239+
7240+ do { /* PaX: rt_sigreturn emulation */
7241+ unsigned int ldi1, ldi2, bel, nop;
7242+
7243+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7244+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7245+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7246+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7247+
7248+ if (err)
7249+ break;
7250+
7251+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7252+ ldi2 == 0x3414015AU &&
7253+ bel == 0xE4008200U &&
7254+ nop == 0x08000240U)
7255+ {
7256+ regs->gr[25] = (ldi1 & 2) >> 1;
7257+ regs->gr[20] = __NR_rt_sigreturn;
7258+ regs->gr[31] = regs->iaoq[1] + 16;
7259+ regs->sr[0] = regs->iasq[1];
7260+ regs->iaoq[0] = 0x100UL;
7261+ regs->iaoq[1] = regs->iaoq[0] + 4;
7262+ regs->iasq[0] = regs->sr[2];
7263+ regs->iasq[1] = regs->sr[2];
7264+ return 2;
7265+ }
7266+ } while (0);
7267+#endif
7268+
7269+ return 1;
7270+}
7271+
7272+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7273+{
7274+ unsigned long i;
7275+
7276+ printk(KERN_ERR "PAX: bytes at PC: ");
7277+ for (i = 0; i < 5; i++) {
7278+ unsigned int c;
7279+ if (get_user(c, (unsigned int *)pc+i))
7280+ printk(KERN_CONT "???????? ");
7281+ else
7282+ printk(KERN_CONT "%08x ", c);
7283+ }
7284+ printk("\n");
7285+}
7286+#endif
7287+
7288 int fixup_exception(struct pt_regs *regs)
7289 {
7290 const struct exception_table_entry *fix;
7291@@ -194,8 +305,33 @@ good_area:
7292
7293 acc_type = parisc_acctyp(code,regs->iir);
7294
7295- if ((vma->vm_flags & acc_type) != acc_type)
7296+ if ((vma->vm_flags & acc_type) != acc_type) {
7297+
7298+#ifdef CONFIG_PAX_PAGEEXEC
7299+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7300+ (address & ~3UL) == instruction_pointer(regs))
7301+ {
7302+ up_read(&mm->mmap_sem);
7303+ switch (pax_handle_fetch_fault(regs)) {
7304+
7305+#ifdef CONFIG_PAX_EMUPLT
7306+ case 3:
7307+ return;
7308+#endif
7309+
7310+#ifdef CONFIG_PAX_EMUTRAMP
7311+ case 2:
7312+ return;
7313+#endif
7314+
7315+ }
7316+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7317+ do_group_exit(SIGKILL);
7318+ }
7319+#endif
7320+
7321 goto bad_area;
7322+ }
7323
7324 /*
7325 * If for any reason at all we couldn't handle the fault, make
7326diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7327index e3b1d41..8e81edf 100644
7328--- a/arch/powerpc/include/asm/atomic.h
7329+++ b/arch/powerpc/include/asm/atomic.h
7330@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
7331 return t1;
7332 }
7333
7334+#define atomic64_read_unchecked(v) atomic64_read(v)
7335+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7336+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7337+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7338+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7339+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7340+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7341+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7342+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7343+
7344 #endif /* __powerpc64__ */
7345
7346 #endif /* __KERNEL__ */
7347diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
7348index 9e495c9..b6878e5 100644
7349--- a/arch/powerpc/include/asm/cache.h
7350+++ b/arch/powerpc/include/asm/cache.h
7351@@ -3,6 +3,7 @@
7352
7353 #ifdef __KERNEL__
7354
7355+#include <linux/const.h>
7356
7357 /* bytes per L1 cache line */
7358 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
7359@@ -22,7 +23,7 @@
7360 #define L1_CACHE_SHIFT 7
7361 #endif
7362
7363-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7364+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7365
7366 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7367
7368diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
7369index cc0655a..13eac2e 100644
7370--- a/arch/powerpc/include/asm/elf.h
7371+++ b/arch/powerpc/include/asm/elf.h
7372@@ -28,8 +28,19 @@
7373 the loader. We need to make sure that it is out of the way of the program
7374 that it will "exec", and that there is sufficient room for the brk. */
7375
7376-extern unsigned long randomize_et_dyn(unsigned long base);
7377-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
7378+#define ELF_ET_DYN_BASE (0x20000000)
7379+
7380+#ifdef CONFIG_PAX_ASLR
7381+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
7382+
7383+#ifdef __powerpc64__
7384+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
7385+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
7386+#else
7387+#define PAX_DELTA_MMAP_LEN 15
7388+#define PAX_DELTA_STACK_LEN 15
7389+#endif
7390+#endif
7391
7392 /*
7393 * Our registers are always unsigned longs, whether we're a 32 bit
7394@@ -123,10 +134,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
7395 (0x7ff >> (PAGE_SHIFT - 12)) : \
7396 (0x3ffff >> (PAGE_SHIFT - 12)))
7397
7398-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7399-#define arch_randomize_brk arch_randomize_brk
7400-
7401-
7402 #ifdef CONFIG_SPU_BASE
7403 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
7404 #define NT_SPU 1
7405diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
7406index 8196e9c..d83a9f3 100644
7407--- a/arch/powerpc/include/asm/exec.h
7408+++ b/arch/powerpc/include/asm/exec.h
7409@@ -4,6 +4,6 @@
7410 #ifndef _ASM_POWERPC_EXEC_H
7411 #define _ASM_POWERPC_EXEC_H
7412
7413-extern unsigned long arch_align_stack(unsigned long sp);
7414+#define arch_align_stack(x) ((x) & ~0xfUL)
7415
7416 #endif /* _ASM_POWERPC_EXEC_H */
7417diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
7418index 5acabbd..7ea14fa 100644
7419--- a/arch/powerpc/include/asm/kmap_types.h
7420+++ b/arch/powerpc/include/asm/kmap_types.h
7421@@ -10,7 +10,7 @@
7422 * 2 of the License, or (at your option) any later version.
7423 */
7424
7425-#define KM_TYPE_NR 16
7426+#define KM_TYPE_NR 17
7427
7428 #endif /* __KERNEL__ */
7429 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
7430diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
7431index 8565c25..2865190 100644
7432--- a/arch/powerpc/include/asm/mman.h
7433+++ b/arch/powerpc/include/asm/mman.h
7434@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
7435 }
7436 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
7437
7438-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
7439+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
7440 {
7441 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
7442 }
7443diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
7444index 988c812..63c7d70 100644
7445--- a/arch/powerpc/include/asm/page.h
7446+++ b/arch/powerpc/include/asm/page.h
7447@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
7448 * and needs to be executable. This means the whole heap ends
7449 * up being executable.
7450 */
7451-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
7452- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7453+#define VM_DATA_DEFAULT_FLAGS32 \
7454+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
7455+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7456
7457 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
7458 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7459@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
7460 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
7461 #endif
7462
7463+#define ktla_ktva(addr) (addr)
7464+#define ktva_ktla(addr) (addr)
7465+
7466 #ifndef CONFIG_PPC_BOOK3S_64
7467 /*
7468 * Use the top bit of the higher-level page table entries to indicate whether
7469diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
7470index 88693ce..ac6f9ab 100644
7471--- a/arch/powerpc/include/asm/page_64.h
7472+++ b/arch/powerpc/include/asm/page_64.h
7473@@ -153,15 +153,18 @@ do { \
7474 * stack by default, so in the absence of a PT_GNU_STACK program header
7475 * we turn execute permission off.
7476 */
7477-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
7478- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7479+#define VM_STACK_DEFAULT_FLAGS32 \
7480+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
7481+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7482
7483 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
7484 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7485
7486+#ifndef CONFIG_PAX_PAGEEXEC
7487 #define VM_STACK_DEFAULT_FLAGS \
7488 (is_32bit_task() ? \
7489 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
7490+#endif
7491
7492 #include <asm-generic/getorder.h>
7493
7494diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
7495index b66ae72..4a378cd 100644
7496--- a/arch/powerpc/include/asm/pgalloc-64.h
7497+++ b/arch/powerpc/include/asm/pgalloc-64.h
7498@@ -53,6 +53,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
7499 #ifndef CONFIG_PPC_64K_PAGES
7500
7501 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
7502+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
7503
7504 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
7505 {
7506@@ -70,6 +71,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7507 pud_set(pud, (unsigned long)pmd);
7508 }
7509
7510+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7511+{
7512+ pud_populate(mm, pud, pmd);
7513+}
7514+
7515 #define pmd_populate(mm, pmd, pte_page) \
7516 pmd_populate_kernel(mm, pmd, page_address(pte_page))
7517 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
7518@@ -171,6 +177,7 @@ extern void __tlb_remove_table(void *_table);
7519 #endif
7520
7521 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
7522+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7523
7524 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
7525 pte_t *pte)
7526diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
7527index 7aeb955..19f748e 100644
7528--- a/arch/powerpc/include/asm/pgtable.h
7529+++ b/arch/powerpc/include/asm/pgtable.h
7530@@ -2,6 +2,7 @@
7531 #define _ASM_POWERPC_PGTABLE_H
7532 #ifdef __KERNEL__
7533
7534+#include <linux/const.h>
7535 #ifndef __ASSEMBLY__
7536 #include <asm/processor.h> /* For TASK_SIZE */
7537 #include <asm/mmu.h>
7538diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
7539index 4aad413..85d86bf 100644
7540--- a/arch/powerpc/include/asm/pte-hash32.h
7541+++ b/arch/powerpc/include/asm/pte-hash32.h
7542@@ -21,6 +21,7 @@
7543 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
7544 #define _PAGE_USER 0x004 /* usermode access allowed */
7545 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
7546+#define _PAGE_EXEC _PAGE_GUARDED
7547 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
7548 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
7549 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
7550diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
7551index e1fb161..2290d1d 100644
7552--- a/arch/powerpc/include/asm/reg.h
7553+++ b/arch/powerpc/include/asm/reg.h
7554@@ -234,6 +234,7 @@
7555 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
7556 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
7557 #define DSISR_NOHPTE 0x40000000 /* no translation found */
7558+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
7559 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
7560 #define DSISR_ISSTORE 0x02000000 /* access was a store */
7561 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
7562diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
7563index 48cfc85..891382f 100644
7564--- a/arch/powerpc/include/asm/smp.h
7565+++ b/arch/powerpc/include/asm/smp.h
7566@@ -50,7 +50,7 @@ struct smp_ops_t {
7567 int (*cpu_disable)(void);
7568 void (*cpu_die)(unsigned int nr);
7569 int (*cpu_bootable)(unsigned int nr);
7570-};
7571+} __no_const;
7572
7573 extern void smp_send_debugger_break(void);
7574 extern void start_secondary_resume(void);
7575diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
7576index ba7b197..d292e26 100644
7577--- a/arch/powerpc/include/asm/thread_info.h
7578+++ b/arch/powerpc/include/asm/thread_info.h
7579@@ -93,7 +93,6 @@ static inline struct thread_info *current_thread_info(void)
7580 #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
7581 TIF_NEED_RESCHED */
7582 #define TIF_32BIT 4 /* 32 bit binary */
7583-#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */
7584 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
7585 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
7586 #define TIF_SINGLESTEP 8 /* singlestepping active */
7587@@ -107,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
7588 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
7589 for stack store? */
7590 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
7591+#define TIF_PERFMON_WORK 18 /* work for pfm_handle_work() */
7592+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
7593+#define TIF_GRSEC_SETXID 5 /* update credentials on syscall entry/exit */
7594
7595 /* as above, but as bit values */
7596 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
7597@@ -126,9 +128,10 @@ static inline struct thread_info *current_thread_info(void)
7598 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
7599 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
7600 #define _TIF_NOHZ (1<<TIF_NOHZ)
7601+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
7602 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
7603 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
7604- _TIF_NOHZ)
7605+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
7606
7607 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
7608 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
7609diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
7610index 4db4959..aba5c41 100644
7611--- a/arch/powerpc/include/asm/uaccess.h
7612+++ b/arch/powerpc/include/asm/uaccess.h
7613@@ -318,52 +318,6 @@ do { \
7614 extern unsigned long __copy_tofrom_user(void __user *to,
7615 const void __user *from, unsigned long size);
7616
7617-#ifndef __powerpc64__
7618-
7619-static inline unsigned long copy_from_user(void *to,
7620- const void __user *from, unsigned long n)
7621-{
7622- unsigned long over;
7623-
7624- if (access_ok(VERIFY_READ, from, n))
7625- return __copy_tofrom_user((__force void __user *)to, from, n);
7626- if ((unsigned long)from < TASK_SIZE) {
7627- over = (unsigned long)from + n - TASK_SIZE;
7628- return __copy_tofrom_user((__force void __user *)to, from,
7629- n - over) + over;
7630- }
7631- return n;
7632-}
7633-
7634-static inline unsigned long copy_to_user(void __user *to,
7635- const void *from, unsigned long n)
7636-{
7637- unsigned long over;
7638-
7639- if (access_ok(VERIFY_WRITE, to, n))
7640- return __copy_tofrom_user(to, (__force void __user *)from, n);
7641- if ((unsigned long)to < TASK_SIZE) {
7642- over = (unsigned long)to + n - TASK_SIZE;
7643- return __copy_tofrom_user(to, (__force void __user *)from,
7644- n - over) + over;
7645- }
7646- return n;
7647-}
7648-
7649-#else /* __powerpc64__ */
7650-
7651-#define __copy_in_user(to, from, size) \
7652- __copy_tofrom_user((to), (from), (size))
7653-
7654-extern unsigned long copy_from_user(void *to, const void __user *from,
7655- unsigned long n);
7656-extern unsigned long copy_to_user(void __user *to, const void *from,
7657- unsigned long n);
7658-extern unsigned long copy_in_user(void __user *to, const void __user *from,
7659- unsigned long n);
7660-
7661-#endif /* __powerpc64__ */
7662-
7663 static inline unsigned long __copy_from_user_inatomic(void *to,
7664 const void __user *from, unsigned long n)
7665 {
7666@@ -387,6 +341,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
7667 if (ret == 0)
7668 return 0;
7669 }
7670+
7671+ if (!__builtin_constant_p(n))
7672+ check_object_size(to, n, false);
7673+
7674 return __copy_tofrom_user((__force void __user *)to, from, n);
7675 }
7676
7677@@ -413,6 +371,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
7678 if (ret == 0)
7679 return 0;
7680 }
7681+
7682+ if (!__builtin_constant_p(n))
7683+ check_object_size(from, n, true);
7684+
7685 return __copy_tofrom_user(to, (__force const void __user *)from, n);
7686 }
7687
7688@@ -430,6 +392,92 @@ static inline unsigned long __copy_to_user(void __user *to,
7689 return __copy_to_user_inatomic(to, from, size);
7690 }
7691
7692+#ifndef __powerpc64__
7693+
7694+static inline unsigned long __must_check copy_from_user(void *to,
7695+ const void __user *from, unsigned long n)
7696+{
7697+ unsigned long over;
7698+
7699+ if ((long)n < 0)
7700+ return n;
7701+
7702+ if (access_ok(VERIFY_READ, from, n)) {
7703+ if (!__builtin_constant_p(n))
7704+ check_object_size(to, n, false);
7705+ return __copy_tofrom_user((__force void __user *)to, from, n);
7706+ }
7707+ if ((unsigned long)from < TASK_SIZE) {
7708+ over = (unsigned long)from + n - TASK_SIZE;
7709+ if (!__builtin_constant_p(n - over))
7710+ check_object_size(to, n - over, false);
7711+ return __copy_tofrom_user((__force void __user *)to, from,
7712+ n - over) + over;
7713+ }
7714+ return n;
7715+}
7716+
7717+static inline unsigned long __must_check copy_to_user(void __user *to,
7718+ const void *from, unsigned long n)
7719+{
7720+ unsigned long over;
7721+
7722+ if ((long)n < 0)
7723+ return n;
7724+
7725+ if (access_ok(VERIFY_WRITE, to, n)) {
7726+ if (!__builtin_constant_p(n))
7727+ check_object_size(from, n, true);
7728+ return __copy_tofrom_user(to, (__force void __user *)from, n);
7729+ }
7730+ if ((unsigned long)to < TASK_SIZE) {
7731+ over = (unsigned long)to + n - TASK_SIZE;
7732+ if (!__builtin_constant_p(n))
7733+ check_object_size(from, n - over, true);
7734+ return __copy_tofrom_user(to, (__force void __user *)from,
7735+ n - over) + over;
7736+ }
7737+ return n;
7738+}
7739+
7740+#else /* __powerpc64__ */
7741+
7742+#define __copy_in_user(to, from, size) \
7743+ __copy_tofrom_user((to), (from), (size))
7744+
7745+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
7746+{
7747+ if ((long)n < 0 || n > INT_MAX)
7748+ return n;
7749+
7750+ if (!__builtin_constant_p(n))
7751+ check_object_size(to, n, false);
7752+
7753+ if (likely(access_ok(VERIFY_READ, from, n)))
7754+ n = __copy_from_user(to, from, n);
7755+ else
7756+ memset(to, 0, n);
7757+ return n;
7758+}
7759+
7760+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
7761+{
7762+ if ((long)n < 0 || n > INT_MAX)
7763+ return n;
7764+
7765+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
7766+ if (!__builtin_constant_p(n))
7767+ check_object_size(from, n, true);
7768+ n = __copy_to_user(to, from, n);
7769+ }
7770+ return n;
7771+}
7772+
7773+extern unsigned long copy_in_user(void __user *to, const void __user *from,
7774+ unsigned long n);
7775+
7776+#endif /* __powerpc64__ */
7777+
7778 extern unsigned long __clear_user(void __user *addr, unsigned long size);
7779
7780 static inline unsigned long clear_user(void __user *addr, unsigned long size)
7781diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
7782index 645170a..6cf0271 100644
7783--- a/arch/powerpc/kernel/exceptions-64e.S
7784+++ b/arch/powerpc/kernel/exceptions-64e.S
7785@@ -757,6 +757,7 @@ storage_fault_common:
7786 std r14,_DAR(r1)
7787 std r15,_DSISR(r1)
7788 addi r3,r1,STACK_FRAME_OVERHEAD
7789+ bl .save_nvgprs
7790 mr r4,r14
7791 mr r5,r15
7792 ld r14,PACA_EXGEN+EX_R14(r13)
7793@@ -765,8 +766,7 @@ storage_fault_common:
7794 cmpdi r3,0
7795 bne- 1f
7796 b .ret_from_except_lite
7797-1: bl .save_nvgprs
7798- mr r5,r3
7799+1: mr r5,r3
7800 addi r3,r1,STACK_FRAME_OVERHEAD
7801 ld r4,_DAR(r1)
7802 bl .bad_page_fault
7803diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
7804index 902ca3c..e942155 100644
7805--- a/arch/powerpc/kernel/exceptions-64s.S
7806+++ b/arch/powerpc/kernel/exceptions-64s.S
7807@@ -1357,10 +1357,10 @@ handle_page_fault:
7808 11: ld r4,_DAR(r1)
7809 ld r5,_DSISR(r1)
7810 addi r3,r1,STACK_FRAME_OVERHEAD
7811+ bl .save_nvgprs
7812 bl .do_page_fault
7813 cmpdi r3,0
7814 beq+ 12f
7815- bl .save_nvgprs
7816 mr r5,r3
7817 addi r3,r1,STACK_FRAME_OVERHEAD
7818 lwz r4,_DAR(r1)
7819diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
7820index 2e3200c..72095ce 100644
7821--- a/arch/powerpc/kernel/module_32.c
7822+++ b/arch/powerpc/kernel/module_32.c
7823@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
7824 me->arch.core_plt_section = i;
7825 }
7826 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
7827- printk("Module doesn't contain .plt or .init.plt sections.\n");
7828+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
7829 return -ENOEXEC;
7830 }
7831
7832@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
7833
7834 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
7835 /* Init, or core PLT? */
7836- if (location >= mod->module_core
7837- && location < mod->module_core + mod->core_size)
7838+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
7839+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
7840 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
7841- else
7842+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
7843+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
7844 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
7845+ else {
7846+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
7847+ return ~0UL;
7848+ }
7849
7850 /* Find this entry, or if that fails, the next avail. entry */
7851 while (entry->jump[0]) {
7852diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
7853index 7baa27b..f6b394a 100644
7854--- a/arch/powerpc/kernel/process.c
7855+++ b/arch/powerpc/kernel/process.c
7856@@ -884,8 +884,8 @@ void show_regs(struct pt_regs * regs)
7857 * Lookup NIP late so we have the best change of getting the
7858 * above info out without failing
7859 */
7860- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
7861- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
7862+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
7863+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
7864 #endif
7865 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
7866 printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
7867@@ -1345,10 +1345,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
7868 newsp = stack[0];
7869 ip = stack[STACK_FRAME_LR_SAVE];
7870 if (!firstframe || ip != lr) {
7871- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
7872+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
7873 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
7874 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
7875- printk(" (%pS)",
7876+ printk(" (%pA)",
7877 (void *)current->ret_stack[curr_frame].ret);
7878 curr_frame--;
7879 }
7880@@ -1368,7 +1368,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
7881 struct pt_regs *regs = (struct pt_regs *)
7882 (sp + STACK_FRAME_OVERHEAD);
7883 lr = regs->link;
7884- printk("--- Exception: %lx at %pS\n LR = %pS\n",
7885+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
7886 regs->trap, (void *)regs->nip, (void *)lr);
7887 firstframe = 1;
7888 }
7889@@ -1404,58 +1404,3 @@ void notrace __ppc64_runlatch_off(void)
7890 mtspr(SPRN_CTRLT, ctrl);
7891 }
7892 #endif /* CONFIG_PPC64 */
7893-
7894-unsigned long arch_align_stack(unsigned long sp)
7895-{
7896- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7897- sp -= get_random_int() & ~PAGE_MASK;
7898- return sp & ~0xf;
7899-}
7900-
7901-static inline unsigned long brk_rnd(void)
7902-{
7903- unsigned long rnd = 0;
7904-
7905- /* 8MB for 32bit, 1GB for 64bit */
7906- if (is_32bit_task())
7907- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
7908- else
7909- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
7910-
7911- return rnd << PAGE_SHIFT;
7912-}
7913-
7914-unsigned long arch_randomize_brk(struct mm_struct *mm)
7915-{
7916- unsigned long base = mm->brk;
7917- unsigned long ret;
7918-
7919-#ifdef CONFIG_PPC_STD_MMU_64
7920- /*
7921- * If we are using 1TB segments and we are allowed to randomise
7922- * the heap, we can put it above 1TB so it is backed by a 1TB
7923- * segment. Otherwise the heap will be in the bottom 1TB
7924- * which always uses 256MB segments and this may result in a
7925- * performance penalty.
7926- */
7927- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
7928- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
7929-#endif
7930-
7931- ret = PAGE_ALIGN(base + brk_rnd());
7932-
7933- if (ret < mm->brk)
7934- return mm->brk;
7935-
7936- return ret;
7937-}
7938-
7939-unsigned long randomize_et_dyn(unsigned long base)
7940-{
7941- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7942-
7943- if (ret < base)
7944- return base;
7945-
7946- return ret;
7947-}
7948diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
7949index 64f7bd5..8dd550f 100644
7950--- a/arch/powerpc/kernel/ptrace.c
7951+++ b/arch/powerpc/kernel/ptrace.c
7952@@ -1783,6 +1783,10 @@ long arch_ptrace(struct task_struct *child, long request,
7953 return ret;
7954 }
7955
7956+#ifdef CONFIG_GRKERNSEC_SETXID
7957+extern void gr_delayed_cred_worker(void);
7958+#endif
7959+
7960 /*
7961 * We must return the syscall number to actually look up in the table.
7962 * This can be -1L to skip running any syscall at all.
7963@@ -1795,6 +1799,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
7964
7965 secure_computing_strict(regs->gpr[0]);
7966
7967+#ifdef CONFIG_GRKERNSEC_SETXID
7968+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7969+ gr_delayed_cred_worker();
7970+#endif
7971+
7972 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
7973 tracehook_report_syscall_entry(regs))
7974 /*
7975@@ -1829,6 +1838,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
7976 {
7977 int step;
7978
7979+#ifdef CONFIG_GRKERNSEC_SETXID
7980+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7981+ gr_delayed_cred_worker();
7982+#endif
7983+
7984 audit_syscall_exit(regs);
7985
7986 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
7987diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
7988index 0f83122..c0aca6a 100644
7989--- a/arch/powerpc/kernel/signal_32.c
7990+++ b/arch/powerpc/kernel/signal_32.c
7991@@ -987,7 +987,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
7992 /* Save user registers on the stack */
7993 frame = &rt_sf->uc.uc_mcontext;
7994 addr = frame;
7995- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
7996+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
7997 sigret = 0;
7998 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
7999 } else {
8000diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
8001index 887e99d..310bc11 100644
8002--- a/arch/powerpc/kernel/signal_64.c
8003+++ b/arch/powerpc/kernel/signal_64.c
8004@@ -751,7 +751,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
8005 #endif
8006
8007 /* Set up to return from userspace. */
8008- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
8009+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
8010 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
8011 } else {
8012 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
8013diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
8014index e68a845..8b140e6 100644
8015--- a/arch/powerpc/kernel/sysfs.c
8016+++ b/arch/powerpc/kernel/sysfs.c
8017@@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
8018 return NOTIFY_OK;
8019 }
8020
8021-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
8022+static struct notifier_block sysfs_cpu_nb = {
8023 .notifier_call = sysfs_cpu_notify,
8024 };
8025
8026diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
8027index 88929b1..bece8f8 100644
8028--- a/arch/powerpc/kernel/traps.c
8029+++ b/arch/powerpc/kernel/traps.c
8030@@ -141,6 +141,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
8031 return flags;
8032 }
8033
8034+extern void gr_handle_kernel_exploit(void);
8035+
8036 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8037 int signr)
8038 {
8039@@ -190,6 +192,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8040 panic("Fatal exception in interrupt");
8041 if (panic_on_oops)
8042 panic("Fatal exception");
8043+
8044+ gr_handle_kernel_exploit();
8045+
8046 do_exit(signr);
8047 }
8048
8049diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
8050index d4f463a..8fb7431 100644
8051--- a/arch/powerpc/kernel/vdso.c
8052+++ b/arch/powerpc/kernel/vdso.c
8053@@ -34,6 +34,7 @@
8054 #include <asm/firmware.h>
8055 #include <asm/vdso.h>
8056 #include <asm/vdso_datapage.h>
8057+#include <asm/mman.h>
8058
8059 #include "setup.h"
8060
8061@@ -222,7 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8062 vdso_base = VDSO32_MBASE;
8063 #endif
8064
8065- current->mm->context.vdso_base = 0;
8066+ current->mm->context.vdso_base = ~0UL;
8067
8068 /* vDSO has a problem and was disabled, just don't "enable" it for the
8069 * process
8070@@ -242,7 +243,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8071 vdso_base = get_unmapped_area(NULL, vdso_base,
8072 (vdso_pages << PAGE_SHIFT) +
8073 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
8074- 0, 0);
8075+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
8076 if (IS_ERR_VALUE(vdso_base)) {
8077 rc = vdso_base;
8078 goto fail_mmapsem;
8079diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
8080index 5eea6f3..5d10396 100644
8081--- a/arch/powerpc/lib/usercopy_64.c
8082+++ b/arch/powerpc/lib/usercopy_64.c
8083@@ -9,22 +9,6 @@
8084 #include <linux/module.h>
8085 #include <asm/uaccess.h>
8086
8087-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8088-{
8089- if (likely(access_ok(VERIFY_READ, from, n)))
8090- n = __copy_from_user(to, from, n);
8091- else
8092- memset(to, 0, n);
8093- return n;
8094-}
8095-
8096-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8097-{
8098- if (likely(access_ok(VERIFY_WRITE, to, n)))
8099- n = __copy_to_user(to, from, n);
8100- return n;
8101-}
8102-
8103 unsigned long copy_in_user(void __user *to, const void __user *from,
8104 unsigned long n)
8105 {
8106@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
8107 return n;
8108 }
8109
8110-EXPORT_SYMBOL(copy_from_user);
8111-EXPORT_SYMBOL(copy_to_user);
8112 EXPORT_SYMBOL(copy_in_user);
8113
8114diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
8115index 8726779..a33c512 100644
8116--- a/arch/powerpc/mm/fault.c
8117+++ b/arch/powerpc/mm/fault.c
8118@@ -33,6 +33,10 @@
8119 #include <linux/magic.h>
8120 #include <linux/ratelimit.h>
8121 #include <linux/context_tracking.h>
8122+#include <linux/slab.h>
8123+#include <linux/pagemap.h>
8124+#include <linux/compiler.h>
8125+#include <linux/unistd.h>
8126
8127 #include <asm/firmware.h>
8128 #include <asm/page.h>
8129@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
8130 }
8131 #endif
8132
8133+#ifdef CONFIG_PAX_PAGEEXEC
8134+/*
8135+ * PaX: decide what to do with offenders (regs->nip = fault address)
8136+ *
8137+ * returns 1 when task should be killed
8138+ */
8139+static int pax_handle_fetch_fault(struct pt_regs *regs)
8140+{
8141+ return 1;
8142+}
8143+
8144+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8145+{
8146+ unsigned long i;
8147+
8148+ printk(KERN_ERR "PAX: bytes at PC: ");
8149+ for (i = 0; i < 5; i++) {
8150+ unsigned int c;
8151+ if (get_user(c, (unsigned int __user *)pc+i))
8152+ printk(KERN_CONT "???????? ");
8153+ else
8154+ printk(KERN_CONT "%08x ", c);
8155+ }
8156+ printk("\n");
8157+}
8158+#endif
8159+
8160 /*
8161 * Check whether the instruction at regs->nip is a store using
8162 * an update addressing form which will update r1.
8163@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
8164 * indicate errors in DSISR but can validly be set in SRR1.
8165 */
8166 if (trap == 0x400)
8167- error_code &= 0x48200000;
8168+ error_code &= 0x58200000;
8169 else
8170 is_write = error_code & DSISR_ISSTORE;
8171 #else
8172@@ -371,7 +402,7 @@ good_area:
8173 * "undefined". Of those that can be set, this is the only
8174 * one which seems bad.
8175 */
8176- if (error_code & 0x10000000)
8177+ if (error_code & DSISR_GUARDED)
8178 /* Guarded storage error. */
8179 goto bad_area;
8180 #endif /* CONFIG_8xx */
8181@@ -386,7 +417,7 @@ good_area:
8182 * processors use the same I/D cache coherency mechanism
8183 * as embedded.
8184 */
8185- if (error_code & DSISR_PROTFAULT)
8186+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
8187 goto bad_area;
8188 #endif /* CONFIG_PPC_STD_MMU */
8189
8190@@ -471,6 +502,23 @@ bad_area:
8191 bad_area_nosemaphore:
8192 /* User mode accesses cause a SIGSEGV */
8193 if (user_mode(regs)) {
8194+
8195+#ifdef CONFIG_PAX_PAGEEXEC
8196+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
8197+#ifdef CONFIG_PPC_STD_MMU
8198+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
8199+#else
8200+ if (is_exec && regs->nip == address) {
8201+#endif
8202+ switch (pax_handle_fetch_fault(regs)) {
8203+ }
8204+
8205+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
8206+ do_group_exit(SIGKILL);
8207+ }
8208+ }
8209+#endif
8210+
8211 _exception(SIGSEGV, regs, code, address);
8212 goto bail;
8213 }
8214diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
8215index 67a42ed..cd463e0 100644
8216--- a/arch/powerpc/mm/mmap_64.c
8217+++ b/arch/powerpc/mm/mmap_64.c
8218@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
8219 {
8220 unsigned long rnd = 0;
8221
8222+#ifdef CONFIG_PAX_RANDMMAP
8223+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8224+#endif
8225+
8226 if (current->flags & PF_RANDOMIZE) {
8227 /* 8MB for 32bit, 1GB for 64bit */
8228 if (is_32bit_task())
8229@@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8230 */
8231 if (mmap_is_legacy()) {
8232 mm->mmap_base = TASK_UNMAPPED_BASE;
8233+
8234+#ifdef CONFIG_PAX_RANDMMAP
8235+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8236+ mm->mmap_base += mm->delta_mmap;
8237+#endif
8238+
8239 mm->get_unmapped_area = arch_get_unmapped_area;
8240 mm->unmap_area = arch_unmap_area;
8241 } else {
8242 mm->mmap_base = mmap_base();
8243+
8244+#ifdef CONFIG_PAX_RANDMMAP
8245+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8246+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8247+#endif
8248+
8249 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8250 mm->unmap_area = arch_unmap_area_topdown;
8251 }
8252diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
8253index e779642..e5bb889 100644
8254--- a/arch/powerpc/mm/mmu_context_nohash.c
8255+++ b/arch/powerpc/mm/mmu_context_nohash.c
8256@@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
8257 return NOTIFY_OK;
8258 }
8259
8260-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
8261+static struct notifier_block mmu_context_cpu_nb = {
8262 .notifier_call = mmu_context_cpu_notify,
8263 };
8264
8265diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
8266index cafad40..9cbc0fc 100644
8267--- a/arch/powerpc/mm/numa.c
8268+++ b/arch/powerpc/mm/numa.c
8269@@ -920,7 +920,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
8270 return ret;
8271 }
8272
8273-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
8274+static struct notifier_block ppc64_numa_nb = {
8275 .notifier_call = cpu_numa_callback,
8276 .priority = 1 /* Must run before sched domains notifier. */
8277 };
8278diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
8279index 3e99c14..f00953c 100644
8280--- a/arch/powerpc/mm/slice.c
8281+++ b/arch/powerpc/mm/slice.c
8282@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
8283 if ((mm->task_size - len) < addr)
8284 return 0;
8285 vma = find_vma(mm, addr);
8286- return (!vma || (addr + len) <= vma->vm_start);
8287+ return check_heap_stack_gap(vma, addr, len, 0);
8288 }
8289
8290 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
8291@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
8292 info.align_offset = 0;
8293
8294 addr = TASK_UNMAPPED_BASE;
8295+
8296+#ifdef CONFIG_PAX_RANDMMAP
8297+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8298+ addr += mm->delta_mmap;
8299+#endif
8300+
8301 while (addr < TASK_SIZE) {
8302 info.low_limit = addr;
8303 if (!slice_scan_available(addr, available, 1, &addr))
8304@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
8305 if (fixed && addr > (mm->task_size - len))
8306 return -EINVAL;
8307
8308+#ifdef CONFIG_PAX_RANDMMAP
8309+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
8310+ addr = 0;
8311+#endif
8312+
8313 /* If hint, make sure it matches our alignment restrictions */
8314 if (!fixed && addr) {
8315 addr = _ALIGN_UP(addr, 1ul << pshift);
8316diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
8317index 9098692..3d54cd1 100644
8318--- a/arch/powerpc/platforms/cell/spufs/file.c
8319+++ b/arch/powerpc/platforms/cell/spufs/file.c
8320@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
8321 return VM_FAULT_NOPAGE;
8322 }
8323
8324-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
8325+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
8326 unsigned long address,
8327- void *buf, int len, int write)
8328+ void *buf, size_t len, int write)
8329 {
8330 struct spu_context *ctx = vma->vm_file->private_data;
8331 unsigned long offset = address - vma->vm_start;
8332diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
8333index bdb738a..49c9f95 100644
8334--- a/arch/powerpc/platforms/powermac/smp.c
8335+++ b/arch/powerpc/platforms/powermac/smp.c
8336@@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
8337 return NOTIFY_OK;
8338 }
8339
8340-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
8341+static struct notifier_block smp_core99_cpu_nb = {
8342 .notifier_call = smp_core99_cpu_notify,
8343 };
8344 #endif /* CONFIG_HOTPLUG_CPU */
8345diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
8346index c797832..ce575c8 100644
8347--- a/arch/s390/include/asm/atomic.h
8348+++ b/arch/s390/include/asm/atomic.h
8349@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
8350 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
8351 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8352
8353+#define atomic64_read_unchecked(v) atomic64_read(v)
8354+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
8355+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
8356+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
8357+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
8358+#define atomic64_inc_unchecked(v) atomic64_inc(v)
8359+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
8360+#define atomic64_dec_unchecked(v) atomic64_dec(v)
8361+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
8362+
8363 #define smp_mb__before_atomic_dec() smp_mb()
8364 #define smp_mb__after_atomic_dec() smp_mb()
8365 #define smp_mb__before_atomic_inc() smp_mb()
8366diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
8367index 4d7ccac..d03d0ad 100644
8368--- a/arch/s390/include/asm/cache.h
8369+++ b/arch/s390/include/asm/cache.h
8370@@ -9,8 +9,10 @@
8371 #ifndef __ARCH_S390_CACHE_H
8372 #define __ARCH_S390_CACHE_H
8373
8374-#define L1_CACHE_BYTES 256
8375+#include <linux/const.h>
8376+
8377 #define L1_CACHE_SHIFT 8
8378+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8379 #define NET_SKB_PAD 32
8380
8381 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8382diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
8383index 78f4f87..598ce39 100644
8384--- a/arch/s390/include/asm/elf.h
8385+++ b/arch/s390/include/asm/elf.h
8386@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
8387 the loader. We need to make sure that it is out of the way of the program
8388 that it will "exec", and that there is sufficient room for the brk. */
8389
8390-extern unsigned long randomize_et_dyn(unsigned long base);
8391-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
8392+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
8393+
8394+#ifdef CONFIG_PAX_ASLR
8395+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
8396+
8397+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
8398+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
8399+#endif
8400
8401 /* This yields a mask that user programs can use to figure out what
8402 instruction set this CPU supports. */
8403@@ -222,9 +228,6 @@ struct linux_binprm;
8404 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
8405 int arch_setup_additional_pages(struct linux_binprm *, int);
8406
8407-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8408-#define arch_randomize_brk arch_randomize_brk
8409-
8410 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
8411
8412 #endif
8413diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
8414index c4a93d6..4d2a9b4 100644
8415--- a/arch/s390/include/asm/exec.h
8416+++ b/arch/s390/include/asm/exec.h
8417@@ -7,6 +7,6 @@
8418 #ifndef __ASM_EXEC_H
8419 #define __ASM_EXEC_H
8420
8421-extern unsigned long arch_align_stack(unsigned long sp);
8422+#define arch_align_stack(x) ((x) & ~0xfUL)
8423
8424 #endif /* __ASM_EXEC_H */
8425diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
8426index 9c33ed4..e40cbef 100644
8427--- a/arch/s390/include/asm/uaccess.h
8428+++ b/arch/s390/include/asm/uaccess.h
8429@@ -252,6 +252,10 @@ static inline unsigned long __must_check
8430 copy_to_user(void __user *to, const void *from, unsigned long n)
8431 {
8432 might_fault();
8433+
8434+ if ((long)n < 0)
8435+ return n;
8436+
8437 return __copy_to_user(to, from, n);
8438 }
8439
8440@@ -275,6 +279,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
8441 static inline unsigned long __must_check
8442 __copy_from_user(void *to, const void __user *from, unsigned long n)
8443 {
8444+ if ((long)n < 0)
8445+ return n;
8446+
8447 if (__builtin_constant_p(n) && (n <= 256))
8448 return uaccess.copy_from_user_small(n, from, to);
8449 else
8450@@ -306,10 +313,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
8451 static inline unsigned long __must_check
8452 copy_from_user(void *to, const void __user *from, unsigned long n)
8453 {
8454- unsigned int sz = __compiletime_object_size(to);
8455+ size_t sz = __compiletime_object_size(to);
8456
8457 might_fault();
8458- if (unlikely(sz != -1 && sz < n)) {
8459+
8460+ if ((long)n < 0)
8461+ return n;
8462+
8463+ if (unlikely(sz != (size_t)-1 && sz < n)) {
8464 copy_from_user_overflow();
8465 return n;
8466 }
8467diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
8468index 7845e15..59c4353 100644
8469--- a/arch/s390/kernel/module.c
8470+++ b/arch/s390/kernel/module.c
8471@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
8472
8473 /* Increase core size by size of got & plt and set start
8474 offsets for got and plt. */
8475- me->core_size = ALIGN(me->core_size, 4);
8476- me->arch.got_offset = me->core_size;
8477- me->core_size += me->arch.got_size;
8478- me->arch.plt_offset = me->core_size;
8479- me->core_size += me->arch.plt_size;
8480+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
8481+ me->arch.got_offset = me->core_size_rw;
8482+ me->core_size_rw += me->arch.got_size;
8483+ me->arch.plt_offset = me->core_size_rx;
8484+ me->core_size_rx += me->arch.plt_size;
8485 return 0;
8486 }
8487
8488@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8489 if (info->got_initialized == 0) {
8490 Elf_Addr *gotent;
8491
8492- gotent = me->module_core + me->arch.got_offset +
8493+ gotent = me->module_core_rw + me->arch.got_offset +
8494 info->got_offset;
8495 *gotent = val;
8496 info->got_initialized = 1;
8497@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8498 rc = apply_rela_bits(loc, val, 0, 64, 0);
8499 else if (r_type == R_390_GOTENT ||
8500 r_type == R_390_GOTPLTENT) {
8501- val += (Elf_Addr) me->module_core - loc;
8502+ val += (Elf_Addr) me->module_core_rw - loc;
8503 rc = apply_rela_bits(loc, val, 1, 32, 1);
8504 }
8505 break;
8506@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8507 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
8508 if (info->plt_initialized == 0) {
8509 unsigned int *ip;
8510- ip = me->module_core + me->arch.plt_offset +
8511+ ip = me->module_core_rx + me->arch.plt_offset +
8512 info->plt_offset;
8513 #ifndef CONFIG_64BIT
8514 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
8515@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8516 val - loc + 0xffffUL < 0x1ffffeUL) ||
8517 (r_type == R_390_PLT32DBL &&
8518 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
8519- val = (Elf_Addr) me->module_core +
8520+ val = (Elf_Addr) me->module_core_rx +
8521 me->arch.plt_offset +
8522 info->plt_offset;
8523 val += rela->r_addend - loc;
8524@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8525 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
8526 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
8527 val = val + rela->r_addend -
8528- ((Elf_Addr) me->module_core + me->arch.got_offset);
8529+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
8530 if (r_type == R_390_GOTOFF16)
8531 rc = apply_rela_bits(loc, val, 0, 16, 0);
8532 else if (r_type == R_390_GOTOFF32)
8533@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8534 break;
8535 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
8536 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
8537- val = (Elf_Addr) me->module_core + me->arch.got_offset +
8538+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
8539 rela->r_addend - loc;
8540 if (r_type == R_390_GOTPC)
8541 rc = apply_rela_bits(loc, val, 1, 32, 0);
8542diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
8543index 2bc3edd..ab9d598 100644
8544--- a/arch/s390/kernel/process.c
8545+++ b/arch/s390/kernel/process.c
8546@@ -236,39 +236,3 @@ unsigned long get_wchan(struct task_struct *p)
8547 }
8548 return 0;
8549 }
8550-
8551-unsigned long arch_align_stack(unsigned long sp)
8552-{
8553- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8554- sp -= get_random_int() & ~PAGE_MASK;
8555- return sp & ~0xf;
8556-}
8557-
8558-static inline unsigned long brk_rnd(void)
8559-{
8560- /* 8MB for 32bit, 1GB for 64bit */
8561- if (is_32bit_task())
8562- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
8563- else
8564- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
8565-}
8566-
8567-unsigned long arch_randomize_brk(struct mm_struct *mm)
8568-{
8569- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
8570-
8571- if (ret < mm->brk)
8572- return mm->brk;
8573- return ret;
8574-}
8575-
8576-unsigned long randomize_et_dyn(unsigned long base)
8577-{
8578- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
8579-
8580- if (!(current->flags & PF_RANDOMIZE))
8581- return base;
8582- if (ret < base)
8583- return base;
8584- return ret;
8585-}
8586diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
8587index 06bafec..2bca531 100644
8588--- a/arch/s390/mm/mmap.c
8589+++ b/arch/s390/mm/mmap.c
8590@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8591 */
8592 if (mmap_is_legacy()) {
8593 mm->mmap_base = TASK_UNMAPPED_BASE;
8594+
8595+#ifdef CONFIG_PAX_RANDMMAP
8596+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8597+ mm->mmap_base += mm->delta_mmap;
8598+#endif
8599+
8600 mm->get_unmapped_area = arch_get_unmapped_area;
8601 mm->unmap_area = arch_unmap_area;
8602 } else {
8603 mm->mmap_base = mmap_base();
8604+
8605+#ifdef CONFIG_PAX_RANDMMAP
8606+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8607+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8608+#endif
8609+
8610 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8611 mm->unmap_area = arch_unmap_area_topdown;
8612 }
8613@@ -175,10 +187,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8614 */
8615 if (mmap_is_legacy()) {
8616 mm->mmap_base = TASK_UNMAPPED_BASE;
8617+
8618+#ifdef CONFIG_PAX_RANDMMAP
8619+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8620+ mm->mmap_base += mm->delta_mmap;
8621+#endif
8622+
8623 mm->get_unmapped_area = s390_get_unmapped_area;
8624 mm->unmap_area = arch_unmap_area;
8625 } else {
8626 mm->mmap_base = mmap_base();
8627+
8628+#ifdef CONFIG_PAX_RANDMMAP
8629+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8630+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8631+#endif
8632+
8633 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
8634 mm->unmap_area = arch_unmap_area_topdown;
8635 }
8636diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
8637index ae3d59f..f65f075 100644
8638--- a/arch/score/include/asm/cache.h
8639+++ b/arch/score/include/asm/cache.h
8640@@ -1,7 +1,9 @@
8641 #ifndef _ASM_SCORE_CACHE_H
8642 #define _ASM_SCORE_CACHE_H
8643
8644+#include <linux/const.h>
8645+
8646 #define L1_CACHE_SHIFT 4
8647-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8648+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8649
8650 #endif /* _ASM_SCORE_CACHE_H */
8651diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
8652index f9f3cd5..58ff438 100644
8653--- a/arch/score/include/asm/exec.h
8654+++ b/arch/score/include/asm/exec.h
8655@@ -1,6 +1,6 @@
8656 #ifndef _ASM_SCORE_EXEC_H
8657 #define _ASM_SCORE_EXEC_H
8658
8659-extern unsigned long arch_align_stack(unsigned long sp);
8660+#define arch_align_stack(x) (x)
8661
8662 #endif /* _ASM_SCORE_EXEC_H */
8663diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
8664index f4c6d02..e9355c3 100644
8665--- a/arch/score/kernel/process.c
8666+++ b/arch/score/kernel/process.c
8667@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
8668
8669 return task_pt_regs(task)->cp0_epc;
8670 }
8671-
8672-unsigned long arch_align_stack(unsigned long sp)
8673-{
8674- return sp;
8675-}
8676diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
8677index ef9e555..331bd29 100644
8678--- a/arch/sh/include/asm/cache.h
8679+++ b/arch/sh/include/asm/cache.h
8680@@ -9,10 +9,11 @@
8681 #define __ASM_SH_CACHE_H
8682 #ifdef __KERNEL__
8683
8684+#include <linux/const.h>
8685 #include <linux/init.h>
8686 #include <cpu/cache.h>
8687
8688-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8689+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8690
8691 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8692
8693diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
8694index 03f2b55..b0270327 100644
8695--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
8696+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
8697@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
8698 return NOTIFY_OK;
8699 }
8700
8701-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
8702+static struct notifier_block shx3_cpu_notifier = {
8703 .notifier_call = shx3_cpu_callback,
8704 };
8705
8706diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
8707index 6777177..cb5e44f 100644
8708--- a/arch/sh/mm/mmap.c
8709+++ b/arch/sh/mm/mmap.c
8710@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8711 struct mm_struct *mm = current->mm;
8712 struct vm_area_struct *vma;
8713 int do_colour_align;
8714+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8715 struct vm_unmapped_area_info info;
8716
8717 if (flags & MAP_FIXED) {
8718@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8719 if (filp || (flags & MAP_SHARED))
8720 do_colour_align = 1;
8721
8722+#ifdef CONFIG_PAX_RANDMMAP
8723+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8724+#endif
8725+
8726 if (addr) {
8727 if (do_colour_align)
8728 addr = COLOUR_ALIGN(addr, pgoff);
8729@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8730 addr = PAGE_ALIGN(addr);
8731
8732 vma = find_vma(mm, addr);
8733- if (TASK_SIZE - len >= addr &&
8734- (!vma || addr + len <= vma->vm_start))
8735+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8736 return addr;
8737 }
8738
8739 info.flags = 0;
8740 info.length = len;
8741- info.low_limit = TASK_UNMAPPED_BASE;
8742+ info.low_limit = mm->mmap_base;
8743 info.high_limit = TASK_SIZE;
8744 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
8745 info.align_offset = pgoff << PAGE_SHIFT;
8746@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8747 struct mm_struct *mm = current->mm;
8748 unsigned long addr = addr0;
8749 int do_colour_align;
8750+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8751 struct vm_unmapped_area_info info;
8752
8753 if (flags & MAP_FIXED) {
8754@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8755 if (filp || (flags & MAP_SHARED))
8756 do_colour_align = 1;
8757
8758+#ifdef CONFIG_PAX_RANDMMAP
8759+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8760+#endif
8761+
8762 /* requesting a specific address */
8763 if (addr) {
8764 if (do_colour_align)
8765@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8766 addr = PAGE_ALIGN(addr);
8767
8768 vma = find_vma(mm, addr);
8769- if (TASK_SIZE - len >= addr &&
8770- (!vma || addr + len <= vma->vm_start))
8771+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8772 return addr;
8773 }
8774
8775@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8776 VM_BUG_ON(addr != -ENOMEM);
8777 info.flags = 0;
8778 info.low_limit = TASK_UNMAPPED_BASE;
8779+
8780+#ifdef CONFIG_PAX_RANDMMAP
8781+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8782+ info.low_limit += mm->delta_mmap;
8783+#endif
8784+
8785 info.high_limit = TASK_SIZE;
8786 addr = vm_unmapped_area(&info);
8787 }
8788diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
8789index be56a24..443328f 100644
8790--- a/arch/sparc/include/asm/atomic_64.h
8791+++ b/arch/sparc/include/asm/atomic_64.h
8792@@ -14,18 +14,40 @@
8793 #define ATOMIC64_INIT(i) { (i) }
8794
8795 #define atomic_read(v) (*(volatile int *)&(v)->counter)
8796+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8797+{
8798+ return v->counter;
8799+}
8800 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
8801+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8802+{
8803+ return v->counter;
8804+}
8805
8806 #define atomic_set(v, i) (((v)->counter) = i)
8807+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8808+{
8809+ v->counter = i;
8810+}
8811 #define atomic64_set(v, i) (((v)->counter) = i)
8812+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8813+{
8814+ v->counter = i;
8815+}
8816
8817 extern void atomic_add(int, atomic_t *);
8818+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
8819 extern void atomic64_add(long, atomic64_t *);
8820+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
8821 extern void atomic_sub(int, atomic_t *);
8822+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
8823 extern void atomic64_sub(long, atomic64_t *);
8824+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
8825
8826 extern int atomic_add_ret(int, atomic_t *);
8827+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
8828 extern long atomic64_add_ret(long, atomic64_t *);
8829+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
8830 extern int atomic_sub_ret(int, atomic_t *);
8831 extern long atomic64_sub_ret(long, atomic64_t *);
8832
8833@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8834 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
8835
8836 #define atomic_inc_return(v) atomic_add_ret(1, v)
8837+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8838+{
8839+ return atomic_add_ret_unchecked(1, v);
8840+}
8841 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
8842+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8843+{
8844+ return atomic64_add_ret_unchecked(1, v);
8845+}
8846
8847 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
8848 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
8849
8850 #define atomic_add_return(i, v) atomic_add_ret(i, v)
8851+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8852+{
8853+ return atomic_add_ret_unchecked(i, v);
8854+}
8855 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
8856+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8857+{
8858+ return atomic64_add_ret_unchecked(i, v);
8859+}
8860
8861 /*
8862 * atomic_inc_and_test - increment and test
8863@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8864 * other cases.
8865 */
8866 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
8867+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8868+{
8869+ return atomic_inc_return_unchecked(v) == 0;
8870+}
8871 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8872
8873 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
8874@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8875 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
8876
8877 #define atomic_inc(v) atomic_add(1, v)
8878+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8879+{
8880+ atomic_add_unchecked(1, v);
8881+}
8882 #define atomic64_inc(v) atomic64_add(1, v)
8883+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8884+{
8885+ atomic64_add_unchecked(1, v);
8886+}
8887
8888 #define atomic_dec(v) atomic_sub(1, v)
8889+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8890+{
8891+ atomic_sub_unchecked(1, v);
8892+}
8893 #define atomic64_dec(v) atomic64_sub(1, v)
8894+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8895+{
8896+ atomic64_sub_unchecked(1, v);
8897+}
8898
8899 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
8900 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
8901
8902 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8903+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8904+{
8905+ return cmpxchg(&v->counter, old, new);
8906+}
8907 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
8908+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8909+{
8910+ return xchg(&v->counter, new);
8911+}
8912
8913 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8914 {
8915- int c, old;
8916+ int c, old, new;
8917 c = atomic_read(v);
8918 for (;;) {
8919- if (unlikely(c == (u)))
8920+ if (unlikely(c == u))
8921 break;
8922- old = atomic_cmpxchg((v), c, c + (a));
8923+
8924+ asm volatile("addcc %2, %0, %0\n"
8925+
8926+#ifdef CONFIG_PAX_REFCOUNT
8927+ "tvs %%icc, 6\n"
8928+#endif
8929+
8930+ : "=r" (new)
8931+ : "0" (c), "ir" (a)
8932+ : "cc");
8933+
8934+ old = atomic_cmpxchg(v, c, new);
8935 if (likely(old == c))
8936 break;
8937 c = old;
8938@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8939 #define atomic64_cmpxchg(v, o, n) \
8940 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
8941 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8942+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8943+{
8944+ return xchg(&v->counter, new);
8945+}
8946
8947 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
8948 {
8949- long c, old;
8950+ long c, old, new;
8951 c = atomic64_read(v);
8952 for (;;) {
8953- if (unlikely(c == (u)))
8954+ if (unlikely(c == u))
8955 break;
8956- old = atomic64_cmpxchg((v), c, c + (a));
8957+
8958+ asm volatile("addcc %2, %0, %0\n"
8959+
8960+#ifdef CONFIG_PAX_REFCOUNT
8961+ "tvs %%xcc, 6\n"
8962+#endif
8963+
8964+ : "=r" (new)
8965+ : "0" (c), "ir" (a)
8966+ : "cc");
8967+
8968+ old = atomic64_cmpxchg(v, c, new);
8969 if (likely(old == c))
8970 break;
8971 c = old;
8972 }
8973- return c != (u);
8974+ return c != u;
8975 }
8976
8977 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8978diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
8979index 5bb6991..5c2132e 100644
8980--- a/arch/sparc/include/asm/cache.h
8981+++ b/arch/sparc/include/asm/cache.h
8982@@ -7,10 +7,12 @@
8983 #ifndef _SPARC_CACHE_H
8984 #define _SPARC_CACHE_H
8985
8986+#include <linux/const.h>
8987+
8988 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
8989
8990 #define L1_CACHE_SHIFT 5
8991-#define L1_CACHE_BYTES 32
8992+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8993
8994 #ifdef CONFIG_SPARC32
8995 #define SMP_CACHE_BYTES_SHIFT 5
8996diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
8997index a24e41f..47677ff 100644
8998--- a/arch/sparc/include/asm/elf_32.h
8999+++ b/arch/sparc/include/asm/elf_32.h
9000@@ -114,6 +114,13 @@ typedef struct {
9001
9002 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
9003
9004+#ifdef CONFIG_PAX_ASLR
9005+#define PAX_ELF_ET_DYN_BASE 0x10000UL
9006+
9007+#define PAX_DELTA_MMAP_LEN 16
9008+#define PAX_DELTA_STACK_LEN 16
9009+#endif
9010+
9011 /* This yields a mask that user programs can use to figure out what
9012 instruction set this cpu supports. This can NOT be done in userspace
9013 on Sparc. */
9014diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
9015index 370ca1e..d4f4a98 100644
9016--- a/arch/sparc/include/asm/elf_64.h
9017+++ b/arch/sparc/include/asm/elf_64.h
9018@@ -189,6 +189,13 @@ typedef struct {
9019 #define ELF_ET_DYN_BASE 0x0000010000000000UL
9020 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
9021
9022+#ifdef CONFIG_PAX_ASLR
9023+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
9024+
9025+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
9026+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
9027+#endif
9028+
9029 extern unsigned long sparc64_elf_hwcap;
9030 #define ELF_HWCAP sparc64_elf_hwcap
9031
9032diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
9033index 9b1c36d..209298b 100644
9034--- a/arch/sparc/include/asm/pgalloc_32.h
9035+++ b/arch/sparc/include/asm/pgalloc_32.h
9036@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
9037 }
9038
9039 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
9040+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
9041
9042 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
9043 unsigned long address)
9044diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
9045index bcfe063..b333142 100644
9046--- a/arch/sparc/include/asm/pgalloc_64.h
9047+++ b/arch/sparc/include/asm/pgalloc_64.h
9048@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
9049 }
9050
9051 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
9052+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
9053
9054 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
9055 {
9056diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
9057index 6fc1348..390c50a 100644
9058--- a/arch/sparc/include/asm/pgtable_32.h
9059+++ b/arch/sparc/include/asm/pgtable_32.h
9060@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
9061 #define PAGE_SHARED SRMMU_PAGE_SHARED
9062 #define PAGE_COPY SRMMU_PAGE_COPY
9063 #define PAGE_READONLY SRMMU_PAGE_RDONLY
9064+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
9065+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
9066+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
9067 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
9068
9069 /* Top-level page directory - dummy used by init-mm.
9070@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
9071
9072 /* xwr */
9073 #define __P000 PAGE_NONE
9074-#define __P001 PAGE_READONLY
9075-#define __P010 PAGE_COPY
9076-#define __P011 PAGE_COPY
9077+#define __P001 PAGE_READONLY_NOEXEC
9078+#define __P010 PAGE_COPY_NOEXEC
9079+#define __P011 PAGE_COPY_NOEXEC
9080 #define __P100 PAGE_READONLY
9081 #define __P101 PAGE_READONLY
9082 #define __P110 PAGE_COPY
9083 #define __P111 PAGE_COPY
9084
9085 #define __S000 PAGE_NONE
9086-#define __S001 PAGE_READONLY
9087-#define __S010 PAGE_SHARED
9088-#define __S011 PAGE_SHARED
9089+#define __S001 PAGE_READONLY_NOEXEC
9090+#define __S010 PAGE_SHARED_NOEXEC
9091+#define __S011 PAGE_SHARED_NOEXEC
9092 #define __S100 PAGE_READONLY
9093 #define __S101 PAGE_READONLY
9094 #define __S110 PAGE_SHARED
9095diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
9096index 79da178..c2eede8 100644
9097--- a/arch/sparc/include/asm/pgtsrmmu.h
9098+++ b/arch/sparc/include/asm/pgtsrmmu.h
9099@@ -115,6 +115,11 @@
9100 SRMMU_EXEC | SRMMU_REF)
9101 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
9102 SRMMU_EXEC | SRMMU_REF)
9103+
9104+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
9105+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
9106+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
9107+
9108 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
9109 SRMMU_DIRTY | SRMMU_REF)
9110
9111diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
9112index 9689176..63c18ea 100644
9113--- a/arch/sparc/include/asm/spinlock_64.h
9114+++ b/arch/sparc/include/asm/spinlock_64.h
9115@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
9116
9117 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
9118
9119-static void inline arch_read_lock(arch_rwlock_t *lock)
9120+static inline void arch_read_lock(arch_rwlock_t *lock)
9121 {
9122 unsigned long tmp1, tmp2;
9123
9124 __asm__ __volatile__ (
9125 "1: ldsw [%2], %0\n"
9126 " brlz,pn %0, 2f\n"
9127-"4: add %0, 1, %1\n"
9128+"4: addcc %0, 1, %1\n"
9129+
9130+#ifdef CONFIG_PAX_REFCOUNT
9131+" tvs %%icc, 6\n"
9132+#endif
9133+
9134 " cas [%2], %0, %1\n"
9135 " cmp %0, %1\n"
9136 " bne,pn %%icc, 1b\n"
9137@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
9138 " .previous"
9139 : "=&r" (tmp1), "=&r" (tmp2)
9140 : "r" (lock)
9141- : "memory");
9142+ : "memory", "cc");
9143 }
9144
9145-static int inline arch_read_trylock(arch_rwlock_t *lock)
9146+static inline int arch_read_trylock(arch_rwlock_t *lock)
9147 {
9148 int tmp1, tmp2;
9149
9150@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
9151 "1: ldsw [%2], %0\n"
9152 " brlz,a,pn %0, 2f\n"
9153 " mov 0, %0\n"
9154-" add %0, 1, %1\n"
9155+" addcc %0, 1, %1\n"
9156+
9157+#ifdef CONFIG_PAX_REFCOUNT
9158+" tvs %%icc, 6\n"
9159+#endif
9160+
9161 " cas [%2], %0, %1\n"
9162 " cmp %0, %1\n"
9163 " bne,pn %%icc, 1b\n"
9164@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
9165 return tmp1;
9166 }
9167
9168-static void inline arch_read_unlock(arch_rwlock_t *lock)
9169+static inline void arch_read_unlock(arch_rwlock_t *lock)
9170 {
9171 unsigned long tmp1, tmp2;
9172
9173 __asm__ __volatile__(
9174 "1: lduw [%2], %0\n"
9175-" sub %0, 1, %1\n"
9176+" subcc %0, 1, %1\n"
9177+
9178+#ifdef CONFIG_PAX_REFCOUNT
9179+" tvs %%icc, 6\n"
9180+#endif
9181+
9182 " cas [%2], %0, %1\n"
9183 " cmp %0, %1\n"
9184 " bne,pn %%xcc, 1b\n"
9185@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
9186 : "memory");
9187 }
9188
9189-static void inline arch_write_lock(arch_rwlock_t *lock)
9190+static inline void arch_write_lock(arch_rwlock_t *lock)
9191 {
9192 unsigned long mask, tmp1, tmp2;
9193
9194@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
9195 : "memory");
9196 }
9197
9198-static void inline arch_write_unlock(arch_rwlock_t *lock)
9199+static inline void arch_write_unlock(arch_rwlock_t *lock)
9200 {
9201 __asm__ __volatile__(
9202 " stw %%g0, [%0]"
9203@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
9204 : "memory");
9205 }
9206
9207-static int inline arch_write_trylock(arch_rwlock_t *lock)
9208+static inline int arch_write_trylock(arch_rwlock_t *lock)
9209 {
9210 unsigned long mask, tmp1, tmp2, result;
9211
9212diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
9213index dd38075..e7cac83 100644
9214--- a/arch/sparc/include/asm/thread_info_32.h
9215+++ b/arch/sparc/include/asm/thread_info_32.h
9216@@ -49,6 +49,8 @@ struct thread_info {
9217 unsigned long w_saved;
9218
9219 struct restart_block restart_block;
9220+
9221+ unsigned long lowest_stack;
9222 };
9223
9224 /*
9225diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
9226index d5e5042..9bfee76 100644
9227--- a/arch/sparc/include/asm/thread_info_64.h
9228+++ b/arch/sparc/include/asm/thread_info_64.h
9229@@ -63,6 +63,8 @@ struct thread_info {
9230 struct pt_regs *kern_una_regs;
9231 unsigned int kern_una_insn;
9232
9233+ unsigned long lowest_stack;
9234+
9235 unsigned long fpregs[0] __attribute__ ((aligned(64)));
9236 };
9237
9238@@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
9239 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
9240 /* flag bit 6 is available */
9241 #define TIF_32BIT 7 /* 32-bit binary */
9242-/* flag bit 8 is available */
9243+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
9244 #define TIF_SECCOMP 9 /* secure computing */
9245 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
9246 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
9247+
9248 /* NOTE: Thread flags >= 12 should be ones we have no interest
9249 * in using in assembly, else we can't use the mask as
9250 * an immediate value in instructions such as andcc.
9251@@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
9252 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
9253 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
9254 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
9255+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
9256
9257 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
9258 _TIF_DO_NOTIFY_RESUME_MASK | \
9259 _TIF_NEED_RESCHED)
9260 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
9261
9262+#define _TIF_WORK_SYSCALL \
9263+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
9264+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
9265+
9266+
9267 /*
9268 * Thread-synchronous status.
9269 *
9270diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
9271index 0167d26..767bb0c 100644
9272--- a/arch/sparc/include/asm/uaccess.h
9273+++ b/arch/sparc/include/asm/uaccess.h
9274@@ -1,5 +1,6 @@
9275 #ifndef ___ASM_SPARC_UACCESS_H
9276 #define ___ASM_SPARC_UACCESS_H
9277+
9278 #if defined(__sparc__) && defined(__arch64__)
9279 #include <asm/uaccess_64.h>
9280 #else
9281diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
9282index 53a28dd..50c38c3 100644
9283--- a/arch/sparc/include/asm/uaccess_32.h
9284+++ b/arch/sparc/include/asm/uaccess_32.h
9285@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
9286
9287 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9288 {
9289- if (n && __access_ok((unsigned long) to, n))
9290+ if ((long)n < 0)
9291+ return n;
9292+
9293+ if (n && __access_ok((unsigned long) to, n)) {
9294+ if (!__builtin_constant_p(n))
9295+ check_object_size(from, n, true);
9296 return __copy_user(to, (__force void __user *) from, n);
9297- else
9298+ } else
9299 return n;
9300 }
9301
9302 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
9303 {
9304+ if ((long)n < 0)
9305+ return n;
9306+
9307+ if (!__builtin_constant_p(n))
9308+ check_object_size(from, n, true);
9309+
9310 return __copy_user(to, (__force void __user *) from, n);
9311 }
9312
9313 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9314 {
9315- if (n && __access_ok((unsigned long) from, n))
9316+ if ((long)n < 0)
9317+ return n;
9318+
9319+ if (n && __access_ok((unsigned long) from, n)) {
9320+ if (!__builtin_constant_p(n))
9321+ check_object_size(to, n, false);
9322 return __copy_user((__force void __user *) to, from, n);
9323- else
9324+ } else
9325 return n;
9326 }
9327
9328 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
9329 {
9330+ if ((long)n < 0)
9331+ return n;
9332+
9333 return __copy_user((__force void __user *) to, from, n);
9334 }
9335
9336diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
9337index e562d3c..191f176 100644
9338--- a/arch/sparc/include/asm/uaccess_64.h
9339+++ b/arch/sparc/include/asm/uaccess_64.h
9340@@ -10,6 +10,7 @@
9341 #include <linux/compiler.h>
9342 #include <linux/string.h>
9343 #include <linux/thread_info.h>
9344+#include <linux/kernel.h>
9345 #include <asm/asi.h>
9346 #include <asm/spitfire.h>
9347 #include <asm-generic/uaccess-unaligned.h>
9348@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
9349 static inline unsigned long __must_check
9350 copy_from_user(void *to, const void __user *from, unsigned long size)
9351 {
9352- unsigned long ret = ___copy_from_user(to, from, size);
9353+ unsigned long ret;
9354
9355+ if ((long)size < 0 || size > INT_MAX)
9356+ return size;
9357+
9358+ if (!__builtin_constant_p(size))
9359+ check_object_size(to, size, false);
9360+
9361+ ret = ___copy_from_user(to, from, size);
9362 if (unlikely(ret))
9363 ret = copy_from_user_fixup(to, from, size);
9364
9365@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
9366 static inline unsigned long __must_check
9367 copy_to_user(void __user *to, const void *from, unsigned long size)
9368 {
9369- unsigned long ret = ___copy_to_user(to, from, size);
9370+ unsigned long ret;
9371
9372+ if ((long)size < 0 || size > INT_MAX)
9373+ return size;
9374+
9375+ if (!__builtin_constant_p(size))
9376+ check_object_size(from, size, true);
9377+
9378+ ret = ___copy_to_user(to, from, size);
9379 if (unlikely(ret))
9380 ret = copy_to_user_fixup(to, from, size);
9381 return ret;
9382diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
9383index d432fb2..6056af1 100644
9384--- a/arch/sparc/kernel/Makefile
9385+++ b/arch/sparc/kernel/Makefile
9386@@ -3,7 +3,7 @@
9387 #
9388
9389 asflags-y := -ansi
9390-ccflags-y := -Werror
9391+#ccflags-y := -Werror
9392
9393 extra-y := head_$(BITS).o
9394
9395diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
9396index 5ef48da..11d460f 100644
9397--- a/arch/sparc/kernel/ds.c
9398+++ b/arch/sparc/kernel/ds.c
9399@@ -783,6 +783,16 @@ void ldom_set_var(const char *var, const char *value)
9400 char *base, *p;
9401 int msg_len, loops;
9402
9403+ if (strlen(var) + strlen(value) + 2 >
9404+ sizeof(pkt) - sizeof(pkt.header)) {
9405+ printk(KERN_ERR PFX
9406+ "contents length: %zu, which more than max: %lu,"
9407+ "so could not set (%s) variable to (%s).\n",
9408+ strlen(var) + strlen(value) + 2,
9409+ sizeof(pkt) - sizeof(pkt.header), var, value);
9410+ return;
9411+ }
9412+
9413 memset(&pkt, 0, sizeof(pkt));
9414 pkt.header.data.tag.type = DS_DATA;
9415 pkt.header.data.handle = cp->handle;
9416diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
9417index fdd819d..5af08c8 100644
9418--- a/arch/sparc/kernel/process_32.c
9419+++ b/arch/sparc/kernel/process_32.c
9420@@ -116,14 +116,14 @@ void show_regs(struct pt_regs *r)
9421
9422 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
9423 r->psr, r->pc, r->npc, r->y, print_tainted());
9424- printk("PC: <%pS>\n", (void *) r->pc);
9425+ printk("PC: <%pA>\n", (void *) r->pc);
9426 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9427 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
9428 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
9429 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9430 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
9431 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
9432- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
9433+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
9434
9435 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9436 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
9437@@ -160,7 +160,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
9438 rw = (struct reg_window32 *) fp;
9439 pc = rw->ins[7];
9440 printk("[%08lx : ", pc);
9441- printk("%pS ] ", (void *) pc);
9442+ printk("%pA ] ", (void *) pc);
9443 fp = rw->ins[6];
9444 } while (++count < 16);
9445 printk("\n");
9446diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
9447index baebab2..9cd13b1 100644
9448--- a/arch/sparc/kernel/process_64.c
9449+++ b/arch/sparc/kernel/process_64.c
9450@@ -158,7 +158,7 @@ static void show_regwindow(struct pt_regs *regs)
9451 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
9452 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
9453 if (regs->tstate & TSTATE_PRIV)
9454- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
9455+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
9456 }
9457
9458 void show_regs(struct pt_regs *regs)
9459@@ -167,7 +167,7 @@ void show_regs(struct pt_regs *regs)
9460
9461 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
9462 regs->tpc, regs->tnpc, regs->y, print_tainted());
9463- printk("TPC: <%pS>\n", (void *) regs->tpc);
9464+ printk("TPC: <%pA>\n", (void *) regs->tpc);
9465 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
9466 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
9467 regs->u_regs[3]);
9468@@ -180,7 +180,7 @@ void show_regs(struct pt_regs *regs)
9469 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
9470 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
9471 regs->u_regs[15]);
9472- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
9473+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
9474 show_regwindow(regs);
9475 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
9476 }
9477@@ -269,7 +269,7 @@ void arch_trigger_all_cpu_backtrace(void)
9478 ((tp && tp->task) ? tp->task->pid : -1));
9479
9480 if (gp->tstate & TSTATE_PRIV) {
9481- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
9482+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
9483 (void *) gp->tpc,
9484 (void *) gp->o7,
9485 (void *) gp->i7,
9486diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
9487index 79cc0d1..ec62734 100644
9488--- a/arch/sparc/kernel/prom_common.c
9489+++ b/arch/sparc/kernel/prom_common.c
9490@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
9491
9492 unsigned int prom_early_allocated __initdata;
9493
9494-static struct of_pdt_ops prom_sparc_ops __initdata = {
9495+static struct of_pdt_ops prom_sparc_ops __initconst = {
9496 .nextprop = prom_common_nextprop,
9497 .getproplen = prom_getproplen,
9498 .getproperty = prom_getproperty,
9499diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
9500index 7ff45e4..a58f271 100644
9501--- a/arch/sparc/kernel/ptrace_64.c
9502+++ b/arch/sparc/kernel/ptrace_64.c
9503@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
9504 return ret;
9505 }
9506
9507+#ifdef CONFIG_GRKERNSEC_SETXID
9508+extern void gr_delayed_cred_worker(void);
9509+#endif
9510+
9511 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
9512 {
9513 int ret = 0;
9514@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
9515 /* do the secure computing check first */
9516 secure_computing_strict(regs->u_regs[UREG_G1]);
9517
9518+#ifdef CONFIG_GRKERNSEC_SETXID
9519+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9520+ gr_delayed_cred_worker();
9521+#endif
9522+
9523 if (test_thread_flag(TIF_SYSCALL_TRACE))
9524 ret = tracehook_report_syscall_entry(regs);
9525
9526@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
9527
9528 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
9529 {
9530+#ifdef CONFIG_GRKERNSEC_SETXID
9531+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9532+ gr_delayed_cred_worker();
9533+#endif
9534+
9535 audit_syscall_exit(regs);
9536
9537 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9538diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
9539index 3a8d184..49498a8 100644
9540--- a/arch/sparc/kernel/sys_sparc_32.c
9541+++ b/arch/sparc/kernel/sys_sparc_32.c
9542@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9543 if (len > TASK_SIZE - PAGE_SIZE)
9544 return -ENOMEM;
9545 if (!addr)
9546- addr = TASK_UNMAPPED_BASE;
9547+ addr = current->mm->mmap_base;
9548
9549 info.flags = 0;
9550 info.length = len;
9551diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
9552index 2daaaa6..4fb84dc 100644
9553--- a/arch/sparc/kernel/sys_sparc_64.c
9554+++ b/arch/sparc/kernel/sys_sparc_64.c
9555@@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9556 struct vm_area_struct * vma;
9557 unsigned long task_size = TASK_SIZE;
9558 int do_color_align;
9559+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9560 struct vm_unmapped_area_info info;
9561
9562 if (flags & MAP_FIXED) {
9563 /* We do not accept a shared mapping if it would violate
9564 * cache aliasing constraints.
9565 */
9566- if ((flags & MAP_SHARED) &&
9567+ if ((filp || (flags & MAP_SHARED)) &&
9568 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
9569 return -EINVAL;
9570 return addr;
9571@@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9572 if (filp || (flags & MAP_SHARED))
9573 do_color_align = 1;
9574
9575+#ifdef CONFIG_PAX_RANDMMAP
9576+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9577+#endif
9578+
9579 if (addr) {
9580 if (do_color_align)
9581 addr = COLOR_ALIGN(addr, pgoff);
9582@@ -118,22 +123,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9583 addr = PAGE_ALIGN(addr);
9584
9585 vma = find_vma(mm, addr);
9586- if (task_size - len >= addr &&
9587- (!vma || addr + len <= vma->vm_start))
9588+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9589 return addr;
9590 }
9591
9592 info.flags = 0;
9593 info.length = len;
9594- info.low_limit = TASK_UNMAPPED_BASE;
9595+ info.low_limit = mm->mmap_base;
9596 info.high_limit = min(task_size, VA_EXCLUDE_START);
9597 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
9598 info.align_offset = pgoff << PAGE_SHIFT;
9599+ info.threadstack_offset = offset;
9600 addr = vm_unmapped_area(&info);
9601
9602 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
9603 VM_BUG_ON(addr != -ENOMEM);
9604 info.low_limit = VA_EXCLUDE_END;
9605+
9606+#ifdef CONFIG_PAX_RANDMMAP
9607+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9608+ info.low_limit += mm->delta_mmap;
9609+#endif
9610+
9611 info.high_limit = task_size;
9612 addr = vm_unmapped_area(&info);
9613 }
9614@@ -151,6 +162,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9615 unsigned long task_size = STACK_TOP32;
9616 unsigned long addr = addr0;
9617 int do_color_align;
9618+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9619 struct vm_unmapped_area_info info;
9620
9621 /* This should only ever run for 32-bit processes. */
9622@@ -160,7 +172,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9623 /* We do not accept a shared mapping if it would violate
9624 * cache aliasing constraints.
9625 */
9626- if ((flags & MAP_SHARED) &&
9627+ if ((filp || (flags & MAP_SHARED)) &&
9628 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
9629 return -EINVAL;
9630 return addr;
9631@@ -173,6 +185,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9632 if (filp || (flags & MAP_SHARED))
9633 do_color_align = 1;
9634
9635+#ifdef CONFIG_PAX_RANDMMAP
9636+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9637+#endif
9638+
9639 /* requesting a specific address */
9640 if (addr) {
9641 if (do_color_align)
9642@@ -181,8 +197,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9643 addr = PAGE_ALIGN(addr);
9644
9645 vma = find_vma(mm, addr);
9646- if (task_size - len >= addr &&
9647- (!vma || addr + len <= vma->vm_start))
9648+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9649 return addr;
9650 }
9651
9652@@ -192,6 +207,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9653 info.high_limit = mm->mmap_base;
9654 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
9655 info.align_offset = pgoff << PAGE_SHIFT;
9656+ info.threadstack_offset = offset;
9657 addr = vm_unmapped_area(&info);
9658
9659 /*
9660@@ -204,6 +220,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9661 VM_BUG_ON(addr != -ENOMEM);
9662 info.flags = 0;
9663 info.low_limit = TASK_UNMAPPED_BASE;
9664+
9665+#ifdef CONFIG_PAX_RANDMMAP
9666+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9667+ info.low_limit += mm->delta_mmap;
9668+#endif
9669+
9670 info.high_limit = STACK_TOP32;
9671 addr = vm_unmapped_area(&info);
9672 }
9673@@ -260,10 +282,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
9674 EXPORT_SYMBOL(get_fb_unmapped_area);
9675
9676 /* Essentially the same as PowerPC. */
9677-static unsigned long mmap_rnd(void)
9678+static unsigned long mmap_rnd(struct mm_struct *mm)
9679 {
9680 unsigned long rnd = 0UL;
9681
9682+#ifdef CONFIG_PAX_RANDMMAP
9683+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9684+#endif
9685+
9686 if (current->flags & PF_RANDOMIZE) {
9687 unsigned long val = get_random_int();
9688 if (test_thread_flag(TIF_32BIT))
9689@@ -276,7 +302,7 @@ static unsigned long mmap_rnd(void)
9690
9691 void arch_pick_mmap_layout(struct mm_struct *mm)
9692 {
9693- unsigned long random_factor = mmap_rnd();
9694+ unsigned long random_factor = mmap_rnd(mm);
9695 unsigned long gap;
9696
9697 /*
9698@@ -289,6 +315,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9699 gap == RLIM_INFINITY ||
9700 sysctl_legacy_va_layout) {
9701 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
9702+
9703+#ifdef CONFIG_PAX_RANDMMAP
9704+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9705+ mm->mmap_base += mm->delta_mmap;
9706+#endif
9707+
9708 mm->get_unmapped_area = arch_get_unmapped_area;
9709 mm->unmap_area = arch_unmap_area;
9710 } else {
9711@@ -301,6 +333,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9712 gap = (task_size / 6 * 5);
9713
9714 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
9715+
9716+#ifdef CONFIG_PAX_RANDMMAP
9717+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9718+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9719+#endif
9720+
9721 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9722 mm->unmap_area = arch_unmap_area_topdown;
9723 }
9724diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
9725index 22a1098..6255eb9 100644
9726--- a/arch/sparc/kernel/syscalls.S
9727+++ b/arch/sparc/kernel/syscalls.S
9728@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
9729 #endif
9730 .align 32
9731 1: ldx [%g6 + TI_FLAGS], %l5
9732- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9733+ andcc %l5, _TIF_WORK_SYSCALL, %g0
9734 be,pt %icc, rtrap
9735 nop
9736 call syscall_trace_leave
9737@@ -184,7 +184,7 @@ linux_sparc_syscall32:
9738
9739 srl %i5, 0, %o5 ! IEU1
9740 srl %i2, 0, %o2 ! IEU0 Group
9741- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9742+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9743 bne,pn %icc, linux_syscall_trace32 ! CTI
9744 mov %i0, %l5 ! IEU1
9745 call %l7 ! CTI Group brk forced
9746@@ -207,7 +207,7 @@ linux_sparc_syscall:
9747
9748 mov %i3, %o3 ! IEU1
9749 mov %i4, %o4 ! IEU0 Group
9750- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9751+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9752 bne,pn %icc, linux_syscall_trace ! CTI Group
9753 mov %i0, %l5 ! IEU0
9754 2: call %l7 ! CTI Group brk forced
9755@@ -223,7 +223,7 @@ ret_sys_call:
9756
9757 cmp %o0, -ERESTART_RESTARTBLOCK
9758 bgeu,pn %xcc, 1f
9759- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9760+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9761 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
9762
9763 2:
9764diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
9765index 654e8aa..45f431b 100644
9766--- a/arch/sparc/kernel/sysfs.c
9767+++ b/arch/sparc/kernel/sysfs.c
9768@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
9769 return NOTIFY_OK;
9770 }
9771
9772-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
9773+static struct notifier_block sysfs_cpu_nb = {
9774 .notifier_call = sysfs_cpu_notify,
9775 };
9776
9777diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
9778index 6629829..036032d 100644
9779--- a/arch/sparc/kernel/traps_32.c
9780+++ b/arch/sparc/kernel/traps_32.c
9781@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
9782 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
9783 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
9784
9785+extern void gr_handle_kernel_exploit(void);
9786+
9787 void die_if_kernel(char *str, struct pt_regs *regs)
9788 {
9789 static int die_counter;
9790@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9791 count++ < 30 &&
9792 (((unsigned long) rw) >= PAGE_OFFSET) &&
9793 !(((unsigned long) rw) & 0x7)) {
9794- printk("Caller[%08lx]: %pS\n", rw->ins[7],
9795+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
9796 (void *) rw->ins[7]);
9797 rw = (struct reg_window32 *)rw->ins[6];
9798 }
9799 }
9800 printk("Instruction DUMP:");
9801 instruction_dump ((unsigned long *) regs->pc);
9802- if(regs->psr & PSR_PS)
9803+ if(regs->psr & PSR_PS) {
9804+ gr_handle_kernel_exploit();
9805 do_exit(SIGKILL);
9806+ }
9807 do_exit(SIGSEGV);
9808 }
9809
9810diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
9811index b3f833a..ac74b2d 100644
9812--- a/arch/sparc/kernel/traps_64.c
9813+++ b/arch/sparc/kernel/traps_64.c
9814@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
9815 i + 1,
9816 p->trapstack[i].tstate, p->trapstack[i].tpc,
9817 p->trapstack[i].tnpc, p->trapstack[i].tt);
9818- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
9819+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
9820 }
9821 }
9822
9823@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
9824
9825 lvl -= 0x100;
9826 if (regs->tstate & TSTATE_PRIV) {
9827+
9828+#ifdef CONFIG_PAX_REFCOUNT
9829+ if (lvl == 6)
9830+ pax_report_refcount_overflow(regs);
9831+#endif
9832+
9833 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
9834 die_if_kernel(buffer, regs);
9835 }
9836@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
9837 void bad_trap_tl1(struct pt_regs *regs, long lvl)
9838 {
9839 char buffer[32];
9840-
9841+
9842 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
9843 0, lvl, SIGTRAP) == NOTIFY_STOP)
9844 return;
9845
9846+#ifdef CONFIG_PAX_REFCOUNT
9847+ if (lvl == 6)
9848+ pax_report_refcount_overflow(regs);
9849+#endif
9850+
9851 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
9852
9853 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
9854@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
9855 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
9856 printk("%s" "ERROR(%d): ",
9857 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
9858- printk("TPC<%pS>\n", (void *) regs->tpc);
9859+ printk("TPC<%pA>\n", (void *) regs->tpc);
9860 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
9861 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
9862 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
9863@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
9864 smp_processor_id(),
9865 (type & 0x1) ? 'I' : 'D',
9866 regs->tpc);
9867- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
9868+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
9869 panic("Irrecoverable Cheetah+ parity error.");
9870 }
9871
9872@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
9873 smp_processor_id(),
9874 (type & 0x1) ? 'I' : 'D',
9875 regs->tpc);
9876- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
9877+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
9878 }
9879
9880 struct sun4v_error_entry {
9881@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
9882
9883 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
9884 regs->tpc, tl);
9885- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
9886+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
9887 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
9888- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
9889+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
9890 (void *) regs->u_regs[UREG_I7]);
9891 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
9892 "pte[%lx] error[%lx]\n",
9893@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
9894
9895 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
9896 regs->tpc, tl);
9897- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
9898+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
9899 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
9900- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
9901+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
9902 (void *) regs->u_regs[UREG_I7]);
9903 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
9904 "pte[%lx] error[%lx]\n",
9905@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
9906 fp = (unsigned long)sf->fp + STACK_BIAS;
9907 }
9908
9909- printk(" [%016lx] %pS\n", pc, (void *) pc);
9910+ printk(" [%016lx] %pA\n", pc, (void *) pc);
9911 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
9912 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
9913 int index = tsk->curr_ret_stack;
9914 if (tsk->ret_stack && index >= graph) {
9915 pc = tsk->ret_stack[index - graph].ret;
9916- printk(" [%016lx] %pS\n", pc, (void *) pc);
9917+ printk(" [%016lx] %pA\n", pc, (void *) pc);
9918 graph++;
9919 }
9920 }
9921@@ -2360,6 +2371,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
9922 return (struct reg_window *) (fp + STACK_BIAS);
9923 }
9924
9925+extern void gr_handle_kernel_exploit(void);
9926+
9927 void die_if_kernel(char *str, struct pt_regs *regs)
9928 {
9929 static int die_counter;
9930@@ -2388,7 +2401,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9931 while (rw &&
9932 count++ < 30 &&
9933 kstack_valid(tp, (unsigned long) rw)) {
9934- printk("Caller[%016lx]: %pS\n", rw->ins[7],
9935+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
9936 (void *) rw->ins[7]);
9937
9938 rw = kernel_stack_up(rw);
9939@@ -2401,8 +2414,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9940 }
9941 user_instruction_dump ((unsigned int __user *) regs->tpc);
9942 }
9943- if (regs->tstate & TSTATE_PRIV)
9944+ if (regs->tstate & TSTATE_PRIV) {
9945+ gr_handle_kernel_exploit();
9946 do_exit(SIGKILL);
9947+ }
9948 do_exit(SIGSEGV);
9949 }
9950 EXPORT_SYMBOL(die_if_kernel);
9951diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
9952index 8201c25e..072a2a7 100644
9953--- a/arch/sparc/kernel/unaligned_64.c
9954+++ b/arch/sparc/kernel/unaligned_64.c
9955@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
9956 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
9957
9958 if (__ratelimit(&ratelimit)) {
9959- printk("Kernel unaligned access at TPC[%lx] %pS\n",
9960+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
9961 regs->tpc, (void *) regs->tpc);
9962 }
9963 }
9964diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
9965index dbe119b..089c7c1 100644
9966--- a/arch/sparc/lib/Makefile
9967+++ b/arch/sparc/lib/Makefile
9968@@ -2,7 +2,7 @@
9969 #
9970
9971 asflags-y := -ansi -DST_DIV0=0x02
9972-ccflags-y := -Werror
9973+#ccflags-y := -Werror
9974
9975 lib-$(CONFIG_SPARC32) += ashrdi3.o
9976 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
9977diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
9978index 85c233d..68500e0 100644
9979--- a/arch/sparc/lib/atomic_64.S
9980+++ b/arch/sparc/lib/atomic_64.S
9981@@ -17,7 +17,12 @@
9982 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
9983 BACKOFF_SETUP(%o2)
9984 1: lduw [%o1], %g1
9985- add %g1, %o0, %g7
9986+ addcc %g1, %o0, %g7
9987+
9988+#ifdef CONFIG_PAX_REFCOUNT
9989+ tvs %icc, 6
9990+#endif
9991+
9992 cas [%o1], %g1, %g7
9993 cmp %g1, %g7
9994 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9995@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
9996 2: BACKOFF_SPIN(%o2, %o3, 1b)
9997 ENDPROC(atomic_add)
9998
9999+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10000+ BACKOFF_SETUP(%o2)
10001+1: lduw [%o1], %g1
10002+ add %g1, %o0, %g7
10003+ cas [%o1], %g1, %g7
10004+ cmp %g1, %g7
10005+ bne,pn %icc, 2f
10006+ nop
10007+ retl
10008+ nop
10009+2: BACKOFF_SPIN(%o2, %o3, 1b)
10010+ENDPROC(atomic_add_unchecked)
10011+
10012 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10013 BACKOFF_SETUP(%o2)
10014 1: lduw [%o1], %g1
10015- sub %g1, %o0, %g7
10016+ subcc %g1, %o0, %g7
10017+
10018+#ifdef CONFIG_PAX_REFCOUNT
10019+ tvs %icc, 6
10020+#endif
10021+
10022 cas [%o1], %g1, %g7
10023 cmp %g1, %g7
10024 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10025@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10026 2: BACKOFF_SPIN(%o2, %o3, 1b)
10027 ENDPROC(atomic_sub)
10028
10029+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
10030+ BACKOFF_SETUP(%o2)
10031+1: lduw [%o1], %g1
10032+ sub %g1, %o0, %g7
10033+ cas [%o1], %g1, %g7
10034+ cmp %g1, %g7
10035+ bne,pn %icc, 2f
10036+ nop
10037+ retl
10038+ nop
10039+2: BACKOFF_SPIN(%o2, %o3, 1b)
10040+ENDPROC(atomic_sub_unchecked)
10041+
10042 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10043 BACKOFF_SETUP(%o2)
10044 1: lduw [%o1], %g1
10045- add %g1, %o0, %g7
10046+ addcc %g1, %o0, %g7
10047+
10048+#ifdef CONFIG_PAX_REFCOUNT
10049+ tvs %icc, 6
10050+#endif
10051+
10052 cas [%o1], %g1, %g7
10053 cmp %g1, %g7
10054 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10055@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10056 2: BACKOFF_SPIN(%o2, %o3, 1b)
10057 ENDPROC(atomic_add_ret)
10058
10059+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10060+ BACKOFF_SETUP(%o2)
10061+1: lduw [%o1], %g1
10062+ addcc %g1, %o0, %g7
10063+ cas [%o1], %g1, %g7
10064+ cmp %g1, %g7
10065+ bne,pn %icc, 2f
10066+ add %g7, %o0, %g7
10067+ sra %g7, 0, %o0
10068+ retl
10069+ nop
10070+2: BACKOFF_SPIN(%o2, %o3, 1b)
10071+ENDPROC(atomic_add_ret_unchecked)
10072+
10073 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
10074 BACKOFF_SETUP(%o2)
10075 1: lduw [%o1], %g1
10076- sub %g1, %o0, %g7
10077+ subcc %g1, %o0, %g7
10078+
10079+#ifdef CONFIG_PAX_REFCOUNT
10080+ tvs %icc, 6
10081+#endif
10082+
10083 cas [%o1], %g1, %g7
10084 cmp %g1, %g7
10085 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10086@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
10087 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
10088 BACKOFF_SETUP(%o2)
10089 1: ldx [%o1], %g1
10090- add %g1, %o0, %g7
10091+ addcc %g1, %o0, %g7
10092+
10093+#ifdef CONFIG_PAX_REFCOUNT
10094+ tvs %xcc, 6
10095+#endif
10096+
10097 casx [%o1], %g1, %g7
10098 cmp %g1, %g7
10099 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10100@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
10101 2: BACKOFF_SPIN(%o2, %o3, 1b)
10102 ENDPROC(atomic64_add)
10103
10104+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10105+ BACKOFF_SETUP(%o2)
10106+1: ldx [%o1], %g1
10107+ addcc %g1, %o0, %g7
10108+ casx [%o1], %g1, %g7
10109+ cmp %g1, %g7
10110+ bne,pn %xcc, 2f
10111+ nop
10112+ retl
10113+ nop
10114+2: BACKOFF_SPIN(%o2, %o3, 1b)
10115+ENDPROC(atomic64_add_unchecked)
10116+
10117 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10118 BACKOFF_SETUP(%o2)
10119 1: ldx [%o1], %g1
10120- sub %g1, %o0, %g7
10121+ subcc %g1, %o0, %g7
10122+
10123+#ifdef CONFIG_PAX_REFCOUNT
10124+ tvs %xcc, 6
10125+#endif
10126+
10127 casx [%o1], %g1, %g7
10128 cmp %g1, %g7
10129 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10130@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10131 2: BACKOFF_SPIN(%o2, %o3, 1b)
10132 ENDPROC(atomic64_sub)
10133
10134+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
10135+ BACKOFF_SETUP(%o2)
10136+1: ldx [%o1], %g1
10137+ subcc %g1, %o0, %g7
10138+ casx [%o1], %g1, %g7
10139+ cmp %g1, %g7
10140+ bne,pn %xcc, 2f
10141+ nop
10142+ retl
10143+ nop
10144+2: BACKOFF_SPIN(%o2, %o3, 1b)
10145+ENDPROC(atomic64_sub_unchecked)
10146+
10147 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10148 BACKOFF_SETUP(%o2)
10149 1: ldx [%o1], %g1
10150- add %g1, %o0, %g7
10151+ addcc %g1, %o0, %g7
10152+
10153+#ifdef CONFIG_PAX_REFCOUNT
10154+ tvs %xcc, 6
10155+#endif
10156+
10157 casx [%o1], %g1, %g7
10158 cmp %g1, %g7
10159 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10160@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10161 2: BACKOFF_SPIN(%o2, %o3, 1b)
10162 ENDPROC(atomic64_add_ret)
10163
10164+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10165+ BACKOFF_SETUP(%o2)
10166+1: ldx [%o1], %g1
10167+ addcc %g1, %o0, %g7
10168+ casx [%o1], %g1, %g7
10169+ cmp %g1, %g7
10170+ bne,pn %xcc, 2f
10171+ add %g7, %o0, %g7
10172+ mov %g7, %o0
10173+ retl
10174+ nop
10175+2: BACKOFF_SPIN(%o2, %o3, 1b)
10176+ENDPROC(atomic64_add_ret_unchecked)
10177+
10178 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
10179 BACKOFF_SETUP(%o2)
10180 1: ldx [%o1], %g1
10181- sub %g1, %o0, %g7
10182+ subcc %g1, %o0, %g7
10183+
10184+#ifdef CONFIG_PAX_REFCOUNT
10185+ tvs %xcc, 6
10186+#endif
10187+
10188 casx [%o1], %g1, %g7
10189 cmp %g1, %g7
10190 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10191diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
10192index 0c4e35e..745d3e4 100644
10193--- a/arch/sparc/lib/ksyms.c
10194+++ b/arch/sparc/lib/ksyms.c
10195@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
10196
10197 /* Atomic counter implementation. */
10198 EXPORT_SYMBOL(atomic_add);
10199+EXPORT_SYMBOL(atomic_add_unchecked);
10200 EXPORT_SYMBOL(atomic_add_ret);
10201+EXPORT_SYMBOL(atomic_add_ret_unchecked);
10202 EXPORT_SYMBOL(atomic_sub);
10203+EXPORT_SYMBOL(atomic_sub_unchecked);
10204 EXPORT_SYMBOL(atomic_sub_ret);
10205 EXPORT_SYMBOL(atomic64_add);
10206+EXPORT_SYMBOL(atomic64_add_unchecked);
10207 EXPORT_SYMBOL(atomic64_add_ret);
10208+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
10209 EXPORT_SYMBOL(atomic64_sub);
10210+EXPORT_SYMBOL(atomic64_sub_unchecked);
10211 EXPORT_SYMBOL(atomic64_sub_ret);
10212 EXPORT_SYMBOL(atomic64_dec_if_positive);
10213
10214diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
10215index 30c3ecc..736f015 100644
10216--- a/arch/sparc/mm/Makefile
10217+++ b/arch/sparc/mm/Makefile
10218@@ -2,7 +2,7 @@
10219 #
10220
10221 asflags-y := -ansi
10222-ccflags-y := -Werror
10223+#ccflags-y := -Werror
10224
10225 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
10226 obj-y += fault_$(BITS).o
10227diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
10228index e98bfda..ea8d221 100644
10229--- a/arch/sparc/mm/fault_32.c
10230+++ b/arch/sparc/mm/fault_32.c
10231@@ -21,6 +21,9 @@
10232 #include <linux/perf_event.h>
10233 #include <linux/interrupt.h>
10234 #include <linux/kdebug.h>
10235+#include <linux/slab.h>
10236+#include <linux/pagemap.h>
10237+#include <linux/compiler.h>
10238
10239 #include <asm/page.h>
10240 #include <asm/pgtable.h>
10241@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
10242 return safe_compute_effective_address(regs, insn);
10243 }
10244
10245+#ifdef CONFIG_PAX_PAGEEXEC
10246+#ifdef CONFIG_PAX_DLRESOLVE
10247+static void pax_emuplt_close(struct vm_area_struct *vma)
10248+{
10249+ vma->vm_mm->call_dl_resolve = 0UL;
10250+}
10251+
10252+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10253+{
10254+ unsigned int *kaddr;
10255+
10256+ vmf->page = alloc_page(GFP_HIGHUSER);
10257+ if (!vmf->page)
10258+ return VM_FAULT_OOM;
10259+
10260+ kaddr = kmap(vmf->page);
10261+ memset(kaddr, 0, PAGE_SIZE);
10262+ kaddr[0] = 0x9DE3BFA8U; /* save */
10263+ flush_dcache_page(vmf->page);
10264+ kunmap(vmf->page);
10265+ return VM_FAULT_MAJOR;
10266+}
10267+
10268+static const struct vm_operations_struct pax_vm_ops = {
10269+ .close = pax_emuplt_close,
10270+ .fault = pax_emuplt_fault
10271+};
10272+
10273+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
10274+{
10275+ int ret;
10276+
10277+ INIT_LIST_HEAD(&vma->anon_vma_chain);
10278+ vma->vm_mm = current->mm;
10279+ vma->vm_start = addr;
10280+ vma->vm_end = addr + PAGE_SIZE;
10281+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
10282+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
10283+ vma->vm_ops = &pax_vm_ops;
10284+
10285+ ret = insert_vm_struct(current->mm, vma);
10286+ if (ret)
10287+ return ret;
10288+
10289+ ++current->mm->total_vm;
10290+ return 0;
10291+}
10292+#endif
10293+
10294+/*
10295+ * PaX: decide what to do with offenders (regs->pc = fault address)
10296+ *
10297+ * returns 1 when task should be killed
10298+ * 2 when patched PLT trampoline was detected
10299+ * 3 when unpatched PLT trampoline was detected
10300+ */
10301+static int pax_handle_fetch_fault(struct pt_regs *regs)
10302+{
10303+
10304+#ifdef CONFIG_PAX_EMUPLT
10305+ int err;
10306+
10307+ do { /* PaX: patched PLT emulation #1 */
10308+ unsigned int sethi1, sethi2, jmpl;
10309+
10310+ err = get_user(sethi1, (unsigned int *)regs->pc);
10311+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
10312+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
10313+
10314+ if (err)
10315+ break;
10316+
10317+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
10318+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
10319+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
10320+ {
10321+ unsigned int addr;
10322+
10323+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
10324+ addr = regs->u_regs[UREG_G1];
10325+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10326+ regs->pc = addr;
10327+ regs->npc = addr+4;
10328+ return 2;
10329+ }
10330+ } while (0);
10331+
10332+ do { /* PaX: patched PLT emulation #2 */
10333+ unsigned int ba;
10334+
10335+ err = get_user(ba, (unsigned int *)regs->pc);
10336+
10337+ if (err)
10338+ break;
10339+
10340+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
10341+ unsigned int addr;
10342+
10343+ if ((ba & 0xFFC00000U) == 0x30800000U)
10344+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
10345+ else
10346+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10347+ regs->pc = addr;
10348+ regs->npc = addr+4;
10349+ return 2;
10350+ }
10351+ } while (0);
10352+
10353+ do { /* PaX: patched PLT emulation #3 */
10354+ unsigned int sethi, bajmpl, nop;
10355+
10356+ err = get_user(sethi, (unsigned int *)regs->pc);
10357+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
10358+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
10359+
10360+ if (err)
10361+ break;
10362+
10363+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10364+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
10365+ nop == 0x01000000U)
10366+ {
10367+ unsigned int addr;
10368+
10369+ addr = (sethi & 0x003FFFFFU) << 10;
10370+ regs->u_regs[UREG_G1] = addr;
10371+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
10372+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10373+ else
10374+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10375+ regs->pc = addr;
10376+ regs->npc = addr+4;
10377+ return 2;
10378+ }
10379+ } while (0);
10380+
10381+ do { /* PaX: unpatched PLT emulation step 1 */
10382+ unsigned int sethi, ba, nop;
10383+
10384+ err = get_user(sethi, (unsigned int *)regs->pc);
10385+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
10386+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
10387+
10388+ if (err)
10389+ break;
10390+
10391+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10392+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
10393+ nop == 0x01000000U)
10394+ {
10395+ unsigned int addr, save, call;
10396+
10397+ if ((ba & 0xFFC00000U) == 0x30800000U)
10398+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
10399+ else
10400+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10401+
10402+ err = get_user(save, (unsigned int *)addr);
10403+ err |= get_user(call, (unsigned int *)(addr+4));
10404+ err |= get_user(nop, (unsigned int *)(addr+8));
10405+ if (err)
10406+ break;
10407+
10408+#ifdef CONFIG_PAX_DLRESOLVE
10409+ if (save == 0x9DE3BFA8U &&
10410+ (call & 0xC0000000U) == 0x40000000U &&
10411+ nop == 0x01000000U)
10412+ {
10413+ struct vm_area_struct *vma;
10414+ unsigned long call_dl_resolve;
10415+
10416+ down_read(&current->mm->mmap_sem);
10417+ call_dl_resolve = current->mm->call_dl_resolve;
10418+ up_read(&current->mm->mmap_sem);
10419+ if (likely(call_dl_resolve))
10420+ goto emulate;
10421+
10422+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
10423+
10424+ down_write(&current->mm->mmap_sem);
10425+ if (current->mm->call_dl_resolve) {
10426+ call_dl_resolve = current->mm->call_dl_resolve;
10427+ up_write(&current->mm->mmap_sem);
10428+ if (vma)
10429+ kmem_cache_free(vm_area_cachep, vma);
10430+ goto emulate;
10431+ }
10432+
10433+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
10434+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
10435+ up_write(&current->mm->mmap_sem);
10436+ if (vma)
10437+ kmem_cache_free(vm_area_cachep, vma);
10438+ return 1;
10439+ }
10440+
10441+ if (pax_insert_vma(vma, call_dl_resolve)) {
10442+ up_write(&current->mm->mmap_sem);
10443+ kmem_cache_free(vm_area_cachep, vma);
10444+ return 1;
10445+ }
10446+
10447+ current->mm->call_dl_resolve = call_dl_resolve;
10448+ up_write(&current->mm->mmap_sem);
10449+
10450+emulate:
10451+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10452+ regs->pc = call_dl_resolve;
10453+ regs->npc = addr+4;
10454+ return 3;
10455+ }
10456+#endif
10457+
10458+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
10459+ if ((save & 0xFFC00000U) == 0x05000000U &&
10460+ (call & 0xFFFFE000U) == 0x85C0A000U &&
10461+ nop == 0x01000000U)
10462+ {
10463+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10464+ regs->u_regs[UREG_G2] = addr + 4;
10465+ addr = (save & 0x003FFFFFU) << 10;
10466+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10467+ regs->pc = addr;
10468+ regs->npc = addr+4;
10469+ return 3;
10470+ }
10471+ }
10472+ } while (0);
10473+
10474+ do { /* PaX: unpatched PLT emulation step 2 */
10475+ unsigned int save, call, nop;
10476+
10477+ err = get_user(save, (unsigned int *)(regs->pc-4));
10478+ err |= get_user(call, (unsigned int *)regs->pc);
10479+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
10480+ if (err)
10481+ break;
10482+
10483+ if (save == 0x9DE3BFA8U &&
10484+ (call & 0xC0000000U) == 0x40000000U &&
10485+ nop == 0x01000000U)
10486+ {
10487+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
10488+
10489+ regs->u_regs[UREG_RETPC] = regs->pc;
10490+ regs->pc = dl_resolve;
10491+ regs->npc = dl_resolve+4;
10492+ return 3;
10493+ }
10494+ } while (0);
10495+#endif
10496+
10497+ return 1;
10498+}
10499+
10500+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
10501+{
10502+ unsigned long i;
10503+
10504+ printk(KERN_ERR "PAX: bytes at PC: ");
10505+ for (i = 0; i < 8; i++) {
10506+ unsigned int c;
10507+ if (get_user(c, (unsigned int *)pc+i))
10508+ printk(KERN_CONT "???????? ");
10509+ else
10510+ printk(KERN_CONT "%08x ", c);
10511+ }
10512+ printk("\n");
10513+}
10514+#endif
10515+
10516 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
10517 int text_fault)
10518 {
10519@@ -230,6 +504,24 @@ good_area:
10520 if (!(vma->vm_flags & VM_WRITE))
10521 goto bad_area;
10522 } else {
10523+
10524+#ifdef CONFIG_PAX_PAGEEXEC
10525+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
10526+ up_read(&mm->mmap_sem);
10527+ switch (pax_handle_fetch_fault(regs)) {
10528+
10529+#ifdef CONFIG_PAX_EMUPLT
10530+ case 2:
10531+ case 3:
10532+ return;
10533+#endif
10534+
10535+ }
10536+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
10537+ do_group_exit(SIGKILL);
10538+ }
10539+#endif
10540+
10541 /* Allow reads even for write-only mappings */
10542 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
10543 goto bad_area;
10544diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
10545index 5062ff3..e0b75f3 100644
10546--- a/arch/sparc/mm/fault_64.c
10547+++ b/arch/sparc/mm/fault_64.c
10548@@ -21,6 +21,9 @@
10549 #include <linux/kprobes.h>
10550 #include <linux/kdebug.h>
10551 #include <linux/percpu.h>
10552+#include <linux/slab.h>
10553+#include <linux/pagemap.h>
10554+#include <linux/compiler.h>
10555
10556 #include <asm/page.h>
10557 #include <asm/pgtable.h>
10558@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
10559 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
10560 regs->tpc);
10561 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
10562- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
10563+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
10564 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
10565 dump_stack();
10566 unhandled_fault(regs->tpc, current, regs);
10567@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
10568 show_regs(regs);
10569 }
10570
10571+#ifdef CONFIG_PAX_PAGEEXEC
10572+#ifdef CONFIG_PAX_DLRESOLVE
10573+static void pax_emuplt_close(struct vm_area_struct *vma)
10574+{
10575+ vma->vm_mm->call_dl_resolve = 0UL;
10576+}
10577+
10578+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10579+{
10580+ unsigned int *kaddr;
10581+
10582+ vmf->page = alloc_page(GFP_HIGHUSER);
10583+ if (!vmf->page)
10584+ return VM_FAULT_OOM;
10585+
10586+ kaddr = kmap(vmf->page);
10587+ memset(kaddr, 0, PAGE_SIZE);
10588+ kaddr[0] = 0x9DE3BFA8U; /* save */
10589+ flush_dcache_page(vmf->page);
10590+ kunmap(vmf->page);
10591+ return VM_FAULT_MAJOR;
10592+}
10593+
10594+static const struct vm_operations_struct pax_vm_ops = {
10595+ .close = pax_emuplt_close,
10596+ .fault = pax_emuplt_fault
10597+};
10598+
10599+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
10600+{
10601+ int ret;
10602+
10603+ INIT_LIST_HEAD(&vma->anon_vma_chain);
10604+ vma->vm_mm = current->mm;
10605+ vma->vm_start = addr;
10606+ vma->vm_end = addr + PAGE_SIZE;
10607+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
10608+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
10609+ vma->vm_ops = &pax_vm_ops;
10610+
10611+ ret = insert_vm_struct(current->mm, vma);
10612+ if (ret)
10613+ return ret;
10614+
10615+ ++current->mm->total_vm;
10616+ return 0;
10617+}
10618+#endif
10619+
10620+/*
10621+ * PaX: decide what to do with offenders (regs->tpc = fault address)
10622+ *
10623+ * returns 1 when task should be killed
10624+ * 2 when patched PLT trampoline was detected
10625+ * 3 when unpatched PLT trampoline was detected
10626+ */
10627+static int pax_handle_fetch_fault(struct pt_regs *regs)
10628+{
10629+
10630+#ifdef CONFIG_PAX_EMUPLT
10631+ int err;
10632+
10633+ do { /* PaX: patched PLT emulation #1 */
10634+ unsigned int sethi1, sethi2, jmpl;
10635+
10636+ err = get_user(sethi1, (unsigned int *)regs->tpc);
10637+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
10638+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
10639+
10640+ if (err)
10641+ break;
10642+
10643+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
10644+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
10645+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
10646+ {
10647+ unsigned long addr;
10648+
10649+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
10650+ addr = regs->u_regs[UREG_G1];
10651+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10652+
10653+ if (test_thread_flag(TIF_32BIT))
10654+ addr &= 0xFFFFFFFFUL;
10655+
10656+ regs->tpc = addr;
10657+ regs->tnpc = addr+4;
10658+ return 2;
10659+ }
10660+ } while (0);
10661+
10662+ do { /* PaX: patched PLT emulation #2 */
10663+ unsigned int ba;
10664+
10665+ err = get_user(ba, (unsigned int *)regs->tpc);
10666+
10667+ if (err)
10668+ break;
10669+
10670+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
10671+ unsigned long addr;
10672+
10673+ if ((ba & 0xFFC00000U) == 0x30800000U)
10674+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
10675+ else
10676+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10677+
10678+ if (test_thread_flag(TIF_32BIT))
10679+ addr &= 0xFFFFFFFFUL;
10680+
10681+ regs->tpc = addr;
10682+ regs->tnpc = addr+4;
10683+ return 2;
10684+ }
10685+ } while (0);
10686+
10687+ do { /* PaX: patched PLT emulation #3 */
10688+ unsigned int sethi, bajmpl, nop;
10689+
10690+ err = get_user(sethi, (unsigned int *)regs->tpc);
10691+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
10692+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10693+
10694+ if (err)
10695+ break;
10696+
10697+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10698+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
10699+ nop == 0x01000000U)
10700+ {
10701+ unsigned long addr;
10702+
10703+ addr = (sethi & 0x003FFFFFU) << 10;
10704+ regs->u_regs[UREG_G1] = addr;
10705+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
10706+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10707+ else
10708+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10709+
10710+ if (test_thread_flag(TIF_32BIT))
10711+ addr &= 0xFFFFFFFFUL;
10712+
10713+ regs->tpc = addr;
10714+ regs->tnpc = addr+4;
10715+ return 2;
10716+ }
10717+ } while (0);
10718+
10719+ do { /* PaX: patched PLT emulation #4 */
10720+ unsigned int sethi, mov1, call, mov2;
10721+
10722+ err = get_user(sethi, (unsigned int *)regs->tpc);
10723+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
10724+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
10725+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
10726+
10727+ if (err)
10728+ break;
10729+
10730+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10731+ mov1 == 0x8210000FU &&
10732+ (call & 0xC0000000U) == 0x40000000U &&
10733+ mov2 == 0x9E100001U)
10734+ {
10735+ unsigned long addr;
10736+
10737+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
10738+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
10739+
10740+ if (test_thread_flag(TIF_32BIT))
10741+ addr &= 0xFFFFFFFFUL;
10742+
10743+ regs->tpc = addr;
10744+ regs->tnpc = addr+4;
10745+ return 2;
10746+ }
10747+ } while (0);
10748+
10749+ do { /* PaX: patched PLT emulation #5 */
10750+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
10751+
10752+ err = get_user(sethi, (unsigned int *)regs->tpc);
10753+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
10754+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
10755+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
10756+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
10757+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
10758+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
10759+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
10760+
10761+ if (err)
10762+ break;
10763+
10764+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10765+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
10766+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10767+ (or1 & 0xFFFFE000U) == 0x82106000U &&
10768+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
10769+ sllx == 0x83287020U &&
10770+ jmpl == 0x81C04005U &&
10771+ nop == 0x01000000U)
10772+ {
10773+ unsigned long addr;
10774+
10775+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
10776+ regs->u_regs[UREG_G1] <<= 32;
10777+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
10778+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
10779+ regs->tpc = addr;
10780+ regs->tnpc = addr+4;
10781+ return 2;
10782+ }
10783+ } while (0);
10784+
10785+ do { /* PaX: patched PLT emulation #6 */
10786+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
10787+
10788+ err = get_user(sethi, (unsigned int *)regs->tpc);
10789+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
10790+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
10791+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
10792+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
10793+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
10794+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
10795+
10796+ if (err)
10797+ break;
10798+
10799+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10800+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
10801+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10802+ sllx == 0x83287020U &&
10803+ (or & 0xFFFFE000U) == 0x8A116000U &&
10804+ jmpl == 0x81C04005U &&
10805+ nop == 0x01000000U)
10806+ {
10807+ unsigned long addr;
10808+
10809+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
10810+ regs->u_regs[UREG_G1] <<= 32;
10811+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
10812+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
10813+ regs->tpc = addr;
10814+ regs->tnpc = addr+4;
10815+ return 2;
10816+ }
10817+ } while (0);
10818+
10819+ do { /* PaX: unpatched PLT emulation step 1 */
10820+ unsigned int sethi, ba, nop;
10821+
10822+ err = get_user(sethi, (unsigned int *)regs->tpc);
10823+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
10824+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10825+
10826+ if (err)
10827+ break;
10828+
10829+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10830+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
10831+ nop == 0x01000000U)
10832+ {
10833+ unsigned long addr;
10834+ unsigned int save, call;
10835+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
10836+
10837+ if ((ba & 0xFFC00000U) == 0x30800000U)
10838+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
10839+ else
10840+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10841+
10842+ if (test_thread_flag(TIF_32BIT))
10843+ addr &= 0xFFFFFFFFUL;
10844+
10845+ err = get_user(save, (unsigned int *)addr);
10846+ err |= get_user(call, (unsigned int *)(addr+4));
10847+ err |= get_user(nop, (unsigned int *)(addr+8));
10848+ if (err)
10849+ break;
10850+
10851+#ifdef CONFIG_PAX_DLRESOLVE
10852+ if (save == 0x9DE3BFA8U &&
10853+ (call & 0xC0000000U) == 0x40000000U &&
10854+ nop == 0x01000000U)
10855+ {
10856+ struct vm_area_struct *vma;
10857+ unsigned long call_dl_resolve;
10858+
10859+ down_read(&current->mm->mmap_sem);
10860+ call_dl_resolve = current->mm->call_dl_resolve;
10861+ up_read(&current->mm->mmap_sem);
10862+ if (likely(call_dl_resolve))
10863+ goto emulate;
10864+
10865+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
10866+
10867+ down_write(&current->mm->mmap_sem);
10868+ if (current->mm->call_dl_resolve) {
10869+ call_dl_resolve = current->mm->call_dl_resolve;
10870+ up_write(&current->mm->mmap_sem);
10871+ if (vma)
10872+ kmem_cache_free(vm_area_cachep, vma);
10873+ goto emulate;
10874+ }
10875+
10876+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
10877+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
10878+ up_write(&current->mm->mmap_sem);
10879+ if (vma)
10880+ kmem_cache_free(vm_area_cachep, vma);
10881+ return 1;
10882+ }
10883+
10884+ if (pax_insert_vma(vma, call_dl_resolve)) {
10885+ up_write(&current->mm->mmap_sem);
10886+ kmem_cache_free(vm_area_cachep, vma);
10887+ return 1;
10888+ }
10889+
10890+ current->mm->call_dl_resolve = call_dl_resolve;
10891+ up_write(&current->mm->mmap_sem);
10892+
10893+emulate:
10894+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10895+ regs->tpc = call_dl_resolve;
10896+ regs->tnpc = addr+4;
10897+ return 3;
10898+ }
10899+#endif
10900+
10901+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
10902+ if ((save & 0xFFC00000U) == 0x05000000U &&
10903+ (call & 0xFFFFE000U) == 0x85C0A000U &&
10904+ nop == 0x01000000U)
10905+ {
10906+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10907+ regs->u_regs[UREG_G2] = addr + 4;
10908+ addr = (save & 0x003FFFFFU) << 10;
10909+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10910+
10911+ if (test_thread_flag(TIF_32BIT))
10912+ addr &= 0xFFFFFFFFUL;
10913+
10914+ regs->tpc = addr;
10915+ regs->tnpc = addr+4;
10916+ return 3;
10917+ }
10918+
10919+ /* PaX: 64-bit PLT stub */
10920+ err = get_user(sethi1, (unsigned int *)addr);
10921+ err |= get_user(sethi2, (unsigned int *)(addr+4));
10922+ err |= get_user(or1, (unsigned int *)(addr+8));
10923+ err |= get_user(or2, (unsigned int *)(addr+12));
10924+ err |= get_user(sllx, (unsigned int *)(addr+16));
10925+ err |= get_user(add, (unsigned int *)(addr+20));
10926+ err |= get_user(jmpl, (unsigned int *)(addr+24));
10927+ err |= get_user(nop, (unsigned int *)(addr+28));
10928+ if (err)
10929+ break;
10930+
10931+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
10932+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10933+ (or1 & 0xFFFFE000U) == 0x88112000U &&
10934+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
10935+ sllx == 0x89293020U &&
10936+ add == 0x8A010005U &&
10937+ jmpl == 0x89C14000U &&
10938+ nop == 0x01000000U)
10939+ {
10940+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10941+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
10942+ regs->u_regs[UREG_G4] <<= 32;
10943+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
10944+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
10945+ regs->u_regs[UREG_G4] = addr + 24;
10946+ addr = regs->u_regs[UREG_G5];
10947+ regs->tpc = addr;
10948+ regs->tnpc = addr+4;
10949+ return 3;
10950+ }
10951+ }
10952+ } while (0);
10953+
10954+#ifdef CONFIG_PAX_DLRESOLVE
10955+ do { /* PaX: unpatched PLT emulation step 2 */
10956+ unsigned int save, call, nop;
10957+
10958+ err = get_user(save, (unsigned int *)(regs->tpc-4));
10959+ err |= get_user(call, (unsigned int *)regs->tpc);
10960+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
10961+ if (err)
10962+ break;
10963+
10964+ if (save == 0x9DE3BFA8U &&
10965+ (call & 0xC0000000U) == 0x40000000U &&
10966+ nop == 0x01000000U)
10967+ {
10968+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
10969+
10970+ if (test_thread_flag(TIF_32BIT))
10971+ dl_resolve &= 0xFFFFFFFFUL;
10972+
10973+ regs->u_regs[UREG_RETPC] = regs->tpc;
10974+ regs->tpc = dl_resolve;
10975+ regs->tnpc = dl_resolve+4;
10976+ return 3;
10977+ }
10978+ } while (0);
10979+#endif
10980+
10981+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
10982+ unsigned int sethi, ba, nop;
10983+
10984+ err = get_user(sethi, (unsigned int *)regs->tpc);
10985+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
10986+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10987+
10988+ if (err)
10989+ break;
10990+
10991+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10992+ (ba & 0xFFF00000U) == 0x30600000U &&
10993+ nop == 0x01000000U)
10994+ {
10995+ unsigned long addr;
10996+
10997+ addr = (sethi & 0x003FFFFFU) << 10;
10998+ regs->u_regs[UREG_G1] = addr;
10999+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11000+
11001+ if (test_thread_flag(TIF_32BIT))
11002+ addr &= 0xFFFFFFFFUL;
11003+
11004+ regs->tpc = addr;
11005+ regs->tnpc = addr+4;
11006+ return 2;
11007+ }
11008+ } while (0);
11009+
11010+#endif
11011+
11012+ return 1;
11013+}
11014+
11015+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11016+{
11017+ unsigned long i;
11018+
11019+ printk(KERN_ERR "PAX: bytes at PC: ");
11020+ for (i = 0; i < 8; i++) {
11021+ unsigned int c;
11022+ if (get_user(c, (unsigned int *)pc+i))
11023+ printk(KERN_CONT "???????? ");
11024+ else
11025+ printk(KERN_CONT "%08x ", c);
11026+ }
11027+ printk("\n");
11028+}
11029+#endif
11030+
11031 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
11032 {
11033 struct mm_struct *mm = current->mm;
11034@@ -341,6 +804,29 @@ retry:
11035 if (!vma)
11036 goto bad_area;
11037
11038+#ifdef CONFIG_PAX_PAGEEXEC
11039+ /* PaX: detect ITLB misses on non-exec pages */
11040+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
11041+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
11042+ {
11043+ if (address != regs->tpc)
11044+ goto good_area;
11045+
11046+ up_read(&mm->mmap_sem);
11047+ switch (pax_handle_fetch_fault(regs)) {
11048+
11049+#ifdef CONFIG_PAX_EMUPLT
11050+ case 2:
11051+ case 3:
11052+ return;
11053+#endif
11054+
11055+ }
11056+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
11057+ do_group_exit(SIGKILL);
11058+ }
11059+#endif
11060+
11061 /* Pure DTLB misses do not tell us whether the fault causing
11062 * load/store/atomic was a write or not, it only says that there
11063 * was no match. So in such a case we (carefully) read the
11064diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
11065index d2b5944..d878f3c 100644
11066--- a/arch/sparc/mm/hugetlbpage.c
11067+++ b/arch/sparc/mm/hugetlbpage.c
11068@@ -28,7 +28,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
11069 unsigned long addr,
11070 unsigned long len,
11071 unsigned long pgoff,
11072- unsigned long flags)
11073+ unsigned long flags,
11074+ unsigned long offset)
11075 {
11076 unsigned long task_size = TASK_SIZE;
11077 struct vm_unmapped_area_info info;
11078@@ -38,15 +39,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
11079
11080 info.flags = 0;
11081 info.length = len;
11082- info.low_limit = TASK_UNMAPPED_BASE;
11083+ info.low_limit = mm->mmap_base;
11084 info.high_limit = min(task_size, VA_EXCLUDE_START);
11085 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
11086 info.align_offset = 0;
11087+ info.threadstack_offset = offset;
11088 addr = vm_unmapped_area(&info);
11089
11090 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
11091 VM_BUG_ON(addr != -ENOMEM);
11092 info.low_limit = VA_EXCLUDE_END;
11093+
11094+#ifdef CONFIG_PAX_RANDMMAP
11095+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11096+ info.low_limit += mm->delta_mmap;
11097+#endif
11098+
11099 info.high_limit = task_size;
11100 addr = vm_unmapped_area(&info);
11101 }
11102@@ -58,7 +66,8 @@ static unsigned long
11103 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11104 const unsigned long len,
11105 const unsigned long pgoff,
11106- const unsigned long flags)
11107+ const unsigned long flags,
11108+ const unsigned long offset)
11109 {
11110 struct mm_struct *mm = current->mm;
11111 unsigned long addr = addr0;
11112@@ -73,6 +82,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11113 info.high_limit = mm->mmap_base;
11114 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
11115 info.align_offset = 0;
11116+ info.threadstack_offset = offset;
11117 addr = vm_unmapped_area(&info);
11118
11119 /*
11120@@ -85,6 +95,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11121 VM_BUG_ON(addr != -ENOMEM);
11122 info.flags = 0;
11123 info.low_limit = TASK_UNMAPPED_BASE;
11124+
11125+#ifdef CONFIG_PAX_RANDMMAP
11126+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11127+ info.low_limit += mm->delta_mmap;
11128+#endif
11129+
11130 info.high_limit = STACK_TOP32;
11131 addr = vm_unmapped_area(&info);
11132 }
11133@@ -99,6 +115,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
11134 struct mm_struct *mm = current->mm;
11135 struct vm_area_struct *vma;
11136 unsigned long task_size = TASK_SIZE;
11137+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
11138
11139 if (test_thread_flag(TIF_32BIT))
11140 task_size = STACK_TOP32;
11141@@ -114,19 +131,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
11142 return addr;
11143 }
11144
11145+#ifdef CONFIG_PAX_RANDMMAP
11146+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11147+#endif
11148+
11149 if (addr) {
11150 addr = ALIGN(addr, HPAGE_SIZE);
11151 vma = find_vma(mm, addr);
11152- if (task_size - len >= addr &&
11153- (!vma || addr + len <= vma->vm_start))
11154+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
11155 return addr;
11156 }
11157 if (mm->get_unmapped_area == arch_get_unmapped_area)
11158 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
11159- pgoff, flags);
11160+ pgoff, flags, offset);
11161 else
11162 return hugetlb_get_unmapped_area_topdown(file, addr, len,
11163- pgoff, flags);
11164+ pgoff, flags, offset);
11165 }
11166
11167 pte_t *huge_pte_alloc(struct mm_struct *mm,
11168diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
11169index f4500c6..889656c 100644
11170--- a/arch/tile/include/asm/atomic_64.h
11171+++ b/arch/tile/include/asm/atomic_64.h
11172@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
11173
11174 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
11175
11176+#define atomic64_read_unchecked(v) atomic64_read(v)
11177+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
11178+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
11179+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
11180+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
11181+#define atomic64_inc_unchecked(v) atomic64_inc(v)
11182+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
11183+#define atomic64_dec_unchecked(v) atomic64_dec(v)
11184+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
11185+
11186 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
11187 #define smp_mb__before_atomic_dec() smp_mb()
11188 #define smp_mb__after_atomic_dec() smp_mb()
11189diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
11190index a9a5299..0fce79e 100644
11191--- a/arch/tile/include/asm/cache.h
11192+++ b/arch/tile/include/asm/cache.h
11193@@ -15,11 +15,12 @@
11194 #ifndef _ASM_TILE_CACHE_H
11195 #define _ASM_TILE_CACHE_H
11196
11197+#include <linux/const.h>
11198 #include <arch/chip.h>
11199
11200 /* bytes per L1 data cache line */
11201 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
11202-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11203+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11204
11205 /* bytes per L2 cache line */
11206 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
11207diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
11208index 8a082bc..7a6bf87 100644
11209--- a/arch/tile/include/asm/uaccess.h
11210+++ b/arch/tile/include/asm/uaccess.h
11211@@ -408,9 +408,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
11212 const void __user *from,
11213 unsigned long n)
11214 {
11215- int sz = __compiletime_object_size(to);
11216+ size_t sz = __compiletime_object_size(to);
11217
11218- if (likely(sz == -1 || sz >= n))
11219+ if (likely(sz == (size_t)-1 || sz >= n))
11220 n = _copy_from_user(to, from, n);
11221 else
11222 copy_from_user_overflow();
11223diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
11224index 650ccff..45fe2d6 100644
11225--- a/arch/tile/mm/hugetlbpage.c
11226+++ b/arch/tile/mm/hugetlbpage.c
11227@@ -239,6 +239,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
11228 info.high_limit = TASK_SIZE;
11229 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
11230 info.align_offset = 0;
11231+ info.threadstack_offset = 0;
11232 return vm_unmapped_area(&info);
11233 }
11234
11235@@ -256,6 +257,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
11236 info.high_limit = current->mm->mmap_base;
11237 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
11238 info.align_offset = 0;
11239+ info.threadstack_offset = 0;
11240 addr = vm_unmapped_area(&info);
11241
11242 /*
11243diff --git a/arch/um/Makefile b/arch/um/Makefile
11244index 133f7de..1d6f2f1 100644
11245--- a/arch/um/Makefile
11246+++ b/arch/um/Makefile
11247@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
11248 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
11249 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
11250
11251+ifdef CONSTIFY_PLUGIN
11252+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11253+endif
11254+
11255 #This will adjust *FLAGS accordingly to the platform.
11256 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
11257
11258diff --git a/arch/um/defconfig b/arch/um/defconfig
11259index 08107a7..ab22afe 100644
11260--- a/arch/um/defconfig
11261+++ b/arch/um/defconfig
11262@@ -51,7 +51,6 @@ CONFIG_X86_CMPXCHG=y
11263 CONFIG_X86_L1_CACHE_SHIFT=5
11264 CONFIG_X86_XADD=y
11265 CONFIG_X86_PPRO_FENCE=y
11266-CONFIG_X86_WP_WORKS_OK=y
11267 CONFIG_X86_INVLPG=y
11268 CONFIG_X86_BSWAP=y
11269 CONFIG_X86_POPAD_OK=y
11270diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
11271index 19e1bdd..3665b77 100644
11272--- a/arch/um/include/asm/cache.h
11273+++ b/arch/um/include/asm/cache.h
11274@@ -1,6 +1,7 @@
11275 #ifndef __UM_CACHE_H
11276 #define __UM_CACHE_H
11277
11278+#include <linux/const.h>
11279
11280 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
11281 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
11282@@ -12,6 +13,6 @@
11283 # define L1_CACHE_SHIFT 5
11284 #endif
11285
11286-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11287+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11288
11289 #endif
11290diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
11291index 2e0a6b1..a64d0f5 100644
11292--- a/arch/um/include/asm/kmap_types.h
11293+++ b/arch/um/include/asm/kmap_types.h
11294@@ -8,6 +8,6 @@
11295
11296 /* No more #include "asm/arch/kmap_types.h" ! */
11297
11298-#define KM_TYPE_NR 14
11299+#define KM_TYPE_NR 15
11300
11301 #endif
11302diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
11303index 5ff53d9..5850cdf 100644
11304--- a/arch/um/include/asm/page.h
11305+++ b/arch/um/include/asm/page.h
11306@@ -14,6 +14,9 @@
11307 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
11308 #define PAGE_MASK (~(PAGE_SIZE-1))
11309
11310+#define ktla_ktva(addr) (addr)
11311+#define ktva_ktla(addr) (addr)
11312+
11313 #ifndef __ASSEMBLY__
11314
11315 struct page;
11316diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
11317index 0032f92..cd151e0 100644
11318--- a/arch/um/include/asm/pgtable-3level.h
11319+++ b/arch/um/include/asm/pgtable-3level.h
11320@@ -58,6 +58,7 @@
11321 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
11322 #define pud_populate(mm, pud, pmd) \
11323 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
11324+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
11325
11326 #ifdef CONFIG_64BIT
11327 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
11328diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
11329index bbcef52..6a2a483 100644
11330--- a/arch/um/kernel/process.c
11331+++ b/arch/um/kernel/process.c
11332@@ -367,22 +367,6 @@ int singlestepping(void * t)
11333 return 2;
11334 }
11335
11336-/*
11337- * Only x86 and x86_64 have an arch_align_stack().
11338- * All other arches have "#define arch_align_stack(x) (x)"
11339- * in their asm/system.h
11340- * As this is included in UML from asm-um/system-generic.h,
11341- * we can use it to behave as the subarch does.
11342- */
11343-#ifndef arch_align_stack
11344-unsigned long arch_align_stack(unsigned long sp)
11345-{
11346- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
11347- sp -= get_random_int() % 8192;
11348- return sp & ~0xf;
11349-}
11350-#endif
11351-
11352 unsigned long get_wchan(struct task_struct *p)
11353 {
11354 unsigned long stack_page, sp, ip;
11355diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
11356index ad8f795..2c7eec6 100644
11357--- a/arch/unicore32/include/asm/cache.h
11358+++ b/arch/unicore32/include/asm/cache.h
11359@@ -12,8 +12,10 @@
11360 #ifndef __UNICORE_CACHE_H__
11361 #define __UNICORE_CACHE_H__
11362
11363-#define L1_CACHE_SHIFT (5)
11364-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11365+#include <linux/const.h>
11366+
11367+#define L1_CACHE_SHIFT 5
11368+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11369
11370 /*
11371 * Memory returned by kmalloc() may be used for DMA, so we must make
11372diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
11373index fe120da..24177f7 100644
11374--- a/arch/x86/Kconfig
11375+++ b/arch/x86/Kconfig
11376@@ -239,7 +239,7 @@ config X86_HT
11377
11378 config X86_32_LAZY_GS
11379 def_bool y
11380- depends on X86_32 && !CC_STACKPROTECTOR
11381+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11382
11383 config ARCH_HWEIGHT_CFLAGS
11384 string
11385@@ -1073,6 +1073,7 @@ config MICROCODE_EARLY
11386
11387 config X86_MSR
11388 tristate "/dev/cpu/*/msr - Model-specific register support"
11389+ depends on !GRKERNSEC_KMEM
11390 ---help---
11391 This device gives privileged processes access to the x86
11392 Model-Specific Registers (MSRs). It is a character device with
11393@@ -1096,7 +1097,7 @@ choice
11394
11395 config NOHIGHMEM
11396 bool "off"
11397- depends on !X86_NUMAQ
11398+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11399 ---help---
11400 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11401 However, the address space of 32-bit x86 processors is only 4
11402@@ -1133,7 +1134,7 @@ config NOHIGHMEM
11403
11404 config HIGHMEM4G
11405 bool "4GB"
11406- depends on !X86_NUMAQ
11407+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11408 ---help---
11409 Select this if you have a 32-bit processor and between 1 and 4
11410 gigabytes of physical RAM.
11411@@ -1186,7 +1187,7 @@ config PAGE_OFFSET
11412 hex
11413 default 0xB0000000 if VMSPLIT_3G_OPT
11414 default 0x80000000 if VMSPLIT_2G
11415- default 0x78000000 if VMSPLIT_2G_OPT
11416+ default 0x70000000 if VMSPLIT_2G_OPT
11417 default 0x40000000 if VMSPLIT_1G
11418 default 0xC0000000
11419 depends on X86_32
11420@@ -1584,6 +1585,7 @@ config SECCOMP
11421
11422 config CC_STACKPROTECTOR
11423 bool "Enable -fstack-protector buffer overflow detection"
11424+ depends on X86_64 || !PAX_MEMORY_UDEREF
11425 ---help---
11426 This option turns on the -fstack-protector GCC feature. This
11427 feature puts, at the beginning of functions, a canary value on
11428@@ -1703,6 +1705,8 @@ config X86_NEED_RELOCS
11429 config PHYSICAL_ALIGN
11430 hex "Alignment value to which kernel should be aligned" if X86_32
11431 default "0x1000000"
11432+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
11433+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
11434 range 0x2000 0x1000000
11435 ---help---
11436 This value puts the alignment restrictions on physical address
11437@@ -1778,9 +1782,10 @@ config DEBUG_HOTPLUG_CPU0
11438 If unsure, say N.
11439
11440 config COMPAT_VDSO
11441- def_bool y
11442+ def_bool n
11443 prompt "Compat VDSO support"
11444 depends on X86_32 || IA32_EMULATION
11445+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
11446 ---help---
11447 Map the 32-bit VDSO to the predictable old-style address too.
11448
11449diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
11450index c026cca..14657ae 100644
11451--- a/arch/x86/Kconfig.cpu
11452+++ b/arch/x86/Kconfig.cpu
11453@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
11454
11455 config X86_F00F_BUG
11456 def_bool y
11457- depends on M586MMX || M586TSC || M586 || M486
11458+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
11459
11460 config X86_INVD_BUG
11461 def_bool y
11462@@ -327,7 +327,7 @@ config X86_INVD_BUG
11463
11464 config X86_ALIGNMENT_16
11465 def_bool y
11466- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11467+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11468
11469 config X86_INTEL_USERCOPY
11470 def_bool y
11471@@ -373,7 +373,7 @@ config X86_CMPXCHG64
11472 # generates cmov.
11473 config X86_CMOV
11474 def_bool y
11475- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
11476+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
11477
11478 config X86_MINIMUM_CPU_FAMILY
11479 int
11480diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
11481index c198b7e..63eea60 100644
11482--- a/arch/x86/Kconfig.debug
11483+++ b/arch/x86/Kconfig.debug
11484@@ -84,7 +84,7 @@ config X86_PTDUMP
11485 config DEBUG_RODATA
11486 bool "Write protect kernel read-only data structures"
11487 default y
11488- depends on DEBUG_KERNEL
11489+ depends on DEBUG_KERNEL && BROKEN
11490 ---help---
11491 Mark the kernel read-only data as write-protected in the pagetables,
11492 in order to catch accidental (and incorrect) writes to such const
11493@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
11494
11495 config DEBUG_SET_MODULE_RONX
11496 bool "Set loadable kernel module data as NX and text as RO"
11497- depends on MODULES
11498+ depends on MODULES && BROKEN
11499 ---help---
11500 This option helps catch unintended modifications to loadable
11501 kernel module's text and read-only data. It also prevents execution
11502diff --git a/arch/x86/Makefile b/arch/x86/Makefile
11503index 5c47726..8c4fa67 100644
11504--- a/arch/x86/Makefile
11505+++ b/arch/x86/Makefile
11506@@ -54,6 +54,7 @@ else
11507 UTS_MACHINE := x86_64
11508 CHECKFLAGS += -D__x86_64__ -m64
11509
11510+ biarch := $(call cc-option,-m64)
11511 KBUILD_AFLAGS += -m64
11512 KBUILD_CFLAGS += -m64
11513
11514@@ -234,3 +235,12 @@ define archhelp
11515 echo ' FDARGS="..." arguments for the booted kernel'
11516 echo ' FDINITRD=file initrd for the booted kernel'
11517 endef
11518+
11519+define OLD_LD
11520+
11521+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
11522+*** Please upgrade your binutils to 2.18 or newer
11523+endef
11524+
11525+archprepare:
11526+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
11527diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
11528index 379814b..add62ce 100644
11529--- a/arch/x86/boot/Makefile
11530+++ b/arch/x86/boot/Makefile
11531@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
11532 $(call cc-option, -fno-stack-protector) \
11533 $(call cc-option, -mpreferred-stack-boundary=2)
11534 KBUILD_CFLAGS += $(call cc-option, -m32)
11535+ifdef CONSTIFY_PLUGIN
11536+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11537+endif
11538 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11539 GCOV_PROFILE := n
11540
11541diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
11542index 878e4b9..20537ab 100644
11543--- a/arch/x86/boot/bitops.h
11544+++ b/arch/x86/boot/bitops.h
11545@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
11546 u8 v;
11547 const u32 *p = (const u32 *)addr;
11548
11549- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
11550+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
11551 return v;
11552 }
11553
11554@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
11555
11556 static inline void set_bit(int nr, void *addr)
11557 {
11558- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
11559+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
11560 }
11561
11562 #endif /* BOOT_BITOPS_H */
11563diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
11564index 5b75319..331a4ca 100644
11565--- a/arch/x86/boot/boot.h
11566+++ b/arch/x86/boot/boot.h
11567@@ -85,7 +85,7 @@ static inline void io_delay(void)
11568 static inline u16 ds(void)
11569 {
11570 u16 seg;
11571- asm("movw %%ds,%0" : "=rm" (seg));
11572+ asm volatile("movw %%ds,%0" : "=rm" (seg));
11573 return seg;
11574 }
11575
11576@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
11577 static inline int memcmp(const void *s1, const void *s2, size_t len)
11578 {
11579 u8 diff;
11580- asm("repe; cmpsb; setnz %0"
11581+ asm volatile("repe; cmpsb; setnz %0"
11582 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
11583 return diff;
11584 }
11585diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
11586index 5ef205c..342191d 100644
11587--- a/arch/x86/boot/compressed/Makefile
11588+++ b/arch/x86/boot/compressed/Makefile
11589@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
11590 KBUILD_CFLAGS += $(cflags-y)
11591 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
11592 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
11593+ifdef CONSTIFY_PLUGIN
11594+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11595+endif
11596
11597 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11598 GCOV_PROFILE := n
11599diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
11600index d606463..b887794 100644
11601--- a/arch/x86/boot/compressed/eboot.c
11602+++ b/arch/x86/boot/compressed/eboot.c
11603@@ -150,7 +150,6 @@ again:
11604 *addr = max_addr;
11605 }
11606
11607-free_pool:
11608 efi_call_phys1(sys_table->boottime->free_pool, map);
11609
11610 fail:
11611@@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
11612 if (i == map_size / desc_size)
11613 status = EFI_NOT_FOUND;
11614
11615-free_pool:
11616 efi_call_phys1(sys_table->boottime->free_pool, map);
11617 fail:
11618 return status;
11619diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
11620index a53440e..c3dbf1e 100644
11621--- a/arch/x86/boot/compressed/efi_stub_32.S
11622+++ b/arch/x86/boot/compressed/efi_stub_32.S
11623@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
11624 * parameter 2, ..., param n. To make things easy, we save the return
11625 * address of efi_call_phys in a global variable.
11626 */
11627- popl %ecx
11628- movl %ecx, saved_return_addr(%edx)
11629- /* get the function pointer into ECX*/
11630- popl %ecx
11631- movl %ecx, efi_rt_function_ptr(%edx)
11632+ popl saved_return_addr(%edx)
11633+ popl efi_rt_function_ptr(%edx)
11634
11635 /*
11636 * 3. Call the physical function.
11637 */
11638- call *%ecx
11639+ call *efi_rt_function_ptr(%edx)
11640
11641 /*
11642 * 4. Balance the stack. And because EAX contain the return value,
11643@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
11644 1: popl %edx
11645 subl $1b, %edx
11646
11647- movl efi_rt_function_ptr(%edx), %ecx
11648- pushl %ecx
11649+ pushl efi_rt_function_ptr(%edx)
11650
11651 /*
11652 * 10. Push the saved return address onto the stack and return.
11653 */
11654- movl saved_return_addr(%edx), %ecx
11655- pushl %ecx
11656- ret
11657+ jmpl *saved_return_addr(%edx)
11658 ENDPROC(efi_call_phys)
11659 .previous
11660
11661diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
11662index 1e3184f..0d11e2e 100644
11663--- a/arch/x86/boot/compressed/head_32.S
11664+++ b/arch/x86/boot/compressed/head_32.S
11665@@ -118,7 +118,7 @@ preferred_addr:
11666 notl %eax
11667 andl %eax, %ebx
11668 #else
11669- movl $LOAD_PHYSICAL_ADDR, %ebx
11670+ movl $____LOAD_PHYSICAL_ADDR, %ebx
11671 #endif
11672
11673 /* Target address to relocate to for decompression */
11674@@ -204,7 +204,7 @@ relocated:
11675 * and where it was actually loaded.
11676 */
11677 movl %ebp, %ebx
11678- subl $LOAD_PHYSICAL_ADDR, %ebx
11679+ subl $____LOAD_PHYSICAL_ADDR, %ebx
11680 jz 2f /* Nothing to be done if loaded at compiled addr. */
11681 /*
11682 * Process relocations.
11683@@ -212,8 +212,7 @@ relocated:
11684
11685 1: subl $4, %edi
11686 movl (%edi), %ecx
11687- testl %ecx, %ecx
11688- jz 2f
11689+ jecxz 2f
11690 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
11691 jmp 1b
11692 2:
11693diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
11694index 16f24e6..47491a3 100644
11695--- a/arch/x86/boot/compressed/head_64.S
11696+++ b/arch/x86/boot/compressed/head_64.S
11697@@ -97,7 +97,7 @@ ENTRY(startup_32)
11698 notl %eax
11699 andl %eax, %ebx
11700 #else
11701- movl $LOAD_PHYSICAL_ADDR, %ebx
11702+ movl $____LOAD_PHYSICAL_ADDR, %ebx
11703 #endif
11704
11705 /* Target address to relocate to for decompression */
11706@@ -272,7 +272,7 @@ preferred_addr:
11707 notq %rax
11708 andq %rax, %rbp
11709 #else
11710- movq $LOAD_PHYSICAL_ADDR, %rbp
11711+ movq $____LOAD_PHYSICAL_ADDR, %rbp
11712 #endif
11713
11714 /* Target address to relocate to for decompression */
11715@@ -363,8 +363,8 @@ gdt:
11716 .long gdt
11717 .word 0
11718 .quad 0x0000000000000000 /* NULL descriptor */
11719- .quad 0x00af9a000000ffff /* __KERNEL_CS */
11720- .quad 0x00cf92000000ffff /* __KERNEL_DS */
11721+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
11722+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
11723 .quad 0x0080890000000000 /* TS descriptor */
11724 .quad 0x0000000000000000 /* TS continued */
11725 gdt_end:
11726diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
11727index 7cb56c6..d382d84 100644
11728--- a/arch/x86/boot/compressed/misc.c
11729+++ b/arch/x86/boot/compressed/misc.c
11730@@ -303,7 +303,7 @@ static void parse_elf(void *output)
11731 case PT_LOAD:
11732 #ifdef CONFIG_RELOCATABLE
11733 dest = output;
11734- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
11735+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
11736 #else
11737 dest = (void *)(phdr->p_paddr);
11738 #endif
11739@@ -354,7 +354,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
11740 error("Destination address too large");
11741 #endif
11742 #ifndef CONFIG_RELOCATABLE
11743- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
11744+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
11745 error("Wrong destination address");
11746 #endif
11747
11748diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
11749index 4d3ff03..e4972ff 100644
11750--- a/arch/x86/boot/cpucheck.c
11751+++ b/arch/x86/boot/cpucheck.c
11752@@ -74,7 +74,7 @@ static int has_fpu(void)
11753 u16 fcw = -1, fsw = -1;
11754 u32 cr0;
11755
11756- asm("movl %%cr0,%0" : "=r" (cr0));
11757+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
11758 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
11759 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
11760 asm volatile("movl %0,%%cr0" : : "r" (cr0));
11761@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
11762 {
11763 u32 f0, f1;
11764
11765- asm("pushfl ; "
11766+ asm volatile("pushfl ; "
11767 "pushfl ; "
11768 "popl %0 ; "
11769 "movl %0,%1 ; "
11770@@ -115,7 +115,7 @@ static void get_flags(void)
11771 set_bit(X86_FEATURE_FPU, cpu.flags);
11772
11773 if (has_eflag(X86_EFLAGS_ID)) {
11774- asm("cpuid"
11775+ asm volatile("cpuid"
11776 : "=a" (max_intel_level),
11777 "=b" (cpu_vendor[0]),
11778 "=d" (cpu_vendor[1]),
11779@@ -124,7 +124,7 @@ static void get_flags(void)
11780
11781 if (max_intel_level >= 0x00000001 &&
11782 max_intel_level <= 0x0000ffff) {
11783- asm("cpuid"
11784+ asm volatile("cpuid"
11785 : "=a" (tfms),
11786 "=c" (cpu.flags[4]),
11787 "=d" (cpu.flags[0])
11788@@ -136,7 +136,7 @@ static void get_flags(void)
11789 cpu.model += ((tfms >> 16) & 0xf) << 4;
11790 }
11791
11792- asm("cpuid"
11793+ asm volatile("cpuid"
11794 : "=a" (max_amd_level)
11795 : "a" (0x80000000)
11796 : "ebx", "ecx", "edx");
11797@@ -144,7 +144,7 @@ static void get_flags(void)
11798 if (max_amd_level >= 0x80000001 &&
11799 max_amd_level <= 0x8000ffff) {
11800 u32 eax = 0x80000001;
11801- asm("cpuid"
11802+ asm volatile("cpuid"
11803 : "+a" (eax),
11804 "=c" (cpu.flags[6]),
11805 "=d" (cpu.flags[1])
11806@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
11807 u32 ecx = MSR_K7_HWCR;
11808 u32 eax, edx;
11809
11810- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11811+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11812 eax &= ~(1 << 15);
11813- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11814+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11815
11816 get_flags(); /* Make sure it really did something */
11817 err = check_flags();
11818@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
11819 u32 ecx = MSR_VIA_FCR;
11820 u32 eax, edx;
11821
11822- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11823+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11824 eax |= (1<<1)|(1<<7);
11825- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11826+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11827
11828 set_bit(X86_FEATURE_CX8, cpu.flags);
11829 err = check_flags();
11830@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
11831 u32 eax, edx;
11832 u32 level = 1;
11833
11834- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11835- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
11836- asm("cpuid"
11837+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11838+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
11839+ asm volatile("cpuid"
11840 : "+a" (level), "=d" (cpu.flags[0])
11841 : : "ecx", "ebx");
11842- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11843+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11844
11845 err = check_flags();
11846 }
11847diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
11848index 9ec06a1..2c25e79 100644
11849--- a/arch/x86/boot/header.S
11850+++ b/arch/x86/boot/header.S
11851@@ -409,10 +409,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
11852 # single linked list of
11853 # struct setup_data
11854
11855-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
11856+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
11857
11858 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
11859+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11860+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
11861+#else
11862 #define VO_INIT_SIZE (VO__end - VO__text)
11863+#endif
11864 #if ZO_INIT_SIZE > VO_INIT_SIZE
11865 #define INIT_SIZE ZO_INIT_SIZE
11866 #else
11867diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
11868index db75d07..8e6d0af 100644
11869--- a/arch/x86/boot/memory.c
11870+++ b/arch/x86/boot/memory.c
11871@@ -19,7 +19,7 @@
11872
11873 static int detect_memory_e820(void)
11874 {
11875- int count = 0;
11876+ unsigned int count = 0;
11877 struct biosregs ireg, oreg;
11878 struct e820entry *desc = boot_params.e820_map;
11879 static struct e820entry buf; /* static so it is zeroed */
11880diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
11881index 11e8c6e..fdbb1ed 100644
11882--- a/arch/x86/boot/video-vesa.c
11883+++ b/arch/x86/boot/video-vesa.c
11884@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
11885
11886 boot_params.screen_info.vesapm_seg = oreg.es;
11887 boot_params.screen_info.vesapm_off = oreg.di;
11888+ boot_params.screen_info.vesapm_size = oreg.cx;
11889 }
11890
11891 /*
11892diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
11893index 43eda28..5ab5fdb 100644
11894--- a/arch/x86/boot/video.c
11895+++ b/arch/x86/boot/video.c
11896@@ -96,7 +96,7 @@ static void store_mode_params(void)
11897 static unsigned int get_entry(void)
11898 {
11899 char entry_buf[4];
11900- int i, len = 0;
11901+ unsigned int i, len = 0;
11902 int key;
11903 unsigned int v;
11904
11905diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
11906index 9105655..5e37f27 100644
11907--- a/arch/x86/crypto/aes-x86_64-asm_64.S
11908+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
11909@@ -8,6 +8,8 @@
11910 * including this sentence is retained in full.
11911 */
11912
11913+#include <asm/alternative-asm.h>
11914+
11915 .extern crypto_ft_tab
11916 .extern crypto_it_tab
11917 .extern crypto_fl_tab
11918@@ -70,6 +72,8 @@
11919 je B192; \
11920 leaq 32(r9),r9;
11921
11922+#define ret pax_force_retaddr 0, 1; ret
11923+
11924 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
11925 movq r1,r2; \
11926 movq r3,r4; \
11927diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
11928index 477e9d7..3ab339f 100644
11929--- a/arch/x86/crypto/aesni-intel_asm.S
11930+++ b/arch/x86/crypto/aesni-intel_asm.S
11931@@ -31,6 +31,7 @@
11932
11933 #include <linux/linkage.h>
11934 #include <asm/inst.h>
11935+#include <asm/alternative-asm.h>
11936
11937 #ifdef __x86_64__
11938 .data
11939@@ -1441,6 +1442,7 @@ _return_T_done_decrypt:
11940 pop %r14
11941 pop %r13
11942 pop %r12
11943+ pax_force_retaddr 0, 1
11944 ret
11945 ENDPROC(aesni_gcm_dec)
11946
11947@@ -1705,6 +1707,7 @@ _return_T_done_encrypt:
11948 pop %r14
11949 pop %r13
11950 pop %r12
11951+ pax_force_retaddr 0, 1
11952 ret
11953 ENDPROC(aesni_gcm_enc)
11954
11955@@ -1722,6 +1725,7 @@ _key_expansion_256a:
11956 pxor %xmm1, %xmm0
11957 movaps %xmm0, (TKEYP)
11958 add $0x10, TKEYP
11959+ pax_force_retaddr_bts
11960 ret
11961 ENDPROC(_key_expansion_128)
11962 ENDPROC(_key_expansion_256a)
11963@@ -1748,6 +1752,7 @@ _key_expansion_192a:
11964 shufps $0b01001110, %xmm2, %xmm1
11965 movaps %xmm1, 0x10(TKEYP)
11966 add $0x20, TKEYP
11967+ pax_force_retaddr_bts
11968 ret
11969 ENDPROC(_key_expansion_192a)
11970
11971@@ -1768,6 +1773,7 @@ _key_expansion_192b:
11972
11973 movaps %xmm0, (TKEYP)
11974 add $0x10, TKEYP
11975+ pax_force_retaddr_bts
11976 ret
11977 ENDPROC(_key_expansion_192b)
11978
11979@@ -1781,6 +1787,7 @@ _key_expansion_256b:
11980 pxor %xmm1, %xmm2
11981 movaps %xmm2, (TKEYP)
11982 add $0x10, TKEYP
11983+ pax_force_retaddr_bts
11984 ret
11985 ENDPROC(_key_expansion_256b)
11986
11987@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
11988 #ifndef __x86_64__
11989 popl KEYP
11990 #endif
11991+ pax_force_retaddr 0, 1
11992 ret
11993 ENDPROC(aesni_set_key)
11994
11995@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
11996 popl KLEN
11997 popl KEYP
11998 #endif
11999+ pax_force_retaddr 0, 1
12000 ret
12001 ENDPROC(aesni_enc)
12002
12003@@ -1974,6 +1983,7 @@ _aesni_enc1:
12004 AESENC KEY STATE
12005 movaps 0x70(TKEYP), KEY
12006 AESENCLAST KEY STATE
12007+ pax_force_retaddr_bts
12008 ret
12009 ENDPROC(_aesni_enc1)
12010
12011@@ -2083,6 +2093,7 @@ _aesni_enc4:
12012 AESENCLAST KEY STATE2
12013 AESENCLAST KEY STATE3
12014 AESENCLAST KEY STATE4
12015+ pax_force_retaddr_bts
12016 ret
12017 ENDPROC(_aesni_enc4)
12018
12019@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
12020 popl KLEN
12021 popl KEYP
12022 #endif
12023+ pax_force_retaddr 0, 1
12024 ret
12025 ENDPROC(aesni_dec)
12026
12027@@ -2164,6 +2176,7 @@ _aesni_dec1:
12028 AESDEC KEY STATE
12029 movaps 0x70(TKEYP), KEY
12030 AESDECLAST KEY STATE
12031+ pax_force_retaddr_bts
12032 ret
12033 ENDPROC(_aesni_dec1)
12034
12035@@ -2273,6 +2286,7 @@ _aesni_dec4:
12036 AESDECLAST KEY STATE2
12037 AESDECLAST KEY STATE3
12038 AESDECLAST KEY STATE4
12039+ pax_force_retaddr_bts
12040 ret
12041 ENDPROC(_aesni_dec4)
12042
12043@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
12044 popl KEYP
12045 popl LEN
12046 #endif
12047+ pax_force_retaddr 0, 1
12048 ret
12049 ENDPROC(aesni_ecb_enc)
12050
12051@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
12052 popl KEYP
12053 popl LEN
12054 #endif
12055+ pax_force_retaddr 0, 1
12056 ret
12057 ENDPROC(aesni_ecb_dec)
12058
12059@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
12060 popl LEN
12061 popl IVP
12062 #endif
12063+ pax_force_retaddr 0, 1
12064 ret
12065 ENDPROC(aesni_cbc_enc)
12066
12067@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
12068 popl LEN
12069 popl IVP
12070 #endif
12071+ pax_force_retaddr 0, 1
12072 ret
12073 ENDPROC(aesni_cbc_dec)
12074
12075@@ -2550,6 +2568,7 @@ _aesni_inc_init:
12076 mov $1, TCTR_LOW
12077 MOVQ_R64_XMM TCTR_LOW INC
12078 MOVQ_R64_XMM CTR TCTR_LOW
12079+ pax_force_retaddr_bts
12080 ret
12081 ENDPROC(_aesni_inc_init)
12082
12083@@ -2579,6 +2598,7 @@ _aesni_inc:
12084 .Linc_low:
12085 movaps CTR, IV
12086 PSHUFB_XMM BSWAP_MASK IV
12087+ pax_force_retaddr_bts
12088 ret
12089 ENDPROC(_aesni_inc)
12090
12091@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
12092 .Lctr_enc_ret:
12093 movups IV, (IVP)
12094 .Lctr_enc_just_ret:
12095+ pax_force_retaddr 0, 1
12096 ret
12097 ENDPROC(aesni_ctr_enc)
12098
12099@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
12100 pxor INC, STATE4
12101 movdqu STATE4, 0x70(OUTP)
12102
12103+ pax_force_retaddr 0, 1
12104 ret
12105 ENDPROC(aesni_xts_crypt8)
12106
12107diff --git a/arch/x86/crypto/blowfish-avx2-asm_64.S b/arch/x86/crypto/blowfish-avx2-asm_64.S
12108index 784452e..46982c7 100644
12109--- a/arch/x86/crypto/blowfish-avx2-asm_64.S
12110+++ b/arch/x86/crypto/blowfish-avx2-asm_64.S
12111@@ -221,6 +221,7 @@ __blowfish_enc_blk32:
12112
12113 write_block(RXl, RXr);
12114
12115+ pax_force_retaddr 0, 1
12116 ret;
12117 ENDPROC(__blowfish_enc_blk32)
12118
12119@@ -250,6 +251,7 @@ __blowfish_dec_blk32:
12120
12121 write_block(RXl, RXr);
12122
12123+ pax_force_retaddr 0, 1
12124 ret;
12125 ENDPROC(__blowfish_dec_blk32)
12126
12127@@ -284,6 +286,7 @@ ENTRY(blowfish_ecb_enc_32way)
12128
12129 vzeroupper;
12130
12131+ pax_force_retaddr 0, 1
12132 ret;
12133 ENDPROC(blowfish_ecb_enc_32way)
12134
12135@@ -318,6 +321,7 @@ ENTRY(blowfish_ecb_dec_32way)
12136
12137 vzeroupper;
12138
12139+ pax_force_retaddr 0, 1
12140 ret;
12141 ENDPROC(blowfish_ecb_dec_32way)
12142
12143@@ -365,6 +369,7 @@ ENTRY(blowfish_cbc_dec_32way)
12144
12145 vzeroupper;
12146
12147+ pax_force_retaddr 0, 1
12148 ret;
12149 ENDPROC(blowfish_cbc_dec_32way)
12150
12151@@ -445,5 +450,6 @@ ENTRY(blowfish_ctr_32way)
12152
12153 vzeroupper;
12154
12155+ pax_force_retaddr 0, 1
12156 ret;
12157 ENDPROC(blowfish_ctr_32way)
12158diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
12159index 246c670..4d1ed00 100644
12160--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
12161+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
12162@@ -21,6 +21,7 @@
12163 */
12164
12165 #include <linux/linkage.h>
12166+#include <asm/alternative-asm.h>
12167
12168 .file "blowfish-x86_64-asm.S"
12169 .text
12170@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
12171 jnz .L__enc_xor;
12172
12173 write_block();
12174+ pax_force_retaddr 0, 1
12175 ret;
12176 .L__enc_xor:
12177 xor_block();
12178+ pax_force_retaddr 0, 1
12179 ret;
12180 ENDPROC(__blowfish_enc_blk)
12181
12182@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
12183
12184 movq %r11, %rbp;
12185
12186+ pax_force_retaddr 0, 1
12187 ret;
12188 ENDPROC(blowfish_dec_blk)
12189
12190@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
12191
12192 popq %rbx;
12193 popq %rbp;
12194+ pax_force_retaddr 0, 1
12195 ret;
12196
12197 .L__enc_xor4:
12198@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
12199
12200 popq %rbx;
12201 popq %rbp;
12202+ pax_force_retaddr 0, 1
12203 ret;
12204 ENDPROC(__blowfish_enc_blk_4way)
12205
12206@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
12207 popq %rbx;
12208 popq %rbp;
12209
12210+ pax_force_retaddr 0, 1
12211 ret;
12212 ENDPROC(blowfish_dec_blk_4way)
12213diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12214index ce71f92..2dd5b1e 100644
12215--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12216+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12217@@ -16,6 +16,7 @@
12218 */
12219
12220 #include <linux/linkage.h>
12221+#include <asm/alternative-asm.h>
12222
12223 #define CAMELLIA_TABLE_BYTE_LEN 272
12224
12225@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
12226 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
12227 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
12228 %rcx, (%r9));
12229+ pax_force_retaddr_bts
12230 ret;
12231 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
12232
12233@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
12234 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
12235 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
12236 %rax, (%r9));
12237+ pax_force_retaddr_bts
12238 ret;
12239 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
12240
12241@@ -780,6 +783,7 @@ __camellia_enc_blk16:
12242 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
12243 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
12244
12245+ pax_force_retaddr_bts
12246 ret;
12247
12248 .align 8
12249@@ -865,6 +869,7 @@ __camellia_dec_blk16:
12250 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
12251 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
12252
12253+ pax_force_retaddr_bts
12254 ret;
12255
12256 .align 8
12257@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
12258 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12259 %xmm8, %rsi);
12260
12261+ pax_force_retaddr 0, 1
12262 ret;
12263 ENDPROC(camellia_ecb_enc_16way)
12264
12265@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
12266 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12267 %xmm8, %rsi);
12268
12269+ pax_force_retaddr 0, 1
12270 ret;
12271 ENDPROC(camellia_ecb_dec_16way)
12272
12273@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
12274 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12275 %xmm8, %rsi);
12276
12277+ pax_force_retaddr 0, 1
12278 ret;
12279 ENDPROC(camellia_cbc_dec_16way)
12280
12281@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
12282 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12283 %xmm8, %rsi);
12284
12285+ pax_force_retaddr 0, 1
12286 ret;
12287 ENDPROC(camellia_ctr_16way)
12288
12289@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
12290 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12291 %xmm8, %rsi);
12292
12293+ pax_force_retaddr 0, 1
12294 ret;
12295 ENDPROC(camellia_xts_crypt_16way)
12296
12297diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12298index 91a1878..bcf340a 100644
12299--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12300+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12301@@ -11,6 +11,7 @@
12302 */
12303
12304 #include <linux/linkage.h>
12305+#include <asm/alternative-asm.h>
12306
12307 #define CAMELLIA_TABLE_BYTE_LEN 272
12308
12309@@ -212,6 +213,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
12310 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
12311 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
12312 %rcx, (%r9));
12313+ pax_force_retaddr_bts
12314 ret;
12315 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
12316
12317@@ -220,6 +222,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
12318 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
12319 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
12320 %rax, (%r9));
12321+ pax_force_retaddr_bts
12322 ret;
12323 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
12324
12325@@ -802,6 +805,7 @@ __camellia_enc_blk32:
12326 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
12327 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
12328
12329+ pax_force_retaddr_bts
12330 ret;
12331
12332 .align 8
12333@@ -887,6 +891,7 @@ __camellia_dec_blk32:
12334 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
12335 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
12336
12337+ pax_force_retaddr_bts
12338 ret;
12339
12340 .align 8
12341@@ -930,6 +935,7 @@ ENTRY(camellia_ecb_enc_32way)
12342
12343 vzeroupper;
12344
12345+ pax_force_retaddr 0, 1
12346 ret;
12347 ENDPROC(camellia_ecb_enc_32way)
12348
12349@@ -962,6 +968,7 @@ ENTRY(camellia_ecb_dec_32way)
12350
12351 vzeroupper;
12352
12353+ pax_force_retaddr 0, 1
12354 ret;
12355 ENDPROC(camellia_ecb_dec_32way)
12356
12357@@ -1028,6 +1035,7 @@ ENTRY(camellia_cbc_dec_32way)
12358
12359 vzeroupper;
12360
12361+ pax_force_retaddr 0, 1
12362 ret;
12363 ENDPROC(camellia_cbc_dec_32way)
12364
12365@@ -1166,6 +1174,7 @@ ENTRY(camellia_ctr_32way)
12366
12367 vzeroupper;
12368
12369+ pax_force_retaddr 0, 1
12370 ret;
12371 ENDPROC(camellia_ctr_32way)
12372
12373@@ -1331,6 +1340,7 @@ camellia_xts_crypt_32way:
12374
12375 vzeroupper;
12376
12377+ pax_force_retaddr 0, 1
12378 ret;
12379 ENDPROC(camellia_xts_crypt_32way)
12380
12381diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
12382index 310319c..ce174a4 100644
12383--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
12384+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
12385@@ -21,6 +21,7 @@
12386 */
12387
12388 #include <linux/linkage.h>
12389+#include <asm/alternative-asm.h>
12390
12391 .file "camellia-x86_64-asm_64.S"
12392 .text
12393@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
12394 enc_outunpack(mov, RT1);
12395
12396 movq RRBP, %rbp;
12397+ pax_force_retaddr 0, 1
12398 ret;
12399
12400 .L__enc_xor:
12401 enc_outunpack(xor, RT1);
12402
12403 movq RRBP, %rbp;
12404+ pax_force_retaddr 0, 1
12405 ret;
12406 ENDPROC(__camellia_enc_blk)
12407
12408@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
12409 dec_outunpack();
12410
12411 movq RRBP, %rbp;
12412+ pax_force_retaddr 0, 1
12413 ret;
12414 ENDPROC(camellia_dec_blk)
12415
12416@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
12417
12418 movq RRBP, %rbp;
12419 popq %rbx;
12420+ pax_force_retaddr 0, 1
12421 ret;
12422
12423 .L__enc2_xor:
12424@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
12425
12426 movq RRBP, %rbp;
12427 popq %rbx;
12428+ pax_force_retaddr 0, 1
12429 ret;
12430 ENDPROC(__camellia_enc_blk_2way)
12431
12432@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
12433
12434 movq RRBP, %rbp;
12435 movq RXOR, %rbx;
12436+ pax_force_retaddr 0, 1
12437 ret;
12438 ENDPROC(camellia_dec_blk_2way)
12439diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12440index c35fd5d..c1ee236 100644
12441--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12442+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12443@@ -24,6 +24,7 @@
12444 */
12445
12446 #include <linux/linkage.h>
12447+#include <asm/alternative-asm.h>
12448
12449 .file "cast5-avx-x86_64-asm_64.S"
12450
12451@@ -281,6 +282,7 @@ __cast5_enc_blk16:
12452 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
12453 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
12454
12455+ pax_force_retaddr 0, 1
12456 ret;
12457 ENDPROC(__cast5_enc_blk16)
12458
12459@@ -352,6 +354,7 @@ __cast5_dec_blk16:
12460 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
12461 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
12462
12463+ pax_force_retaddr 0, 1
12464 ret;
12465
12466 .L__skip_dec:
12467@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
12468 vmovdqu RR4, (6*4*4)(%r11);
12469 vmovdqu RL4, (7*4*4)(%r11);
12470
12471+ pax_force_retaddr
12472 ret;
12473 ENDPROC(cast5_ecb_enc_16way)
12474
12475@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
12476 vmovdqu RR4, (6*4*4)(%r11);
12477 vmovdqu RL4, (7*4*4)(%r11);
12478
12479+ pax_force_retaddr
12480 ret;
12481 ENDPROC(cast5_ecb_dec_16way)
12482
12483@@ -469,6 +474,7 @@ ENTRY(cast5_cbc_dec_16way)
12484
12485 popq %r12;
12486
12487+ pax_force_retaddr
12488 ret;
12489 ENDPROC(cast5_cbc_dec_16way)
12490
12491@@ -542,5 +548,6 @@ ENTRY(cast5_ctr_16way)
12492
12493 popq %r12;
12494
12495+ pax_force_retaddr
12496 ret;
12497 ENDPROC(cast5_ctr_16way)
12498diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
12499index e3531f8..18ded3a 100644
12500--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
12501+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
12502@@ -24,6 +24,7 @@
12503 */
12504
12505 #include <linux/linkage.h>
12506+#include <asm/alternative-asm.h>
12507 #include "glue_helper-asm-avx.S"
12508
12509 .file "cast6-avx-x86_64-asm_64.S"
12510@@ -295,6 +296,7 @@ __cast6_enc_blk8:
12511 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
12512 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
12513
12514+ pax_force_retaddr 0, 1
12515 ret;
12516 ENDPROC(__cast6_enc_blk8)
12517
12518@@ -340,6 +342,7 @@ __cast6_dec_blk8:
12519 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
12520 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
12521
12522+ pax_force_retaddr 0, 1
12523 ret;
12524 ENDPROC(__cast6_dec_blk8)
12525
12526@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
12527
12528 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12529
12530+ pax_force_retaddr
12531 ret;
12532 ENDPROC(cast6_ecb_enc_8way)
12533
12534@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
12535
12536 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12537
12538+ pax_force_retaddr
12539 ret;
12540 ENDPROC(cast6_ecb_dec_8way)
12541
12542@@ -399,6 +404,7 @@ ENTRY(cast6_cbc_dec_8way)
12543
12544 popq %r12;
12545
12546+ pax_force_retaddr
12547 ret;
12548 ENDPROC(cast6_cbc_dec_8way)
12549
12550@@ -424,6 +430,7 @@ ENTRY(cast6_ctr_8way)
12551
12552 popq %r12;
12553
12554+ pax_force_retaddr
12555 ret;
12556 ENDPROC(cast6_ctr_8way)
12557
12558@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
12559 /* dst <= regs xor IVs(in dst) */
12560 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12561
12562+ pax_force_retaddr
12563 ret;
12564 ENDPROC(cast6_xts_enc_8way)
12565
12566@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
12567 /* dst <= regs xor IVs(in dst) */
12568 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12569
12570+ pax_force_retaddr
12571 ret;
12572 ENDPROC(cast6_xts_dec_8way)
12573diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
12574index dbc4339..3d868c5 100644
12575--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
12576+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
12577@@ -45,6 +45,7 @@
12578
12579 #include <asm/inst.h>
12580 #include <linux/linkage.h>
12581+#include <asm/alternative-asm.h>
12582
12583 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
12584
12585@@ -312,6 +313,7 @@ do_return:
12586 popq %rsi
12587 popq %rdi
12588 popq %rbx
12589+ pax_force_retaddr 0, 1
12590 ret
12591
12592 ################################################################
12593diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
12594index 586f41a..d02851e 100644
12595--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
12596+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
12597@@ -18,6 +18,7 @@
12598
12599 #include <linux/linkage.h>
12600 #include <asm/inst.h>
12601+#include <asm/alternative-asm.h>
12602
12603 .data
12604
12605@@ -93,6 +94,7 @@ __clmul_gf128mul_ble:
12606 psrlq $1, T2
12607 pxor T2, T1
12608 pxor T1, DATA
12609+ pax_force_retaddr
12610 ret
12611 ENDPROC(__clmul_gf128mul_ble)
12612
12613@@ -105,6 +107,7 @@ ENTRY(clmul_ghash_mul)
12614 call __clmul_gf128mul_ble
12615 PSHUFB_XMM BSWAP DATA
12616 movups DATA, (%rdi)
12617+ pax_force_retaddr
12618 ret
12619 ENDPROC(clmul_ghash_mul)
12620
12621@@ -132,6 +135,7 @@ ENTRY(clmul_ghash_update)
12622 PSHUFB_XMM BSWAP DATA
12623 movups DATA, (%rdi)
12624 .Lupdate_just_ret:
12625+ pax_force_retaddr
12626 ret
12627 ENDPROC(clmul_ghash_update)
12628
12629@@ -157,5 +161,6 @@ ENTRY(clmul_ghash_setkey)
12630 pand .Lpoly, %xmm1
12631 pxor %xmm1, %xmm0
12632 movups %xmm0, (%rdi)
12633+ pax_force_retaddr
12634 ret
12635 ENDPROC(clmul_ghash_setkey)
12636diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
12637index 9279e0b..9270820 100644
12638--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
12639+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
12640@@ -1,4 +1,5 @@
12641 #include <linux/linkage.h>
12642+#include <asm/alternative-asm.h>
12643
12644 # enter salsa20_encrypt_bytes
12645 ENTRY(salsa20_encrypt_bytes)
12646@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
12647 add %r11,%rsp
12648 mov %rdi,%rax
12649 mov %rsi,%rdx
12650+ pax_force_retaddr 0, 1
12651 ret
12652 # bytesatleast65:
12653 ._bytesatleast65:
12654@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
12655 add %r11,%rsp
12656 mov %rdi,%rax
12657 mov %rsi,%rdx
12658+ pax_force_retaddr
12659 ret
12660 ENDPROC(salsa20_keysetup)
12661
12662@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
12663 add %r11,%rsp
12664 mov %rdi,%rax
12665 mov %rsi,%rdx
12666+ pax_force_retaddr
12667 ret
12668 ENDPROC(salsa20_ivsetup)
12669diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
12670index 2f202f4..d9164d6 100644
12671--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
12672+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
12673@@ -24,6 +24,7 @@
12674 */
12675
12676 #include <linux/linkage.h>
12677+#include <asm/alternative-asm.h>
12678 #include "glue_helper-asm-avx.S"
12679
12680 .file "serpent-avx-x86_64-asm_64.S"
12681@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
12682 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12683 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12684
12685+ pax_force_retaddr
12686 ret;
12687 ENDPROC(__serpent_enc_blk8_avx)
12688
12689@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
12690 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
12691 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
12692
12693+ pax_force_retaddr
12694 ret;
12695 ENDPROC(__serpent_dec_blk8_avx)
12696
12697@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
12698
12699 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12700
12701+ pax_force_retaddr
12702 ret;
12703 ENDPROC(serpent_ecb_enc_8way_avx)
12704
12705@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
12706
12707 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
12708
12709+ pax_force_retaddr
12710 ret;
12711 ENDPROC(serpent_ecb_dec_8way_avx)
12712
12713@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
12714
12715 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
12716
12717+ pax_force_retaddr
12718 ret;
12719 ENDPROC(serpent_cbc_dec_8way_avx)
12720
12721@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
12722
12723 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12724
12725+ pax_force_retaddr
12726 ret;
12727 ENDPROC(serpent_ctr_8way_avx)
12728
12729@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
12730 /* dst <= regs xor IVs(in dst) */
12731 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12732
12733+ pax_force_retaddr
12734 ret;
12735 ENDPROC(serpent_xts_enc_8way_avx)
12736
12737@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
12738 /* dst <= regs xor IVs(in dst) */
12739 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
12740
12741+ pax_force_retaddr
12742 ret;
12743 ENDPROC(serpent_xts_dec_8way_avx)
12744diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
12745index b222085..abd483c 100644
12746--- a/arch/x86/crypto/serpent-avx2-asm_64.S
12747+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
12748@@ -15,6 +15,7 @@
12749 */
12750
12751 #include <linux/linkage.h>
12752+#include <asm/alternative-asm.h>
12753 #include "glue_helper-asm-avx2.S"
12754
12755 .file "serpent-avx2-asm_64.S"
12756@@ -610,6 +611,7 @@ __serpent_enc_blk16:
12757 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12758 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12759
12760+ pax_force_retaddr
12761 ret;
12762 ENDPROC(__serpent_enc_blk16)
12763
12764@@ -664,6 +666,7 @@ __serpent_dec_blk16:
12765 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
12766 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
12767
12768+ pax_force_retaddr
12769 ret;
12770 ENDPROC(__serpent_dec_blk16)
12771
12772@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
12773
12774 vzeroupper;
12775
12776+ pax_force_retaddr
12777 ret;
12778 ENDPROC(serpent_ecb_enc_16way)
12779
12780@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
12781
12782 vzeroupper;
12783
12784+ pax_force_retaddr
12785 ret;
12786 ENDPROC(serpent_ecb_dec_16way)
12787
12788@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
12789
12790 vzeroupper;
12791
12792+ pax_force_retaddr
12793 ret;
12794 ENDPROC(serpent_cbc_dec_16way)
12795
12796@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
12797
12798 vzeroupper;
12799
12800+ pax_force_retaddr
12801 ret;
12802 ENDPROC(serpent_ctr_16way)
12803
12804@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
12805
12806 vzeroupper;
12807
12808+ pax_force_retaddr
12809 ret;
12810 ENDPROC(serpent_xts_enc_16way)
12811
12812@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
12813
12814 vzeroupper;
12815
12816+ pax_force_retaddr
12817 ret;
12818 ENDPROC(serpent_xts_dec_16way)
12819diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
12820index acc066c..1559cc4 100644
12821--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
12822+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
12823@@ -25,6 +25,7 @@
12824 */
12825
12826 #include <linux/linkage.h>
12827+#include <asm/alternative-asm.h>
12828
12829 .file "serpent-sse2-x86_64-asm_64.S"
12830 .text
12831@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
12832 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12833 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12834
12835+ pax_force_retaddr
12836 ret;
12837
12838 .L__enc_xor8:
12839 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12840 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12841
12842+ pax_force_retaddr
12843 ret;
12844 ENDPROC(__serpent_enc_blk_8way)
12845
12846@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
12847 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
12848 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
12849
12850+ pax_force_retaddr
12851 ret;
12852 ENDPROC(serpent_dec_blk_8way)
12853diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
12854index a410950..3356d42 100644
12855--- a/arch/x86/crypto/sha1_ssse3_asm.S
12856+++ b/arch/x86/crypto/sha1_ssse3_asm.S
12857@@ -29,6 +29,7 @@
12858 */
12859
12860 #include <linux/linkage.h>
12861+#include <asm/alternative-asm.h>
12862
12863 #define CTX %rdi // arg1
12864 #define BUF %rsi // arg2
12865@@ -104,6 +105,7 @@
12866 pop %r12
12867 pop %rbp
12868 pop %rbx
12869+ pax_force_retaddr 0, 1
12870 ret
12871
12872 ENDPROC(\name)
12873diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
12874index 642f156..4ab07b9 100644
12875--- a/arch/x86/crypto/sha256-avx-asm.S
12876+++ b/arch/x86/crypto/sha256-avx-asm.S
12877@@ -49,6 +49,7 @@
12878
12879 #ifdef CONFIG_AS_AVX
12880 #include <linux/linkage.h>
12881+#include <asm/alternative-asm.h>
12882
12883 ## assume buffers not aligned
12884 #define VMOVDQ vmovdqu
12885@@ -460,6 +461,7 @@ done_hash:
12886 popq %r13
12887 popq %rbp
12888 popq %rbx
12889+ pax_force_retaddr 0, 1
12890 ret
12891 ENDPROC(sha256_transform_avx)
12892
12893diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
12894index 9e86944..2e7f95a 100644
12895--- a/arch/x86/crypto/sha256-avx2-asm.S
12896+++ b/arch/x86/crypto/sha256-avx2-asm.S
12897@@ -50,6 +50,7 @@
12898
12899 #ifdef CONFIG_AS_AVX2
12900 #include <linux/linkage.h>
12901+#include <asm/alternative-asm.h>
12902
12903 ## assume buffers not aligned
12904 #define VMOVDQ vmovdqu
12905@@ -720,6 +721,7 @@ done_hash:
12906 popq %r12
12907 popq %rbp
12908 popq %rbx
12909+ pax_force_retaddr 0, 1
12910 ret
12911 ENDPROC(sha256_transform_rorx)
12912
12913diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
12914index f833b74..c36ed14 100644
12915--- a/arch/x86/crypto/sha256-ssse3-asm.S
12916+++ b/arch/x86/crypto/sha256-ssse3-asm.S
12917@@ -47,6 +47,7 @@
12918 ########################################################################
12919
12920 #include <linux/linkage.h>
12921+#include <asm/alternative-asm.h>
12922
12923 ## assume buffers not aligned
12924 #define MOVDQ movdqu
12925@@ -471,6 +472,7 @@ done_hash:
12926 popq %rbp
12927 popq %rbx
12928
12929+ pax_force_retaddr 0, 1
12930 ret
12931 ENDPROC(sha256_transform_ssse3)
12932
12933diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
12934index 974dde9..4533d34 100644
12935--- a/arch/x86/crypto/sha512-avx-asm.S
12936+++ b/arch/x86/crypto/sha512-avx-asm.S
12937@@ -49,6 +49,7 @@
12938
12939 #ifdef CONFIG_AS_AVX
12940 #include <linux/linkage.h>
12941+#include <asm/alternative-asm.h>
12942
12943 .text
12944
12945@@ -364,6 +365,7 @@ updateblock:
12946 mov frame_RSPSAVE(%rsp), %rsp
12947
12948 nowork:
12949+ pax_force_retaddr 0, 1
12950 ret
12951 ENDPROC(sha512_transform_avx)
12952
12953diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
12954index 568b961..061ef1d 100644
12955--- a/arch/x86/crypto/sha512-avx2-asm.S
12956+++ b/arch/x86/crypto/sha512-avx2-asm.S
12957@@ -51,6 +51,7 @@
12958
12959 #ifdef CONFIG_AS_AVX2
12960 #include <linux/linkage.h>
12961+#include <asm/alternative-asm.h>
12962
12963 .text
12964
12965@@ -678,6 +679,7 @@ done_hash:
12966
12967 # Restore Stack Pointer
12968 mov frame_RSPSAVE(%rsp), %rsp
12969+ pax_force_retaddr 0, 1
12970 ret
12971 ENDPROC(sha512_transform_rorx)
12972
12973diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
12974index fb56855..e23914f 100644
12975--- a/arch/x86/crypto/sha512-ssse3-asm.S
12976+++ b/arch/x86/crypto/sha512-ssse3-asm.S
12977@@ -48,6 +48,7 @@
12978 ########################################################################
12979
12980 #include <linux/linkage.h>
12981+#include <asm/alternative-asm.h>
12982
12983 .text
12984
12985@@ -363,6 +364,7 @@ updateblock:
12986 mov frame_RSPSAVE(%rsp), %rsp
12987
12988 nowork:
12989+ pax_force_retaddr 0, 1
12990 ret
12991 ENDPROC(sha512_transform_ssse3)
12992
12993diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
12994index 0505813..63b1d00 100644
12995--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
12996+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
12997@@ -24,6 +24,7 @@
12998 */
12999
13000 #include <linux/linkage.h>
13001+#include <asm/alternative-asm.h>
13002 #include "glue_helper-asm-avx.S"
13003
13004 .file "twofish-avx-x86_64-asm_64.S"
13005@@ -284,6 +285,7 @@ __twofish_enc_blk8:
13006 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
13007 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
13008
13009+ pax_force_retaddr 0, 1
13010 ret;
13011 ENDPROC(__twofish_enc_blk8)
13012
13013@@ -324,6 +326,7 @@ __twofish_dec_blk8:
13014 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
13015 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
13016
13017+ pax_force_retaddr 0, 1
13018 ret;
13019 ENDPROC(__twofish_dec_blk8)
13020
13021@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
13022
13023 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13024
13025+ pax_force_retaddr 0, 1
13026 ret;
13027 ENDPROC(twofish_ecb_enc_8way)
13028
13029@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
13030
13031 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13032
13033+ pax_force_retaddr 0, 1
13034 ret;
13035 ENDPROC(twofish_ecb_dec_8way)
13036
13037@@ -383,6 +388,7 @@ ENTRY(twofish_cbc_dec_8way)
13038
13039 popq %r12;
13040
13041+ pax_force_retaddr 0, 1
13042 ret;
13043 ENDPROC(twofish_cbc_dec_8way)
13044
13045@@ -408,6 +414,7 @@ ENTRY(twofish_ctr_8way)
13046
13047 popq %r12;
13048
13049+ pax_force_retaddr 0, 1
13050 ret;
13051 ENDPROC(twofish_ctr_8way)
13052
13053@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
13054 /* dst <= regs xor IVs(in dst) */
13055 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13056
13057+ pax_force_retaddr 0, 1
13058 ret;
13059 ENDPROC(twofish_xts_enc_8way)
13060
13061@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
13062 /* dst <= regs xor IVs(in dst) */
13063 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13064
13065+ pax_force_retaddr 0, 1
13066 ret;
13067 ENDPROC(twofish_xts_dec_8way)
13068diff --git a/arch/x86/crypto/twofish-avx2-asm_64.S b/arch/x86/crypto/twofish-avx2-asm_64.S
13069index e1a83b9..33006b9 100644
13070--- a/arch/x86/crypto/twofish-avx2-asm_64.S
13071+++ b/arch/x86/crypto/twofish-avx2-asm_64.S
13072@@ -11,6 +11,7 @@
13073 */
13074
13075 #include <linux/linkage.h>
13076+#include <asm/alternative-asm.h>
13077 #include "glue_helper-asm-avx2.S"
13078
13079 .file "twofish-avx2-asm_64.S"
13080@@ -422,6 +423,7 @@ __twofish_enc_blk16:
13081 outunpack_enc16(RA, RB, RC, RD);
13082 write_blocks16(RA, RB, RC, RD);
13083
13084+ pax_force_retaddr_bts
13085 ret;
13086 ENDPROC(__twofish_enc_blk16)
13087
13088@@ -454,6 +456,7 @@ __twofish_dec_blk16:
13089 outunpack_dec16(RA, RB, RC, RD);
13090 write_blocks16(RA, RB, RC, RD);
13091
13092+ pax_force_retaddr_bts
13093 ret;
13094 ENDPROC(__twofish_dec_blk16)
13095
13096@@ -476,6 +479,7 @@ ENTRY(twofish_ecb_enc_16way)
13097 popq %r12;
13098 vzeroupper;
13099
13100+ pax_force_retaddr 0, 1
13101 ret;
13102 ENDPROC(twofish_ecb_enc_16way)
13103
13104@@ -498,6 +502,7 @@ ENTRY(twofish_ecb_dec_16way)
13105 popq %r12;
13106 vzeroupper;
13107
13108+ pax_force_retaddr 0, 1
13109 ret;
13110 ENDPROC(twofish_ecb_dec_16way)
13111
13112@@ -521,6 +526,7 @@ ENTRY(twofish_cbc_dec_16way)
13113 popq %r12;
13114 vzeroupper;
13115
13116+ pax_force_retaddr 0, 1
13117 ret;
13118 ENDPROC(twofish_cbc_dec_16way)
13119
13120@@ -546,6 +552,7 @@ ENTRY(twofish_ctr_16way)
13121 popq %r12;
13122 vzeroupper;
13123
13124+ pax_force_retaddr 0, 1
13125 ret;
13126 ENDPROC(twofish_ctr_16way)
13127
13128@@ -574,6 +581,7 @@ twofish_xts_crypt_16way:
13129 popq %r12;
13130 vzeroupper;
13131
13132+ pax_force_retaddr 0, 1
13133 ret;
13134 ENDPROC(twofish_xts_crypt_16way)
13135
13136diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13137index 1c3b7ce..b365c5e 100644
13138--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13139+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13140@@ -21,6 +21,7 @@
13141 */
13142
13143 #include <linux/linkage.h>
13144+#include <asm/alternative-asm.h>
13145
13146 .file "twofish-x86_64-asm-3way.S"
13147 .text
13148@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
13149 popq %r13;
13150 popq %r14;
13151 popq %r15;
13152+ pax_force_retaddr 0, 1
13153 ret;
13154
13155 .L__enc_xor3:
13156@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
13157 popq %r13;
13158 popq %r14;
13159 popq %r15;
13160+ pax_force_retaddr 0, 1
13161 ret;
13162 ENDPROC(__twofish_enc_blk_3way)
13163
13164@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
13165 popq %r13;
13166 popq %r14;
13167 popq %r15;
13168+ pax_force_retaddr 0, 1
13169 ret;
13170 ENDPROC(twofish_dec_blk_3way)
13171diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
13172index a039d21..29e7615 100644
13173--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
13174+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
13175@@ -22,6 +22,7 @@
13176
13177 #include <linux/linkage.h>
13178 #include <asm/asm-offsets.h>
13179+#include <asm/alternative-asm.h>
13180
13181 #define a_offset 0
13182 #define b_offset 4
13183@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
13184
13185 popq R1
13186 movq $1,%rax
13187+ pax_force_retaddr 0, 1
13188 ret
13189 ENDPROC(twofish_enc_blk)
13190
13191@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
13192
13193 popq R1
13194 movq $1,%rax
13195+ pax_force_retaddr 0, 1
13196 ret
13197 ENDPROC(twofish_dec_blk)
13198diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
13199index 52ff81c..98af645 100644
13200--- a/arch/x86/ia32/ia32_aout.c
13201+++ b/arch/x86/ia32/ia32_aout.c
13202@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
13203 unsigned long dump_start, dump_size;
13204 struct user32 dump;
13205
13206+ memset(&dump, 0, sizeof(dump));
13207+
13208 fs = get_fs();
13209 set_fs(KERNEL_DS);
13210 has_dumped = 1;
13211diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
13212index cf1a471..5ba2673 100644
13213--- a/arch/x86/ia32/ia32_signal.c
13214+++ b/arch/x86/ia32/ia32_signal.c
13215@@ -340,7 +340,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
13216 sp -= frame_size;
13217 /* Align the stack pointer according to the i386 ABI,
13218 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
13219- sp = ((sp + 4) & -16ul) - 4;
13220+ sp = ((sp - 12) & -16ul) - 4;
13221 return (void __user *) sp;
13222 }
13223
13224@@ -398,7 +398,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
13225 * These are actually not used anymore, but left because some
13226 * gdb versions depend on them as a marker.
13227 */
13228- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
13229+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
13230 } put_user_catch(err);
13231
13232 if (err)
13233@@ -440,7 +440,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
13234 0xb8,
13235 __NR_ia32_rt_sigreturn,
13236 0x80cd,
13237- 0,
13238+ 0
13239 };
13240
13241 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
13242@@ -459,20 +459,22 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
13243 else
13244 put_user_ex(0, &frame->uc.uc_flags);
13245 put_user_ex(0, &frame->uc.uc_link);
13246- err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
13247+ __compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
13248
13249 if (ksig->ka.sa.sa_flags & SA_RESTORER)
13250 restorer = ksig->ka.sa.sa_restorer;
13251+ else if (current->mm->context.vdso)
13252+ /* Return stub is in 32bit vsyscall page */
13253+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
13254 else
13255- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
13256- rt_sigreturn);
13257+ restorer = &frame->retcode;
13258 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
13259
13260 /*
13261 * Not actually used anymore, but left because some gdb
13262 * versions need it.
13263 */
13264- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
13265+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
13266 } put_user_catch(err);
13267
13268 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
13269diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
13270index 474dc1b..9297c58 100644
13271--- a/arch/x86/ia32/ia32entry.S
13272+++ b/arch/x86/ia32/ia32entry.S
13273@@ -15,8 +15,10 @@
13274 #include <asm/irqflags.h>
13275 #include <asm/asm.h>
13276 #include <asm/smap.h>
13277+#include <asm/pgtable.h>
13278 #include <linux/linkage.h>
13279 #include <linux/err.h>
13280+#include <asm/alternative-asm.h>
13281
13282 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13283 #include <linux/elf-em.h>
13284@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
13285 ENDPROC(native_irq_enable_sysexit)
13286 #endif
13287
13288+ .macro pax_enter_kernel_user
13289+ pax_set_fptr_mask
13290+#ifdef CONFIG_PAX_MEMORY_UDEREF
13291+ call pax_enter_kernel_user
13292+#endif
13293+ .endm
13294+
13295+ .macro pax_exit_kernel_user
13296+#ifdef CONFIG_PAX_MEMORY_UDEREF
13297+ call pax_exit_kernel_user
13298+#endif
13299+#ifdef CONFIG_PAX_RANDKSTACK
13300+ pushq %rax
13301+ pushq %r11
13302+ call pax_randomize_kstack
13303+ popq %r11
13304+ popq %rax
13305+#endif
13306+ .endm
13307+
13308+ .macro pax_erase_kstack
13309+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13310+ call pax_erase_kstack
13311+#endif
13312+ .endm
13313+
13314 /*
13315 * 32bit SYSENTER instruction entry.
13316 *
13317@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
13318 CFI_REGISTER rsp,rbp
13319 SWAPGS_UNSAFE_STACK
13320 movq PER_CPU_VAR(kernel_stack), %rsp
13321- addq $(KERNEL_STACK_OFFSET),%rsp
13322- /*
13323- * No need to follow this irqs on/off section: the syscall
13324- * disabled irqs, here we enable it straight after entry:
13325- */
13326- ENABLE_INTERRUPTS(CLBR_NONE)
13327 movl %ebp,%ebp /* zero extension */
13328 pushq_cfi $__USER32_DS
13329 /*CFI_REL_OFFSET ss,0*/
13330@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
13331 CFI_REL_OFFSET rsp,0
13332 pushfq_cfi
13333 /*CFI_REL_OFFSET rflags,0*/
13334- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
13335- CFI_REGISTER rip,r10
13336+ orl $X86_EFLAGS_IF,(%rsp)
13337+ GET_THREAD_INFO(%r11)
13338+ movl TI_sysenter_return(%r11), %r11d
13339+ CFI_REGISTER rip,r11
13340 pushq_cfi $__USER32_CS
13341 /*CFI_REL_OFFSET cs,0*/
13342 movl %eax, %eax
13343- pushq_cfi %r10
13344+ pushq_cfi %r11
13345 CFI_REL_OFFSET rip,0
13346 pushq_cfi %rax
13347 cld
13348 SAVE_ARGS 0,1,0
13349+ pax_enter_kernel_user
13350+
13351+#ifdef CONFIG_PAX_RANDKSTACK
13352+ pax_erase_kstack
13353+#endif
13354+
13355+ /*
13356+ * No need to follow this irqs on/off section: the syscall
13357+ * disabled irqs, here we enable it straight after entry:
13358+ */
13359+ ENABLE_INTERRUPTS(CLBR_NONE)
13360 /* no need to do an access_ok check here because rbp has been
13361 32bit zero extended */
13362+
13363+#ifdef CONFIG_PAX_MEMORY_UDEREF
13364+ addq pax_user_shadow_base,%rbp
13365+ ASM_PAX_OPEN_USERLAND
13366+#endif
13367+
13368 ASM_STAC
13369 1: movl (%rbp),%ebp
13370 _ASM_EXTABLE(1b,ia32_badarg)
13371 ASM_CLAC
13372- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13373- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13374+
13375+#ifdef CONFIG_PAX_MEMORY_UDEREF
13376+ ASM_PAX_CLOSE_USERLAND
13377+#endif
13378+
13379+ GET_THREAD_INFO(%r11)
13380+ orl $TS_COMPAT,TI_status(%r11)
13381+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
13382 CFI_REMEMBER_STATE
13383 jnz sysenter_tracesys
13384 cmpq $(IA32_NR_syscalls-1),%rax
13385@@ -162,12 +209,15 @@ sysenter_do_call:
13386 sysenter_dispatch:
13387 call *ia32_sys_call_table(,%rax,8)
13388 movq %rax,RAX-ARGOFFSET(%rsp)
13389+ GET_THREAD_INFO(%r11)
13390 DISABLE_INTERRUPTS(CLBR_NONE)
13391 TRACE_IRQS_OFF
13392- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13393+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
13394 jnz sysexit_audit
13395 sysexit_from_sys_call:
13396- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13397+ pax_exit_kernel_user
13398+ pax_erase_kstack
13399+ andl $~TS_COMPAT,TI_status(%r11)
13400 /* clear IF, that popfq doesn't enable interrupts early */
13401 andl $~0x200,EFLAGS-R11(%rsp)
13402 movl RIP-R11(%rsp),%edx /* User %eip */
13403@@ -193,6 +243,9 @@ sysexit_from_sys_call:
13404 movl %eax,%esi /* 2nd arg: syscall number */
13405 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
13406 call __audit_syscall_entry
13407+
13408+ pax_erase_kstack
13409+
13410 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
13411 cmpq $(IA32_NR_syscalls-1),%rax
13412 ja ia32_badsys
13413@@ -204,7 +257,7 @@ sysexit_from_sys_call:
13414 .endm
13415
13416 .macro auditsys_exit exit
13417- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13418+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
13419 jnz ia32_ret_from_sys_call
13420 TRACE_IRQS_ON
13421 ENABLE_INTERRUPTS(CLBR_NONE)
13422@@ -215,11 +268,12 @@ sysexit_from_sys_call:
13423 1: setbe %al /* 1 if error, 0 if not */
13424 movzbl %al,%edi /* zero-extend that into %edi */
13425 call __audit_syscall_exit
13426+ GET_THREAD_INFO(%r11)
13427 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
13428 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
13429 DISABLE_INTERRUPTS(CLBR_NONE)
13430 TRACE_IRQS_OFF
13431- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13432+ testl %edi,TI_flags(%r11)
13433 jz \exit
13434 CLEAR_RREGS -ARGOFFSET
13435 jmp int_with_check
13436@@ -237,7 +291,7 @@ sysexit_audit:
13437
13438 sysenter_tracesys:
13439 #ifdef CONFIG_AUDITSYSCALL
13440- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13441+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
13442 jz sysenter_auditsys
13443 #endif
13444 SAVE_REST
13445@@ -249,6 +303,9 @@ sysenter_tracesys:
13446 RESTORE_REST
13447 cmpq $(IA32_NR_syscalls-1),%rax
13448 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
13449+
13450+ pax_erase_kstack
13451+
13452 jmp sysenter_do_call
13453 CFI_ENDPROC
13454 ENDPROC(ia32_sysenter_target)
13455@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
13456 ENTRY(ia32_cstar_target)
13457 CFI_STARTPROC32 simple
13458 CFI_SIGNAL_FRAME
13459- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13460+ CFI_DEF_CFA rsp,0
13461 CFI_REGISTER rip,rcx
13462 /*CFI_REGISTER rflags,r11*/
13463 SWAPGS_UNSAFE_STACK
13464 movl %esp,%r8d
13465 CFI_REGISTER rsp,r8
13466 movq PER_CPU_VAR(kernel_stack),%rsp
13467+ SAVE_ARGS 8*6,0,0
13468+ pax_enter_kernel_user
13469+
13470+#ifdef CONFIG_PAX_RANDKSTACK
13471+ pax_erase_kstack
13472+#endif
13473+
13474 /*
13475 * No need to follow this irqs on/off section: the syscall
13476 * disabled irqs and here we enable it straight after entry:
13477 */
13478 ENABLE_INTERRUPTS(CLBR_NONE)
13479- SAVE_ARGS 8,0,0
13480 movl %eax,%eax /* zero extension */
13481 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13482 movq %rcx,RIP-ARGOFFSET(%rsp)
13483@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
13484 /* no need to do an access_ok check here because r8 has been
13485 32bit zero extended */
13486 /* hardware stack frame is complete now */
13487+
13488+#ifdef CONFIG_PAX_MEMORY_UDEREF
13489+ ASM_PAX_OPEN_USERLAND
13490+ movq pax_user_shadow_base,%r8
13491+ addq RSP-ARGOFFSET(%rsp),%r8
13492+#endif
13493+
13494 ASM_STAC
13495 1: movl (%r8),%r9d
13496 _ASM_EXTABLE(1b,ia32_badarg)
13497 ASM_CLAC
13498- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13499- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13500+
13501+#ifdef CONFIG_PAX_MEMORY_UDEREF
13502+ ASM_PAX_CLOSE_USERLAND
13503+#endif
13504+
13505+ GET_THREAD_INFO(%r11)
13506+ orl $TS_COMPAT,TI_status(%r11)
13507+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
13508 CFI_REMEMBER_STATE
13509 jnz cstar_tracesys
13510 cmpq $IA32_NR_syscalls-1,%rax
13511@@ -319,12 +395,15 @@ cstar_do_call:
13512 cstar_dispatch:
13513 call *ia32_sys_call_table(,%rax,8)
13514 movq %rax,RAX-ARGOFFSET(%rsp)
13515+ GET_THREAD_INFO(%r11)
13516 DISABLE_INTERRUPTS(CLBR_NONE)
13517 TRACE_IRQS_OFF
13518- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13519+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
13520 jnz sysretl_audit
13521 sysretl_from_sys_call:
13522- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13523+ pax_exit_kernel_user
13524+ pax_erase_kstack
13525+ andl $~TS_COMPAT,TI_status(%r11)
13526 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
13527 movl RIP-ARGOFFSET(%rsp),%ecx
13528 CFI_REGISTER rip,rcx
13529@@ -352,7 +431,7 @@ sysretl_audit:
13530
13531 cstar_tracesys:
13532 #ifdef CONFIG_AUDITSYSCALL
13533- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13534+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
13535 jz cstar_auditsys
13536 #endif
13537 xchgl %r9d,%ebp
13538@@ -366,11 +445,19 @@ cstar_tracesys:
13539 xchgl %ebp,%r9d
13540 cmpq $(IA32_NR_syscalls-1),%rax
13541 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
13542+
13543+ pax_erase_kstack
13544+
13545 jmp cstar_do_call
13546 END(ia32_cstar_target)
13547
13548 ia32_badarg:
13549 ASM_CLAC
13550+
13551+#ifdef CONFIG_PAX_MEMORY_UDEREF
13552+ ASM_PAX_CLOSE_USERLAND
13553+#endif
13554+
13555 movq $-EFAULT,%rax
13556 jmp ia32_sysret
13557 CFI_ENDPROC
13558@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
13559 CFI_REL_OFFSET rip,RIP-RIP
13560 PARAVIRT_ADJUST_EXCEPTION_FRAME
13561 SWAPGS
13562- /*
13563- * No need to follow this irqs on/off section: the syscall
13564- * disabled irqs and here we enable it straight after entry:
13565- */
13566- ENABLE_INTERRUPTS(CLBR_NONE)
13567 movl %eax,%eax
13568 pushq_cfi %rax
13569 cld
13570 /* note the registers are not zero extended to the sf.
13571 this could be a problem. */
13572 SAVE_ARGS 0,1,0
13573- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13574- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13575+ pax_enter_kernel_user
13576+
13577+#ifdef CONFIG_PAX_RANDKSTACK
13578+ pax_erase_kstack
13579+#endif
13580+
13581+ /*
13582+ * No need to follow this irqs on/off section: the syscall
13583+ * disabled irqs and here we enable it straight after entry:
13584+ */
13585+ ENABLE_INTERRUPTS(CLBR_NONE)
13586+ GET_THREAD_INFO(%r11)
13587+ orl $TS_COMPAT,TI_status(%r11)
13588+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
13589 jnz ia32_tracesys
13590 cmpq $(IA32_NR_syscalls-1),%rax
13591 ja ia32_badsys
13592@@ -442,6 +536,9 @@ ia32_tracesys:
13593 RESTORE_REST
13594 cmpq $(IA32_NR_syscalls-1),%rax
13595 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
13596+
13597+ pax_erase_kstack
13598+
13599 jmp ia32_do_call
13600 END(ia32_syscall)
13601
13602diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
13603index 8e0ceec..af13504 100644
13604--- a/arch/x86/ia32/sys_ia32.c
13605+++ b/arch/x86/ia32/sys_ia32.c
13606@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
13607 */
13608 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
13609 {
13610- typeof(ubuf->st_uid) uid = 0;
13611- typeof(ubuf->st_gid) gid = 0;
13612+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
13613+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
13614 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
13615 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
13616 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
13617diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
13618index 372231c..a5aa1a1 100644
13619--- a/arch/x86/include/asm/alternative-asm.h
13620+++ b/arch/x86/include/asm/alternative-asm.h
13621@@ -18,6 +18,45 @@
13622 .endm
13623 #endif
13624
13625+#ifdef KERNEXEC_PLUGIN
13626+ .macro pax_force_retaddr_bts rip=0
13627+ btsq $63,\rip(%rsp)
13628+ .endm
13629+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
13630+ .macro pax_force_retaddr rip=0, reload=0
13631+ btsq $63,\rip(%rsp)
13632+ .endm
13633+ .macro pax_force_fptr ptr
13634+ btsq $63,\ptr
13635+ .endm
13636+ .macro pax_set_fptr_mask
13637+ .endm
13638+#endif
13639+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
13640+ .macro pax_force_retaddr rip=0, reload=0
13641+ .if \reload
13642+ pax_set_fptr_mask
13643+ .endif
13644+ orq %r10,\rip(%rsp)
13645+ .endm
13646+ .macro pax_force_fptr ptr
13647+ orq %r10,\ptr
13648+ .endm
13649+ .macro pax_set_fptr_mask
13650+ movabs $0x8000000000000000,%r10
13651+ .endm
13652+#endif
13653+#else
13654+ .macro pax_force_retaddr rip=0, reload=0
13655+ .endm
13656+ .macro pax_force_fptr ptr
13657+ .endm
13658+ .macro pax_force_retaddr_bts rip=0
13659+ .endm
13660+ .macro pax_set_fptr_mask
13661+ .endm
13662+#endif
13663+
13664 .macro altinstruction_entry orig alt feature orig_len alt_len
13665 .long \orig - .
13666 .long \alt - .
13667diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
13668index 58ed6d9..f1cbe58 100644
13669--- a/arch/x86/include/asm/alternative.h
13670+++ b/arch/x86/include/asm/alternative.h
13671@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
13672 ".pushsection .discard,\"aw\",@progbits\n" \
13673 DISCARD_ENTRY(1) \
13674 ".popsection\n" \
13675- ".pushsection .altinstr_replacement, \"ax\"\n" \
13676+ ".pushsection .altinstr_replacement, \"a\"\n" \
13677 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
13678 ".popsection"
13679
13680@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
13681 DISCARD_ENTRY(1) \
13682 DISCARD_ENTRY(2) \
13683 ".popsection\n" \
13684- ".pushsection .altinstr_replacement, \"ax\"\n" \
13685+ ".pushsection .altinstr_replacement, \"a\"\n" \
13686 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
13687 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
13688 ".popsection"
13689diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
13690index 3388034..050f0b9 100644
13691--- a/arch/x86/include/asm/apic.h
13692+++ b/arch/x86/include/asm/apic.h
13693@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
13694
13695 #ifdef CONFIG_X86_LOCAL_APIC
13696
13697-extern unsigned int apic_verbosity;
13698+extern int apic_verbosity;
13699 extern int local_apic_timer_c2_ok;
13700
13701 extern int disable_apic;
13702diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
13703index 20370c6..a2eb9b0 100644
13704--- a/arch/x86/include/asm/apm.h
13705+++ b/arch/x86/include/asm/apm.h
13706@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
13707 __asm__ __volatile__(APM_DO_ZERO_SEGS
13708 "pushl %%edi\n\t"
13709 "pushl %%ebp\n\t"
13710- "lcall *%%cs:apm_bios_entry\n\t"
13711+ "lcall *%%ss:apm_bios_entry\n\t"
13712 "setc %%al\n\t"
13713 "popl %%ebp\n\t"
13714 "popl %%edi\n\t"
13715@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
13716 __asm__ __volatile__(APM_DO_ZERO_SEGS
13717 "pushl %%edi\n\t"
13718 "pushl %%ebp\n\t"
13719- "lcall *%%cs:apm_bios_entry\n\t"
13720+ "lcall *%%ss:apm_bios_entry\n\t"
13721 "setc %%bl\n\t"
13722 "popl %%ebp\n\t"
13723 "popl %%edi\n\t"
13724diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
13725index 722aa3b..3a0bb27 100644
13726--- a/arch/x86/include/asm/atomic.h
13727+++ b/arch/x86/include/asm/atomic.h
13728@@ -22,7 +22,18 @@
13729 */
13730 static inline int atomic_read(const atomic_t *v)
13731 {
13732- return (*(volatile int *)&(v)->counter);
13733+ return (*(volatile const int *)&(v)->counter);
13734+}
13735+
13736+/**
13737+ * atomic_read_unchecked - read atomic variable
13738+ * @v: pointer of type atomic_unchecked_t
13739+ *
13740+ * Atomically reads the value of @v.
13741+ */
13742+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
13743+{
13744+ return (*(volatile const int *)&(v)->counter);
13745 }
13746
13747 /**
13748@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
13749 }
13750
13751 /**
13752+ * atomic_set_unchecked - set atomic variable
13753+ * @v: pointer of type atomic_unchecked_t
13754+ * @i: required value
13755+ *
13756+ * Atomically sets the value of @v to @i.
13757+ */
13758+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
13759+{
13760+ v->counter = i;
13761+}
13762+
13763+/**
13764 * atomic_add - add integer to atomic variable
13765 * @i: integer value to add
13766 * @v: pointer of type atomic_t
13767@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
13768 */
13769 static inline void atomic_add(int i, atomic_t *v)
13770 {
13771- asm volatile(LOCK_PREFIX "addl %1,%0"
13772+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
13773+
13774+#ifdef CONFIG_PAX_REFCOUNT
13775+ "jno 0f\n"
13776+ LOCK_PREFIX "subl %1,%0\n"
13777+ "int $4\n0:\n"
13778+ _ASM_EXTABLE(0b, 0b)
13779+#endif
13780+
13781+ : "+m" (v->counter)
13782+ : "ir" (i));
13783+}
13784+
13785+/**
13786+ * atomic_add_unchecked - add integer to atomic variable
13787+ * @i: integer value to add
13788+ * @v: pointer of type atomic_unchecked_t
13789+ *
13790+ * Atomically adds @i to @v.
13791+ */
13792+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
13793+{
13794+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
13795 : "+m" (v->counter)
13796 : "ir" (i));
13797 }
13798@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
13799 */
13800 static inline void atomic_sub(int i, atomic_t *v)
13801 {
13802- asm volatile(LOCK_PREFIX "subl %1,%0"
13803+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
13804+
13805+#ifdef CONFIG_PAX_REFCOUNT
13806+ "jno 0f\n"
13807+ LOCK_PREFIX "addl %1,%0\n"
13808+ "int $4\n0:\n"
13809+ _ASM_EXTABLE(0b, 0b)
13810+#endif
13811+
13812+ : "+m" (v->counter)
13813+ : "ir" (i));
13814+}
13815+
13816+/**
13817+ * atomic_sub_unchecked - subtract integer from atomic variable
13818+ * @i: integer value to subtract
13819+ * @v: pointer of type atomic_unchecked_t
13820+ *
13821+ * Atomically subtracts @i from @v.
13822+ */
13823+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
13824+{
13825+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
13826 : "+m" (v->counter)
13827 : "ir" (i));
13828 }
13829@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
13830 {
13831 unsigned char c;
13832
13833- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
13834+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
13835+
13836+#ifdef CONFIG_PAX_REFCOUNT
13837+ "jno 0f\n"
13838+ LOCK_PREFIX "addl %2,%0\n"
13839+ "int $4\n0:\n"
13840+ _ASM_EXTABLE(0b, 0b)
13841+#endif
13842+
13843+ "sete %1\n"
13844 : "+m" (v->counter), "=qm" (c)
13845 : "ir" (i) : "memory");
13846 return c;
13847@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
13848 */
13849 static inline void atomic_inc(atomic_t *v)
13850 {
13851- asm volatile(LOCK_PREFIX "incl %0"
13852+ asm volatile(LOCK_PREFIX "incl %0\n"
13853+
13854+#ifdef CONFIG_PAX_REFCOUNT
13855+ "jno 0f\n"
13856+ LOCK_PREFIX "decl %0\n"
13857+ "int $4\n0:\n"
13858+ _ASM_EXTABLE(0b, 0b)
13859+#endif
13860+
13861+ : "+m" (v->counter));
13862+}
13863+
13864+/**
13865+ * atomic_inc_unchecked - increment atomic variable
13866+ * @v: pointer of type atomic_unchecked_t
13867+ *
13868+ * Atomically increments @v by 1.
13869+ */
13870+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
13871+{
13872+ asm volatile(LOCK_PREFIX "incl %0\n"
13873 : "+m" (v->counter));
13874 }
13875
13876@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
13877 */
13878 static inline void atomic_dec(atomic_t *v)
13879 {
13880- asm volatile(LOCK_PREFIX "decl %0"
13881+ asm volatile(LOCK_PREFIX "decl %0\n"
13882+
13883+#ifdef CONFIG_PAX_REFCOUNT
13884+ "jno 0f\n"
13885+ LOCK_PREFIX "incl %0\n"
13886+ "int $4\n0:\n"
13887+ _ASM_EXTABLE(0b, 0b)
13888+#endif
13889+
13890+ : "+m" (v->counter));
13891+}
13892+
13893+/**
13894+ * atomic_dec_unchecked - decrement atomic variable
13895+ * @v: pointer of type atomic_unchecked_t
13896+ *
13897+ * Atomically decrements @v by 1.
13898+ */
13899+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
13900+{
13901+ asm volatile(LOCK_PREFIX "decl %0\n"
13902 : "+m" (v->counter));
13903 }
13904
13905@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
13906 {
13907 unsigned char c;
13908
13909- asm volatile(LOCK_PREFIX "decl %0; sete %1"
13910+ asm volatile(LOCK_PREFIX "decl %0\n"
13911+
13912+#ifdef CONFIG_PAX_REFCOUNT
13913+ "jno 0f\n"
13914+ LOCK_PREFIX "incl %0\n"
13915+ "int $4\n0:\n"
13916+ _ASM_EXTABLE(0b, 0b)
13917+#endif
13918+
13919+ "sete %1\n"
13920 : "+m" (v->counter), "=qm" (c)
13921 : : "memory");
13922 return c != 0;
13923@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
13924 {
13925 unsigned char c;
13926
13927- asm volatile(LOCK_PREFIX "incl %0; sete %1"
13928+ asm volatile(LOCK_PREFIX "incl %0\n"
13929+
13930+#ifdef CONFIG_PAX_REFCOUNT
13931+ "jno 0f\n"
13932+ LOCK_PREFIX "decl %0\n"
13933+ "int $4\n0:\n"
13934+ _ASM_EXTABLE(0b, 0b)
13935+#endif
13936+
13937+ "sete %1\n"
13938+ : "+m" (v->counter), "=qm" (c)
13939+ : : "memory");
13940+ return c != 0;
13941+}
13942+
13943+/**
13944+ * atomic_inc_and_test_unchecked - increment and test
13945+ * @v: pointer of type atomic_unchecked_t
13946+ *
13947+ * Atomically increments @v by 1
13948+ * and returns true if the result is zero, or false for all
13949+ * other cases.
13950+ */
13951+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
13952+{
13953+ unsigned char c;
13954+
13955+ asm volatile(LOCK_PREFIX "incl %0\n"
13956+ "sete %1\n"
13957 : "+m" (v->counter), "=qm" (c)
13958 : : "memory");
13959 return c != 0;
13960@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
13961 {
13962 unsigned char c;
13963
13964- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
13965+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
13966+
13967+#ifdef CONFIG_PAX_REFCOUNT
13968+ "jno 0f\n"
13969+ LOCK_PREFIX "subl %2,%0\n"
13970+ "int $4\n0:\n"
13971+ _ASM_EXTABLE(0b, 0b)
13972+#endif
13973+
13974+ "sets %1\n"
13975 : "+m" (v->counter), "=qm" (c)
13976 : "ir" (i) : "memory");
13977 return c;
13978@@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
13979 */
13980 static inline int atomic_add_return(int i, atomic_t *v)
13981 {
13982+ return i + xadd_check_overflow(&v->counter, i);
13983+}
13984+
13985+/**
13986+ * atomic_add_return_unchecked - add integer and return
13987+ * @i: integer value to add
13988+ * @v: pointer of type atomic_unchecked_t
13989+ *
13990+ * Atomically adds @i to @v and returns @i + @v
13991+ */
13992+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
13993+{
13994 return i + xadd(&v->counter, i);
13995 }
13996
13997@@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
13998 }
13999
14000 #define atomic_inc_return(v) (atomic_add_return(1, v))
14001+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
14002+{
14003+ return atomic_add_return_unchecked(1, v);
14004+}
14005 #define atomic_dec_return(v) (atomic_sub_return(1, v))
14006
14007 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
14008@@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
14009 return cmpxchg(&v->counter, old, new);
14010 }
14011
14012+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
14013+{
14014+ return cmpxchg(&v->counter, old, new);
14015+}
14016+
14017 static inline int atomic_xchg(atomic_t *v, int new)
14018 {
14019 return xchg(&v->counter, new);
14020 }
14021
14022+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
14023+{
14024+ return xchg(&v->counter, new);
14025+}
14026+
14027 /**
14028 * __atomic_add_unless - add unless the number is already a given value
14029 * @v: pointer of type atomic_t
14030@@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
14031 */
14032 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
14033 {
14034- int c, old;
14035+ int c, old, new;
14036 c = atomic_read(v);
14037 for (;;) {
14038- if (unlikely(c == (u)))
14039+ if (unlikely(c == u))
14040 break;
14041- old = atomic_cmpxchg((v), c, c + (a));
14042+
14043+ asm volatile("addl %2,%0\n"
14044+
14045+#ifdef CONFIG_PAX_REFCOUNT
14046+ "jno 0f\n"
14047+ "subl %2,%0\n"
14048+ "int $4\n0:\n"
14049+ _ASM_EXTABLE(0b, 0b)
14050+#endif
14051+
14052+ : "=r" (new)
14053+ : "0" (c), "ir" (a));
14054+
14055+ old = atomic_cmpxchg(v, c, new);
14056 if (likely(old == c))
14057 break;
14058 c = old;
14059@@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
14060 }
14061
14062 /**
14063+ * atomic_inc_not_zero_hint - increment if not null
14064+ * @v: pointer of type atomic_t
14065+ * @hint: probable value of the atomic before the increment
14066+ *
14067+ * This version of atomic_inc_not_zero() gives a hint of probable
14068+ * value of the atomic. This helps processor to not read the memory
14069+ * before doing the atomic read/modify/write cycle, lowering
14070+ * number of bus transactions on some arches.
14071+ *
14072+ * Returns: 0 if increment was not done, 1 otherwise.
14073+ */
14074+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
14075+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
14076+{
14077+ int val, c = hint, new;
14078+
14079+ /* sanity test, should be removed by compiler if hint is a constant */
14080+ if (!hint)
14081+ return __atomic_add_unless(v, 1, 0);
14082+
14083+ do {
14084+ asm volatile("incl %0\n"
14085+
14086+#ifdef CONFIG_PAX_REFCOUNT
14087+ "jno 0f\n"
14088+ "decl %0\n"
14089+ "int $4\n0:\n"
14090+ _ASM_EXTABLE(0b, 0b)
14091+#endif
14092+
14093+ : "=r" (new)
14094+ : "0" (c));
14095+
14096+ val = atomic_cmpxchg(v, c, new);
14097+ if (val == c)
14098+ return 1;
14099+ c = val;
14100+ } while (c);
14101+
14102+ return 0;
14103+}
14104+
14105+/**
14106 * atomic_inc_short - increment of a short integer
14107 * @v: pointer to type int
14108 *
14109@@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
14110 #endif
14111
14112 /* These are x86-specific, used by some header files */
14113-#define atomic_clear_mask(mask, addr) \
14114- asm volatile(LOCK_PREFIX "andl %0,%1" \
14115- : : "r" (~(mask)), "m" (*(addr)) : "memory")
14116+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
14117+{
14118+ asm volatile(LOCK_PREFIX "andl %1,%0"
14119+ : "+m" (v->counter)
14120+ : "r" (~(mask))
14121+ : "memory");
14122+}
14123
14124-#define atomic_set_mask(mask, addr) \
14125- asm volatile(LOCK_PREFIX "orl %0,%1" \
14126- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
14127- : "memory")
14128+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
14129+{
14130+ asm volatile(LOCK_PREFIX "andl %1,%0"
14131+ : "+m" (v->counter)
14132+ : "r" (~(mask))
14133+ : "memory");
14134+}
14135+
14136+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
14137+{
14138+ asm volatile(LOCK_PREFIX "orl %1,%0"
14139+ : "+m" (v->counter)
14140+ : "r" (mask)
14141+ : "memory");
14142+}
14143+
14144+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
14145+{
14146+ asm volatile(LOCK_PREFIX "orl %1,%0"
14147+ : "+m" (v->counter)
14148+ : "r" (mask)
14149+ : "memory");
14150+}
14151
14152 /* Atomic operations are already serializing on x86 */
14153 #define smp_mb__before_atomic_dec() barrier()
14154diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
14155index b154de7..aadebd8 100644
14156--- a/arch/x86/include/asm/atomic64_32.h
14157+++ b/arch/x86/include/asm/atomic64_32.h
14158@@ -12,6 +12,14 @@ typedef struct {
14159 u64 __aligned(8) counter;
14160 } atomic64_t;
14161
14162+#ifdef CONFIG_PAX_REFCOUNT
14163+typedef struct {
14164+ u64 __aligned(8) counter;
14165+} atomic64_unchecked_t;
14166+#else
14167+typedef atomic64_t atomic64_unchecked_t;
14168+#endif
14169+
14170 #define ATOMIC64_INIT(val) { (val) }
14171
14172 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
14173@@ -37,21 +45,31 @@ typedef struct {
14174 ATOMIC64_DECL_ONE(sym##_386)
14175
14176 ATOMIC64_DECL_ONE(add_386);
14177+ATOMIC64_DECL_ONE(add_unchecked_386);
14178 ATOMIC64_DECL_ONE(sub_386);
14179+ATOMIC64_DECL_ONE(sub_unchecked_386);
14180 ATOMIC64_DECL_ONE(inc_386);
14181+ATOMIC64_DECL_ONE(inc_unchecked_386);
14182 ATOMIC64_DECL_ONE(dec_386);
14183+ATOMIC64_DECL_ONE(dec_unchecked_386);
14184 #endif
14185
14186 #define alternative_atomic64(f, out, in...) \
14187 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
14188
14189 ATOMIC64_DECL(read);
14190+ATOMIC64_DECL(read_unchecked);
14191 ATOMIC64_DECL(set);
14192+ATOMIC64_DECL(set_unchecked);
14193 ATOMIC64_DECL(xchg);
14194 ATOMIC64_DECL(add_return);
14195+ATOMIC64_DECL(add_return_unchecked);
14196 ATOMIC64_DECL(sub_return);
14197+ATOMIC64_DECL(sub_return_unchecked);
14198 ATOMIC64_DECL(inc_return);
14199+ATOMIC64_DECL(inc_return_unchecked);
14200 ATOMIC64_DECL(dec_return);
14201+ATOMIC64_DECL(dec_return_unchecked);
14202 ATOMIC64_DECL(dec_if_positive);
14203 ATOMIC64_DECL(inc_not_zero);
14204 ATOMIC64_DECL(add_unless);
14205@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
14206 }
14207
14208 /**
14209+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
14210+ * @p: pointer to type atomic64_unchecked_t
14211+ * @o: expected value
14212+ * @n: new value
14213+ *
14214+ * Atomically sets @v to @n if it was equal to @o and returns
14215+ * the old value.
14216+ */
14217+
14218+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
14219+{
14220+ return cmpxchg64(&v->counter, o, n);
14221+}
14222+
14223+/**
14224 * atomic64_xchg - xchg atomic64 variable
14225 * @v: pointer to type atomic64_t
14226 * @n: value to assign
14227@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
14228 }
14229
14230 /**
14231+ * atomic64_set_unchecked - set atomic64 variable
14232+ * @v: pointer to type atomic64_unchecked_t
14233+ * @n: value to assign
14234+ *
14235+ * Atomically sets the value of @v to @n.
14236+ */
14237+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
14238+{
14239+ unsigned high = (unsigned)(i >> 32);
14240+ unsigned low = (unsigned)i;
14241+ alternative_atomic64(set, /* no output */,
14242+ "S" (v), "b" (low), "c" (high)
14243+ : "eax", "edx", "memory");
14244+}
14245+
14246+/**
14247 * atomic64_read - read atomic64 variable
14248 * @v: pointer to type atomic64_t
14249 *
14250@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
14251 }
14252
14253 /**
14254+ * atomic64_read_unchecked - read atomic64 variable
14255+ * @v: pointer to type atomic64_unchecked_t
14256+ *
14257+ * Atomically reads the value of @v and returns it.
14258+ */
14259+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
14260+{
14261+ long long r;
14262+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
14263+ return r;
14264+ }
14265+
14266+/**
14267 * atomic64_add_return - add and return
14268 * @i: integer value to add
14269 * @v: pointer to type atomic64_t
14270@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
14271 return i;
14272 }
14273
14274+/**
14275+ * atomic64_add_return_unchecked - add and return
14276+ * @i: integer value to add
14277+ * @v: pointer to type atomic64_unchecked_t
14278+ *
14279+ * Atomically adds @i to @v and returns @i + *@v
14280+ */
14281+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
14282+{
14283+ alternative_atomic64(add_return_unchecked,
14284+ ASM_OUTPUT2("+A" (i), "+c" (v)),
14285+ ASM_NO_INPUT_CLOBBER("memory"));
14286+ return i;
14287+}
14288+
14289 /*
14290 * Other variants with different arithmetic operators:
14291 */
14292@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
14293 return a;
14294 }
14295
14296+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
14297+{
14298+ long long a;
14299+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
14300+ "S" (v) : "memory", "ecx");
14301+ return a;
14302+}
14303+
14304 static inline long long atomic64_dec_return(atomic64_t *v)
14305 {
14306 long long a;
14307@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
14308 }
14309
14310 /**
14311+ * atomic64_add_unchecked - add integer to atomic64 variable
14312+ * @i: integer value to add
14313+ * @v: pointer to type atomic64_unchecked_t
14314+ *
14315+ * Atomically adds @i to @v.
14316+ */
14317+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
14318+{
14319+ __alternative_atomic64(add_unchecked, add_return_unchecked,
14320+ ASM_OUTPUT2("+A" (i), "+c" (v)),
14321+ ASM_NO_INPUT_CLOBBER("memory"));
14322+ return i;
14323+}
14324+
14325+/**
14326 * atomic64_sub - subtract the atomic64 variable
14327 * @i: integer value to subtract
14328 * @v: pointer to type atomic64_t
14329diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
14330index 0e1cbfc..5623683 100644
14331--- a/arch/x86/include/asm/atomic64_64.h
14332+++ b/arch/x86/include/asm/atomic64_64.h
14333@@ -18,7 +18,19 @@
14334 */
14335 static inline long atomic64_read(const atomic64_t *v)
14336 {
14337- return (*(volatile long *)&(v)->counter);
14338+ return (*(volatile const long *)&(v)->counter);
14339+}
14340+
14341+/**
14342+ * atomic64_read_unchecked - read atomic64 variable
14343+ * @v: pointer of type atomic64_unchecked_t
14344+ *
14345+ * Atomically reads the value of @v.
14346+ * Doesn't imply a read memory barrier.
14347+ */
14348+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
14349+{
14350+ return (*(volatile const long *)&(v)->counter);
14351 }
14352
14353 /**
14354@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
14355 }
14356
14357 /**
14358+ * atomic64_set_unchecked - set atomic64 variable
14359+ * @v: pointer to type atomic64_unchecked_t
14360+ * @i: required value
14361+ *
14362+ * Atomically sets the value of @v to @i.
14363+ */
14364+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
14365+{
14366+ v->counter = i;
14367+}
14368+
14369+/**
14370 * atomic64_add - add integer to atomic64 variable
14371 * @i: integer value to add
14372 * @v: pointer to type atomic64_t
14373@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
14374 */
14375 static inline void atomic64_add(long i, atomic64_t *v)
14376 {
14377+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
14378+
14379+#ifdef CONFIG_PAX_REFCOUNT
14380+ "jno 0f\n"
14381+ LOCK_PREFIX "subq %1,%0\n"
14382+ "int $4\n0:\n"
14383+ _ASM_EXTABLE(0b, 0b)
14384+#endif
14385+
14386+ : "=m" (v->counter)
14387+ : "er" (i), "m" (v->counter));
14388+}
14389+
14390+/**
14391+ * atomic64_add_unchecked - add integer to atomic64 variable
14392+ * @i: integer value to add
14393+ * @v: pointer to type atomic64_unchecked_t
14394+ *
14395+ * Atomically adds @i to @v.
14396+ */
14397+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
14398+{
14399 asm volatile(LOCK_PREFIX "addq %1,%0"
14400 : "=m" (v->counter)
14401 : "er" (i), "m" (v->counter));
14402@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
14403 */
14404 static inline void atomic64_sub(long i, atomic64_t *v)
14405 {
14406- asm volatile(LOCK_PREFIX "subq %1,%0"
14407+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
14408+
14409+#ifdef CONFIG_PAX_REFCOUNT
14410+ "jno 0f\n"
14411+ LOCK_PREFIX "addq %1,%0\n"
14412+ "int $4\n0:\n"
14413+ _ASM_EXTABLE(0b, 0b)
14414+#endif
14415+
14416+ : "=m" (v->counter)
14417+ : "er" (i), "m" (v->counter));
14418+}
14419+
14420+/**
14421+ * atomic64_sub_unchecked - subtract the atomic64 variable
14422+ * @i: integer value to subtract
14423+ * @v: pointer to type atomic64_unchecked_t
14424+ *
14425+ * Atomically subtracts @i from @v.
14426+ */
14427+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
14428+{
14429+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
14430 : "=m" (v->counter)
14431 : "er" (i), "m" (v->counter));
14432 }
14433@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
14434 {
14435 unsigned char c;
14436
14437- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
14438+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
14439+
14440+#ifdef CONFIG_PAX_REFCOUNT
14441+ "jno 0f\n"
14442+ LOCK_PREFIX "addq %2,%0\n"
14443+ "int $4\n0:\n"
14444+ _ASM_EXTABLE(0b, 0b)
14445+#endif
14446+
14447+ "sete %1\n"
14448 : "=m" (v->counter), "=qm" (c)
14449 : "er" (i), "m" (v->counter) : "memory");
14450 return c;
14451@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
14452 */
14453 static inline void atomic64_inc(atomic64_t *v)
14454 {
14455+ asm volatile(LOCK_PREFIX "incq %0\n"
14456+
14457+#ifdef CONFIG_PAX_REFCOUNT
14458+ "jno 0f\n"
14459+ LOCK_PREFIX "decq %0\n"
14460+ "int $4\n0:\n"
14461+ _ASM_EXTABLE(0b, 0b)
14462+#endif
14463+
14464+ : "=m" (v->counter)
14465+ : "m" (v->counter));
14466+}
14467+
14468+/**
14469+ * atomic64_inc_unchecked - increment atomic64 variable
14470+ * @v: pointer to type atomic64_unchecked_t
14471+ *
14472+ * Atomically increments @v by 1.
14473+ */
14474+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
14475+{
14476 asm volatile(LOCK_PREFIX "incq %0"
14477 : "=m" (v->counter)
14478 : "m" (v->counter));
14479@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
14480 */
14481 static inline void atomic64_dec(atomic64_t *v)
14482 {
14483- asm volatile(LOCK_PREFIX "decq %0"
14484+ asm volatile(LOCK_PREFIX "decq %0\n"
14485+
14486+#ifdef CONFIG_PAX_REFCOUNT
14487+ "jno 0f\n"
14488+ LOCK_PREFIX "incq %0\n"
14489+ "int $4\n0:\n"
14490+ _ASM_EXTABLE(0b, 0b)
14491+#endif
14492+
14493+ : "=m" (v->counter)
14494+ : "m" (v->counter));
14495+}
14496+
14497+/**
14498+ * atomic64_dec_unchecked - decrement atomic64 variable
14499+ * @v: pointer to type atomic64_unchecked_t
14500+ *
14501+ * Atomically decrements @v by 1.
14502+ */
14503+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
14504+{
14505+ asm volatile(LOCK_PREFIX "decq %0\n"
14506 : "=m" (v->counter)
14507 : "m" (v->counter));
14508 }
14509@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
14510 {
14511 unsigned char c;
14512
14513- asm volatile(LOCK_PREFIX "decq %0; sete %1"
14514+ asm volatile(LOCK_PREFIX "decq %0\n"
14515+
14516+#ifdef CONFIG_PAX_REFCOUNT
14517+ "jno 0f\n"
14518+ LOCK_PREFIX "incq %0\n"
14519+ "int $4\n0:\n"
14520+ _ASM_EXTABLE(0b, 0b)
14521+#endif
14522+
14523+ "sete %1\n"
14524 : "=m" (v->counter), "=qm" (c)
14525 : "m" (v->counter) : "memory");
14526 return c != 0;
14527@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
14528 {
14529 unsigned char c;
14530
14531- asm volatile(LOCK_PREFIX "incq %0; sete %1"
14532+ asm volatile(LOCK_PREFIX "incq %0\n"
14533+
14534+#ifdef CONFIG_PAX_REFCOUNT
14535+ "jno 0f\n"
14536+ LOCK_PREFIX "decq %0\n"
14537+ "int $4\n0:\n"
14538+ _ASM_EXTABLE(0b, 0b)
14539+#endif
14540+
14541+ "sete %1\n"
14542 : "=m" (v->counter), "=qm" (c)
14543 : "m" (v->counter) : "memory");
14544 return c != 0;
14545@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
14546 {
14547 unsigned char c;
14548
14549- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
14550+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
14551+
14552+#ifdef CONFIG_PAX_REFCOUNT
14553+ "jno 0f\n"
14554+ LOCK_PREFIX "subq %2,%0\n"
14555+ "int $4\n0:\n"
14556+ _ASM_EXTABLE(0b, 0b)
14557+#endif
14558+
14559+ "sets %1\n"
14560 : "=m" (v->counter), "=qm" (c)
14561 : "er" (i), "m" (v->counter) : "memory");
14562 return c;
14563@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
14564 */
14565 static inline long atomic64_add_return(long i, atomic64_t *v)
14566 {
14567+ return i + xadd_check_overflow(&v->counter, i);
14568+}
14569+
14570+/**
14571+ * atomic64_add_return_unchecked - add and return
14572+ * @i: integer value to add
14573+ * @v: pointer to type atomic64_unchecked_t
14574+ *
14575+ * Atomically adds @i to @v and returns @i + @v
14576+ */
14577+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
14578+{
14579 return i + xadd(&v->counter, i);
14580 }
14581
14582@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
14583 }
14584
14585 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
14586+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
14587+{
14588+ return atomic64_add_return_unchecked(1, v);
14589+}
14590 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
14591
14592 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
14593@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
14594 return cmpxchg(&v->counter, old, new);
14595 }
14596
14597+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
14598+{
14599+ return cmpxchg(&v->counter, old, new);
14600+}
14601+
14602 static inline long atomic64_xchg(atomic64_t *v, long new)
14603 {
14604 return xchg(&v->counter, new);
14605@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
14606 */
14607 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
14608 {
14609- long c, old;
14610+ long c, old, new;
14611 c = atomic64_read(v);
14612 for (;;) {
14613- if (unlikely(c == (u)))
14614+ if (unlikely(c == u))
14615 break;
14616- old = atomic64_cmpxchg((v), c, c + (a));
14617+
14618+ asm volatile("add %2,%0\n"
14619+
14620+#ifdef CONFIG_PAX_REFCOUNT
14621+ "jno 0f\n"
14622+ "sub %2,%0\n"
14623+ "int $4\n0:\n"
14624+ _ASM_EXTABLE(0b, 0b)
14625+#endif
14626+
14627+ : "=r" (new)
14628+ : "0" (c), "ir" (a));
14629+
14630+ old = atomic64_cmpxchg(v, c, new);
14631 if (likely(old == c))
14632 break;
14633 c = old;
14634 }
14635- return c != (u);
14636+ return c != u;
14637 }
14638
14639 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
14640diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
14641index 6dfd019..28e188d 100644
14642--- a/arch/x86/include/asm/bitops.h
14643+++ b/arch/x86/include/asm/bitops.h
14644@@ -40,7 +40,7 @@
14645 * a mask operation on a byte.
14646 */
14647 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
14648-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
14649+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
14650 #define CONST_MASK(nr) (1 << ((nr) & 7))
14651
14652 /**
14653@@ -486,7 +486,7 @@ static inline int fls(int x)
14654 * at position 64.
14655 */
14656 #ifdef CONFIG_X86_64
14657-static __always_inline int fls64(__u64 x)
14658+static __always_inline long fls64(__u64 x)
14659 {
14660 int bitpos = -1;
14661 /*
14662diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
14663index 4fa687a..60f2d39 100644
14664--- a/arch/x86/include/asm/boot.h
14665+++ b/arch/x86/include/asm/boot.h
14666@@ -6,10 +6,15 @@
14667 #include <uapi/asm/boot.h>
14668
14669 /* Physical address where kernel should be loaded. */
14670-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
14671+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
14672 + (CONFIG_PHYSICAL_ALIGN - 1)) \
14673 & ~(CONFIG_PHYSICAL_ALIGN - 1))
14674
14675+#ifndef __ASSEMBLY__
14676+extern unsigned char __LOAD_PHYSICAL_ADDR[];
14677+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
14678+#endif
14679+
14680 /* Minimum kernel alignment, as a power of two */
14681 #ifdef CONFIG_X86_64
14682 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
14683diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
14684index 48f99f1..d78ebf9 100644
14685--- a/arch/x86/include/asm/cache.h
14686+++ b/arch/x86/include/asm/cache.h
14687@@ -5,12 +5,13 @@
14688
14689 /* L1 cache line size */
14690 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
14691-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
14692+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
14693
14694 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
14695+#define __read_only __attribute__((__section__(".data..read_only")))
14696
14697 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
14698-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
14699+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
14700
14701 #ifdef CONFIG_X86_VSMP
14702 #ifdef CONFIG_SMP
14703diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
14704index 9863ee3..4a1f8e1 100644
14705--- a/arch/x86/include/asm/cacheflush.h
14706+++ b/arch/x86/include/asm/cacheflush.h
14707@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
14708 unsigned long pg_flags = pg->flags & _PGMT_MASK;
14709
14710 if (pg_flags == _PGMT_DEFAULT)
14711- return -1;
14712+ return ~0UL;
14713 else if (pg_flags == _PGMT_WC)
14714 return _PAGE_CACHE_WC;
14715 else if (pg_flags == _PGMT_UC_MINUS)
14716diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
14717index 46fc474..b02b0f9 100644
14718--- a/arch/x86/include/asm/checksum_32.h
14719+++ b/arch/x86/include/asm/checksum_32.h
14720@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
14721 int len, __wsum sum,
14722 int *src_err_ptr, int *dst_err_ptr);
14723
14724+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
14725+ int len, __wsum sum,
14726+ int *src_err_ptr, int *dst_err_ptr);
14727+
14728+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
14729+ int len, __wsum sum,
14730+ int *src_err_ptr, int *dst_err_ptr);
14731+
14732 /*
14733 * Note: when you get a NULL pointer exception here this means someone
14734 * passed in an incorrect kernel address to one of these functions.
14735@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
14736 int *err_ptr)
14737 {
14738 might_sleep();
14739- return csum_partial_copy_generic((__force void *)src, dst,
14740+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
14741 len, sum, err_ptr, NULL);
14742 }
14743
14744@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
14745 {
14746 might_sleep();
14747 if (access_ok(VERIFY_WRITE, dst, len))
14748- return csum_partial_copy_generic(src, (__force void *)dst,
14749+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
14750 len, sum, NULL, err_ptr);
14751
14752 if (len)
14753diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
14754index d47786a..ce1b05d 100644
14755--- a/arch/x86/include/asm/cmpxchg.h
14756+++ b/arch/x86/include/asm/cmpxchg.h
14757@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
14758 __compiletime_error("Bad argument size for cmpxchg");
14759 extern void __xadd_wrong_size(void)
14760 __compiletime_error("Bad argument size for xadd");
14761+extern void __xadd_check_overflow_wrong_size(void)
14762+ __compiletime_error("Bad argument size for xadd_check_overflow");
14763 extern void __add_wrong_size(void)
14764 __compiletime_error("Bad argument size for add");
14765+extern void __add_check_overflow_wrong_size(void)
14766+ __compiletime_error("Bad argument size for add_check_overflow");
14767
14768 /*
14769 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
14770@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
14771 __ret; \
14772 })
14773
14774+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
14775+ ({ \
14776+ __typeof__ (*(ptr)) __ret = (arg); \
14777+ switch (sizeof(*(ptr))) { \
14778+ case __X86_CASE_L: \
14779+ asm volatile (lock #op "l %0, %1\n" \
14780+ "jno 0f\n" \
14781+ "mov %0,%1\n" \
14782+ "int $4\n0:\n" \
14783+ _ASM_EXTABLE(0b, 0b) \
14784+ : "+r" (__ret), "+m" (*(ptr)) \
14785+ : : "memory", "cc"); \
14786+ break; \
14787+ case __X86_CASE_Q: \
14788+ asm volatile (lock #op "q %q0, %1\n" \
14789+ "jno 0f\n" \
14790+ "mov %0,%1\n" \
14791+ "int $4\n0:\n" \
14792+ _ASM_EXTABLE(0b, 0b) \
14793+ : "+r" (__ret), "+m" (*(ptr)) \
14794+ : : "memory", "cc"); \
14795+ break; \
14796+ default: \
14797+ __ ## op ## _check_overflow_wrong_size(); \
14798+ } \
14799+ __ret; \
14800+ })
14801+
14802 /*
14803 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
14804 * Since this is generally used to protect other memory information, we
14805@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
14806 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
14807 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
14808
14809+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
14810+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
14811+
14812 #define __add(ptr, inc, lock) \
14813 ({ \
14814 __typeof__ (*(ptr)) __ret = (inc); \
14815diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
14816index 59c6c40..5e0b22c 100644
14817--- a/arch/x86/include/asm/compat.h
14818+++ b/arch/x86/include/asm/compat.h
14819@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
14820 typedef u32 compat_uint_t;
14821 typedef u32 compat_ulong_t;
14822 typedef u64 __attribute__((aligned(4))) compat_u64;
14823-typedef u32 compat_uptr_t;
14824+typedef u32 __user compat_uptr_t;
14825
14826 struct compat_timespec {
14827 compat_time_t tv_sec;
14828diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
14829index e99ac27..10d834e 100644
14830--- a/arch/x86/include/asm/cpufeature.h
14831+++ b/arch/x86/include/asm/cpufeature.h
14832@@ -203,7 +203,7 @@
14833 #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
14834 #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
14835 #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
14836-
14837+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
14838
14839 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
14840 #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
14841@@ -211,7 +211,7 @@
14842 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
14843 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
14844 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
14845-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
14846+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
14847 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
14848 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
14849 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
14850@@ -353,6 +353,7 @@ extern const char * const x86_power_flags[32];
14851 #undef cpu_has_centaur_mcr
14852 #define cpu_has_centaur_mcr 0
14853
14854+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
14855 #endif /* CONFIG_X86_64 */
14856
14857 #if __GNUC__ >= 4
14858@@ -394,7 +395,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
14859 ".section .discard,\"aw\",@progbits\n"
14860 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
14861 ".previous\n"
14862- ".section .altinstr_replacement,\"ax\"\n"
14863+ ".section .altinstr_replacement,\"a\"\n"
14864 "3: movb $1,%0\n"
14865 "4:\n"
14866 ".previous\n"
14867diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
14868index 8bf1c06..b6ae785 100644
14869--- a/arch/x86/include/asm/desc.h
14870+++ b/arch/x86/include/asm/desc.h
14871@@ -4,6 +4,7 @@
14872 #include <asm/desc_defs.h>
14873 #include <asm/ldt.h>
14874 #include <asm/mmu.h>
14875+#include <asm/pgtable.h>
14876
14877 #include <linux/smp.h>
14878 #include <linux/percpu.h>
14879@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
14880
14881 desc->type = (info->read_exec_only ^ 1) << 1;
14882 desc->type |= info->contents << 2;
14883+ desc->type |= info->seg_not_present ^ 1;
14884
14885 desc->s = 1;
14886 desc->dpl = 0x3;
14887@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
14888 }
14889
14890 extern struct desc_ptr idt_descr;
14891-extern gate_desc idt_table[];
14892 extern struct desc_ptr nmi_idt_descr;
14893-extern gate_desc nmi_idt_table[];
14894-
14895-struct gdt_page {
14896- struct desc_struct gdt[GDT_ENTRIES];
14897-} __attribute__((aligned(PAGE_SIZE)));
14898-
14899-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
14900+extern gate_desc idt_table[256];
14901+extern gate_desc nmi_idt_table[256];
14902
14903+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
14904 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
14905 {
14906- return per_cpu(gdt_page, cpu).gdt;
14907+ return cpu_gdt_table[cpu];
14908 }
14909
14910 #ifdef CONFIG_X86_64
14911@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
14912 unsigned long base, unsigned dpl, unsigned flags,
14913 unsigned short seg)
14914 {
14915- gate->a = (seg << 16) | (base & 0xffff);
14916- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
14917+ gate->gate.offset_low = base;
14918+ gate->gate.seg = seg;
14919+ gate->gate.reserved = 0;
14920+ gate->gate.type = type;
14921+ gate->gate.s = 0;
14922+ gate->gate.dpl = dpl;
14923+ gate->gate.p = 1;
14924+ gate->gate.offset_high = base >> 16;
14925 }
14926
14927 #endif
14928@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
14929
14930 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
14931 {
14932+ pax_open_kernel();
14933 memcpy(&idt[entry], gate, sizeof(*gate));
14934+ pax_close_kernel();
14935 }
14936
14937 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
14938 {
14939+ pax_open_kernel();
14940 memcpy(&ldt[entry], desc, 8);
14941+ pax_close_kernel();
14942 }
14943
14944 static inline void
14945@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
14946 default: size = sizeof(*gdt); break;
14947 }
14948
14949+ pax_open_kernel();
14950 memcpy(&gdt[entry], desc, size);
14951+ pax_close_kernel();
14952 }
14953
14954 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
14955@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
14956
14957 static inline void native_load_tr_desc(void)
14958 {
14959+ pax_open_kernel();
14960 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
14961+ pax_close_kernel();
14962 }
14963
14964 static inline void native_load_gdt(const struct desc_ptr *dtr)
14965@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
14966 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
14967 unsigned int i;
14968
14969+ pax_open_kernel();
14970 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
14971 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
14972+ pax_close_kernel();
14973 }
14974
14975 #define _LDT_empty(info) \
14976@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
14977 preempt_enable();
14978 }
14979
14980-static inline unsigned long get_desc_base(const struct desc_struct *desc)
14981+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
14982 {
14983 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
14984 }
14985@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
14986 }
14987
14988 #ifdef CONFIG_X86_64
14989-static inline void set_nmi_gate(int gate, void *addr)
14990+static inline void set_nmi_gate(int gate, const void *addr)
14991 {
14992 gate_desc s;
14993
14994@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
14995 }
14996 #endif
14997
14998-static inline void _set_gate(int gate, unsigned type, void *addr,
14999+static inline void _set_gate(int gate, unsigned type, const void *addr,
15000 unsigned dpl, unsigned ist, unsigned seg)
15001 {
15002 gate_desc s;
15003@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
15004 * Pentium F0 0F bugfix can have resulted in the mapped
15005 * IDT being write-protected.
15006 */
15007-static inline void set_intr_gate(unsigned int n, void *addr)
15008+static inline void set_intr_gate(unsigned int n, const void *addr)
15009 {
15010 BUG_ON((unsigned)n > 0xFF);
15011 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
15012@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
15013 /*
15014 * This routine sets up an interrupt gate at directory privilege level 3.
15015 */
15016-static inline void set_system_intr_gate(unsigned int n, void *addr)
15017+static inline void set_system_intr_gate(unsigned int n, const void *addr)
15018 {
15019 BUG_ON((unsigned)n > 0xFF);
15020 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
15021 }
15022
15023-static inline void set_system_trap_gate(unsigned int n, void *addr)
15024+static inline void set_system_trap_gate(unsigned int n, const void *addr)
15025 {
15026 BUG_ON((unsigned)n > 0xFF);
15027 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
15028 }
15029
15030-static inline void set_trap_gate(unsigned int n, void *addr)
15031+static inline void set_trap_gate(unsigned int n, const void *addr)
15032 {
15033 BUG_ON((unsigned)n > 0xFF);
15034 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
15035@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
15036 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
15037 {
15038 BUG_ON((unsigned)n > 0xFF);
15039- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
15040+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
15041 }
15042
15043-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
15044+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
15045 {
15046 BUG_ON((unsigned)n > 0xFF);
15047 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
15048 }
15049
15050-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
15051+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
15052 {
15053 BUG_ON((unsigned)n > 0xFF);
15054 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
15055 }
15056
15057+#ifdef CONFIG_X86_32
15058+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
15059+{
15060+ struct desc_struct d;
15061+
15062+ if (likely(limit))
15063+ limit = (limit - 1UL) >> PAGE_SHIFT;
15064+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
15065+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
15066+}
15067+#endif
15068+
15069 #endif /* _ASM_X86_DESC_H */
15070diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
15071index 278441f..b95a174 100644
15072--- a/arch/x86/include/asm/desc_defs.h
15073+++ b/arch/x86/include/asm/desc_defs.h
15074@@ -31,6 +31,12 @@ struct desc_struct {
15075 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
15076 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
15077 };
15078+ struct {
15079+ u16 offset_low;
15080+ u16 seg;
15081+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
15082+ unsigned offset_high: 16;
15083+ } gate;
15084 };
15085 } __attribute__((packed));
15086
15087diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
15088index ced283a..ffe04cc 100644
15089--- a/arch/x86/include/asm/div64.h
15090+++ b/arch/x86/include/asm/div64.h
15091@@ -39,7 +39,7 @@
15092 __mod; \
15093 })
15094
15095-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
15096+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
15097 {
15098 union {
15099 u64 v64;
15100diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
15101index 9c999c1..3860cb8 100644
15102--- a/arch/x86/include/asm/elf.h
15103+++ b/arch/x86/include/asm/elf.h
15104@@ -243,7 +243,25 @@ extern int force_personality32;
15105 the loader. We need to make sure that it is out of the way of the program
15106 that it will "exec", and that there is sufficient room for the brk. */
15107
15108+#ifdef CONFIG_PAX_SEGMEXEC
15109+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
15110+#else
15111 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
15112+#endif
15113+
15114+#ifdef CONFIG_PAX_ASLR
15115+#ifdef CONFIG_X86_32
15116+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
15117+
15118+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
15119+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
15120+#else
15121+#define PAX_ELF_ET_DYN_BASE 0x400000UL
15122+
15123+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
15124+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
15125+#endif
15126+#endif
15127
15128 /* This yields a mask that user programs can use to figure out what
15129 instruction set this CPU supports. This could be done in user space,
15130@@ -296,16 +314,12 @@ do { \
15131
15132 #define ARCH_DLINFO \
15133 do { \
15134- if (vdso_enabled) \
15135- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
15136- (unsigned long)current->mm->context.vdso); \
15137+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
15138 } while (0)
15139
15140 #define ARCH_DLINFO_X32 \
15141 do { \
15142- if (vdso_enabled) \
15143- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
15144- (unsigned long)current->mm->context.vdso); \
15145+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
15146 } while (0)
15147
15148 #define AT_SYSINFO 32
15149@@ -320,7 +334,7 @@ else \
15150
15151 #endif /* !CONFIG_X86_32 */
15152
15153-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
15154+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
15155
15156 #define VDSO_ENTRY \
15157 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
15158@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
15159 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
15160 #define compat_arch_setup_additional_pages syscall32_setup_pages
15161
15162-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
15163-#define arch_randomize_brk arch_randomize_brk
15164-
15165 /*
15166 * True on X86_32 or when emulating IA32 on X86_64
15167 */
15168diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
15169index 75ce3f4..882e801 100644
15170--- a/arch/x86/include/asm/emergency-restart.h
15171+++ b/arch/x86/include/asm/emergency-restart.h
15172@@ -13,6 +13,6 @@ enum reboot_type {
15173
15174 extern enum reboot_type reboot_type;
15175
15176-extern void machine_emergency_restart(void);
15177+extern void machine_emergency_restart(void) __noreturn;
15178
15179 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
15180diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
15181index e25cc33..7d3ec01 100644
15182--- a/arch/x86/include/asm/fpu-internal.h
15183+++ b/arch/x86/include/asm/fpu-internal.h
15184@@ -126,8 +126,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
15185 #define user_insn(insn, output, input...) \
15186 ({ \
15187 int err; \
15188+ pax_open_userland(); \
15189 asm volatile(ASM_STAC "\n" \
15190- "1:" #insn "\n\t" \
15191+ "1:" \
15192+ __copyuser_seg \
15193+ #insn "\n\t" \
15194 "2: " ASM_CLAC "\n" \
15195 ".section .fixup,\"ax\"\n" \
15196 "3: movl $-1,%[err]\n" \
15197@@ -136,6 +139,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
15198 _ASM_EXTABLE(1b, 3b) \
15199 : [err] "=r" (err), output \
15200 : "0"(0), input); \
15201+ pax_close_userland(); \
15202 err; \
15203 })
15204
15205@@ -300,7 +304,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
15206 "emms\n\t" /* clear stack tags */
15207 "fildl %P[addr]", /* set F?P to defined value */
15208 X86_FEATURE_FXSAVE_LEAK,
15209- [addr] "m" (tsk->thread.fpu.has_fpu));
15210+ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
15211
15212 return fpu_restore_checking(&tsk->thread.fpu);
15213 }
15214diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
15215index be27ba1..04a8801 100644
15216--- a/arch/x86/include/asm/futex.h
15217+++ b/arch/x86/include/asm/futex.h
15218@@ -12,6 +12,7 @@
15219 #include <asm/smap.h>
15220
15221 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
15222+ typecheck(u32 __user *, uaddr); \
15223 asm volatile("\t" ASM_STAC "\n" \
15224 "1:\t" insn "\n" \
15225 "2:\t" ASM_CLAC "\n" \
15226@@ -20,15 +21,16 @@
15227 "\tjmp\t2b\n" \
15228 "\t.previous\n" \
15229 _ASM_EXTABLE(1b, 3b) \
15230- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
15231+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
15232 : "i" (-EFAULT), "0" (oparg), "1" (0))
15233
15234 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
15235+ typecheck(u32 __user *, uaddr); \
15236 asm volatile("\t" ASM_STAC "\n" \
15237 "1:\tmovl %2, %0\n" \
15238 "\tmovl\t%0, %3\n" \
15239 "\t" insn "\n" \
15240- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
15241+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
15242 "\tjnz\t1b\n" \
15243 "3:\t" ASM_CLAC "\n" \
15244 "\t.section .fixup,\"ax\"\n" \
15245@@ -38,7 +40,7 @@
15246 _ASM_EXTABLE(1b, 4b) \
15247 _ASM_EXTABLE(2b, 4b) \
15248 : "=&a" (oldval), "=&r" (ret), \
15249- "+m" (*uaddr), "=&r" (tem) \
15250+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
15251 : "r" (oparg), "i" (-EFAULT), "1" (0))
15252
15253 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
15254@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
15255
15256 pagefault_disable();
15257
15258+ pax_open_userland();
15259 switch (op) {
15260 case FUTEX_OP_SET:
15261- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
15262+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
15263 break;
15264 case FUTEX_OP_ADD:
15265- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
15266+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
15267 uaddr, oparg);
15268 break;
15269 case FUTEX_OP_OR:
15270@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
15271 default:
15272 ret = -ENOSYS;
15273 }
15274+ pax_close_userland();
15275
15276 pagefault_enable();
15277
15278@@ -115,18 +119,20 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
15279 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
15280 return -EFAULT;
15281
15282+ pax_open_userland();
15283 asm volatile("\t" ASM_STAC "\n"
15284- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
15285+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
15286 "2:\t" ASM_CLAC "\n"
15287 "\t.section .fixup, \"ax\"\n"
15288 "3:\tmov %3, %0\n"
15289 "\tjmp 2b\n"
15290 "\t.previous\n"
15291 _ASM_EXTABLE(1b, 3b)
15292- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
15293+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
15294 : "i" (-EFAULT), "r" (newval), "1" (oldval)
15295 : "memory"
15296 );
15297+ pax_close_userland();
15298
15299 *uval = oldval;
15300 return ret;
15301diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
15302index 1da97ef..9c2ebff 100644
15303--- a/arch/x86/include/asm/hw_irq.h
15304+++ b/arch/x86/include/asm/hw_irq.h
15305@@ -148,8 +148,8 @@ extern void setup_ioapic_dest(void);
15306 extern void enable_IO_APIC(void);
15307
15308 /* Statistics */
15309-extern atomic_t irq_err_count;
15310-extern atomic_t irq_mis_count;
15311+extern atomic_unchecked_t irq_err_count;
15312+extern atomic_unchecked_t irq_mis_count;
15313
15314 /* EISA */
15315 extern void eisa_set_level_irq(unsigned int irq);
15316diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
15317index a203659..9889f1c 100644
15318--- a/arch/x86/include/asm/i8259.h
15319+++ b/arch/x86/include/asm/i8259.h
15320@@ -62,7 +62,7 @@ struct legacy_pic {
15321 void (*init)(int auto_eoi);
15322 int (*irq_pending)(unsigned int irq);
15323 void (*make_irq)(unsigned int irq);
15324-};
15325+} __do_const;
15326
15327 extern struct legacy_pic *legacy_pic;
15328 extern struct legacy_pic null_legacy_pic;
15329diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
15330index d8e8eef..1765f78 100644
15331--- a/arch/x86/include/asm/io.h
15332+++ b/arch/x86/include/asm/io.h
15333@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
15334 "m" (*(volatile type __force *)addr) barrier); }
15335
15336 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
15337-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
15338-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
15339+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
15340+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
15341
15342 build_mmio_read(__readb, "b", unsigned char, "=q", )
15343-build_mmio_read(__readw, "w", unsigned short, "=r", )
15344-build_mmio_read(__readl, "l", unsigned int, "=r", )
15345+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
15346+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
15347
15348 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
15349 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
15350@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
15351 return ioremap_nocache(offset, size);
15352 }
15353
15354-extern void iounmap(volatile void __iomem *addr);
15355+extern void iounmap(const volatile void __iomem *addr);
15356
15357 extern void set_iounmap_nonlazy(void);
15358
15359@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
15360
15361 #include <linux/vmalloc.h>
15362
15363+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
15364+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
15365+{
15366+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
15367+}
15368+
15369+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
15370+{
15371+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
15372+}
15373+
15374 /*
15375 * Convert a virtual cached pointer to an uncached pointer
15376 */
15377diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
15378index bba3cf8..06bc8da 100644
15379--- a/arch/x86/include/asm/irqflags.h
15380+++ b/arch/x86/include/asm/irqflags.h
15381@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
15382 sti; \
15383 sysexit
15384
15385+#define GET_CR0_INTO_RDI mov %cr0, %rdi
15386+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
15387+#define GET_CR3_INTO_RDI mov %cr3, %rdi
15388+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
15389+
15390 #else
15391 #define INTERRUPT_RETURN iret
15392 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
15393diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
15394index 5a6d287..f815789 100644
15395--- a/arch/x86/include/asm/kprobes.h
15396+++ b/arch/x86/include/asm/kprobes.h
15397@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
15398 #define RELATIVEJUMP_SIZE 5
15399 #define RELATIVECALL_OPCODE 0xe8
15400 #define RELATIVE_ADDR_SIZE 4
15401-#define MAX_STACK_SIZE 64
15402-#define MIN_STACK_SIZE(ADDR) \
15403- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
15404- THREAD_SIZE - (unsigned long)(ADDR))) \
15405- ? (MAX_STACK_SIZE) \
15406- : (((unsigned long)current_thread_info()) + \
15407- THREAD_SIZE - (unsigned long)(ADDR)))
15408+#define MAX_STACK_SIZE 64UL
15409+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
15410
15411 #define flush_insn_slot(p) do { } while (0)
15412
15413diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
15414index 2d89e39..baee879 100644
15415--- a/arch/x86/include/asm/local.h
15416+++ b/arch/x86/include/asm/local.h
15417@@ -10,33 +10,97 @@ typedef struct {
15418 atomic_long_t a;
15419 } local_t;
15420
15421+typedef struct {
15422+ atomic_long_unchecked_t a;
15423+} local_unchecked_t;
15424+
15425 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
15426
15427 #define local_read(l) atomic_long_read(&(l)->a)
15428+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
15429 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
15430+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
15431
15432 static inline void local_inc(local_t *l)
15433 {
15434- asm volatile(_ASM_INC "%0"
15435+ asm volatile(_ASM_INC "%0\n"
15436+
15437+#ifdef CONFIG_PAX_REFCOUNT
15438+ "jno 0f\n"
15439+ _ASM_DEC "%0\n"
15440+ "int $4\n0:\n"
15441+ _ASM_EXTABLE(0b, 0b)
15442+#endif
15443+
15444+ : "+m" (l->a.counter));
15445+}
15446+
15447+static inline void local_inc_unchecked(local_unchecked_t *l)
15448+{
15449+ asm volatile(_ASM_INC "%0\n"
15450 : "+m" (l->a.counter));
15451 }
15452
15453 static inline void local_dec(local_t *l)
15454 {
15455- asm volatile(_ASM_DEC "%0"
15456+ asm volatile(_ASM_DEC "%0\n"
15457+
15458+#ifdef CONFIG_PAX_REFCOUNT
15459+ "jno 0f\n"
15460+ _ASM_INC "%0\n"
15461+ "int $4\n0:\n"
15462+ _ASM_EXTABLE(0b, 0b)
15463+#endif
15464+
15465+ : "+m" (l->a.counter));
15466+}
15467+
15468+static inline void local_dec_unchecked(local_unchecked_t *l)
15469+{
15470+ asm volatile(_ASM_DEC "%0\n"
15471 : "+m" (l->a.counter));
15472 }
15473
15474 static inline void local_add(long i, local_t *l)
15475 {
15476- asm volatile(_ASM_ADD "%1,%0"
15477+ asm volatile(_ASM_ADD "%1,%0\n"
15478+
15479+#ifdef CONFIG_PAX_REFCOUNT
15480+ "jno 0f\n"
15481+ _ASM_SUB "%1,%0\n"
15482+ "int $4\n0:\n"
15483+ _ASM_EXTABLE(0b, 0b)
15484+#endif
15485+
15486+ : "+m" (l->a.counter)
15487+ : "ir" (i));
15488+}
15489+
15490+static inline void local_add_unchecked(long i, local_unchecked_t *l)
15491+{
15492+ asm volatile(_ASM_ADD "%1,%0\n"
15493 : "+m" (l->a.counter)
15494 : "ir" (i));
15495 }
15496
15497 static inline void local_sub(long i, local_t *l)
15498 {
15499- asm volatile(_ASM_SUB "%1,%0"
15500+ asm volatile(_ASM_SUB "%1,%0\n"
15501+
15502+#ifdef CONFIG_PAX_REFCOUNT
15503+ "jno 0f\n"
15504+ _ASM_ADD "%1,%0\n"
15505+ "int $4\n0:\n"
15506+ _ASM_EXTABLE(0b, 0b)
15507+#endif
15508+
15509+ : "+m" (l->a.counter)
15510+ : "ir" (i));
15511+}
15512+
15513+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
15514+{
15515+ asm volatile(_ASM_SUB "%1,%0\n"
15516 : "+m" (l->a.counter)
15517 : "ir" (i));
15518 }
15519@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
15520 {
15521 unsigned char c;
15522
15523- asm volatile(_ASM_SUB "%2,%0; sete %1"
15524+ asm volatile(_ASM_SUB "%2,%0\n"
15525+
15526+#ifdef CONFIG_PAX_REFCOUNT
15527+ "jno 0f\n"
15528+ _ASM_ADD "%2,%0\n"
15529+ "int $4\n0:\n"
15530+ _ASM_EXTABLE(0b, 0b)
15531+#endif
15532+
15533+ "sete %1\n"
15534 : "+m" (l->a.counter), "=qm" (c)
15535 : "ir" (i) : "memory");
15536 return c;
15537@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
15538 {
15539 unsigned char c;
15540
15541- asm volatile(_ASM_DEC "%0; sete %1"
15542+ asm volatile(_ASM_DEC "%0\n"
15543+
15544+#ifdef CONFIG_PAX_REFCOUNT
15545+ "jno 0f\n"
15546+ _ASM_INC "%0\n"
15547+ "int $4\n0:\n"
15548+ _ASM_EXTABLE(0b, 0b)
15549+#endif
15550+
15551+ "sete %1\n"
15552 : "+m" (l->a.counter), "=qm" (c)
15553 : : "memory");
15554 return c != 0;
15555@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
15556 {
15557 unsigned char c;
15558
15559- asm volatile(_ASM_INC "%0; sete %1"
15560+ asm volatile(_ASM_INC "%0\n"
15561+
15562+#ifdef CONFIG_PAX_REFCOUNT
15563+ "jno 0f\n"
15564+ _ASM_DEC "%0\n"
15565+ "int $4\n0:\n"
15566+ _ASM_EXTABLE(0b, 0b)
15567+#endif
15568+
15569+ "sete %1\n"
15570 : "+m" (l->a.counter), "=qm" (c)
15571 : : "memory");
15572 return c != 0;
15573@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
15574 {
15575 unsigned char c;
15576
15577- asm volatile(_ASM_ADD "%2,%0; sets %1"
15578+ asm volatile(_ASM_ADD "%2,%0\n"
15579+
15580+#ifdef CONFIG_PAX_REFCOUNT
15581+ "jno 0f\n"
15582+ _ASM_SUB "%2,%0\n"
15583+ "int $4\n0:\n"
15584+ _ASM_EXTABLE(0b, 0b)
15585+#endif
15586+
15587+ "sets %1\n"
15588 : "+m" (l->a.counter), "=qm" (c)
15589 : "ir" (i) : "memory");
15590 return c;
15591@@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
15592 static inline long local_add_return(long i, local_t *l)
15593 {
15594 long __i = i;
15595+ asm volatile(_ASM_XADD "%0, %1\n"
15596+
15597+#ifdef CONFIG_PAX_REFCOUNT
15598+ "jno 0f\n"
15599+ _ASM_MOV "%0,%1\n"
15600+ "int $4\n0:\n"
15601+ _ASM_EXTABLE(0b, 0b)
15602+#endif
15603+
15604+ : "+r" (i), "+m" (l->a.counter)
15605+ : : "memory");
15606+ return i + __i;
15607+}
15608+
15609+/**
15610+ * local_add_return_unchecked - add and return
15611+ * @i: integer value to add
15612+ * @l: pointer to type local_unchecked_t
15613+ *
15614+ * Atomically adds @i to @l and returns @i + @l
15615+ */
15616+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
15617+{
15618+ long __i = i;
15619 asm volatile(_ASM_XADD "%0, %1;"
15620 : "+r" (i), "+m" (l->a.counter)
15621 : : "memory");
15622@@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
15623
15624 #define local_cmpxchg(l, o, n) \
15625 (cmpxchg_local(&((l)->a.counter), (o), (n)))
15626+#define local_cmpxchg_unchecked(l, o, n) \
15627+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
15628 /* Always has a lock prefix */
15629 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
15630
15631diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
15632new file mode 100644
15633index 0000000..2bfd3ba
15634--- /dev/null
15635+++ b/arch/x86/include/asm/mman.h
15636@@ -0,0 +1,15 @@
15637+#ifndef _X86_MMAN_H
15638+#define _X86_MMAN_H
15639+
15640+#include <uapi/asm/mman.h>
15641+
15642+#ifdef __KERNEL__
15643+#ifndef __ASSEMBLY__
15644+#ifdef CONFIG_X86_32
15645+#define arch_mmap_check i386_mmap_check
15646+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
15647+#endif
15648+#endif
15649+#endif
15650+
15651+#endif /* X86_MMAN_H */
15652diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
15653index 5f55e69..e20bfb1 100644
15654--- a/arch/x86/include/asm/mmu.h
15655+++ b/arch/x86/include/asm/mmu.h
15656@@ -9,7 +9,7 @@
15657 * we put the segment information here.
15658 */
15659 typedef struct {
15660- void *ldt;
15661+ struct desc_struct *ldt;
15662 int size;
15663
15664 #ifdef CONFIG_X86_64
15665@@ -18,7 +18,19 @@ typedef struct {
15666 #endif
15667
15668 struct mutex lock;
15669- void *vdso;
15670+ unsigned long vdso;
15671+
15672+#ifdef CONFIG_X86_32
15673+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15674+ unsigned long user_cs_base;
15675+ unsigned long user_cs_limit;
15676+
15677+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15678+ cpumask_t cpu_user_cs_mask;
15679+#endif
15680+
15681+#endif
15682+#endif
15683 } mm_context_t;
15684
15685 #ifdef CONFIG_SMP
15686diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
15687index cdbf367..4c73c9e 100644
15688--- a/arch/x86/include/asm/mmu_context.h
15689+++ b/arch/x86/include/asm/mmu_context.h
15690@@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *mm);
15691
15692 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
15693 {
15694+
15695+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15696+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
15697+ unsigned int i;
15698+ pgd_t *pgd;
15699+
15700+ pax_open_kernel();
15701+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
15702+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
15703+ set_pgd_batched(pgd+i, native_make_pgd(0));
15704+ pax_close_kernel();
15705+ }
15706+#endif
15707+
15708 #ifdef CONFIG_SMP
15709 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
15710 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
15711@@ -34,16 +48,55 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
15712 struct task_struct *tsk)
15713 {
15714 unsigned cpu = smp_processor_id();
15715+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15716+ int tlbstate = TLBSTATE_OK;
15717+#endif
15718
15719 if (likely(prev != next)) {
15720 #ifdef CONFIG_SMP
15721+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15722+ tlbstate = this_cpu_read(cpu_tlbstate.state);
15723+#endif
15724 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
15725 this_cpu_write(cpu_tlbstate.active_mm, next);
15726 #endif
15727 cpumask_set_cpu(cpu, mm_cpumask(next));
15728
15729 /* Re-load page tables */
15730+#ifdef CONFIG_PAX_PER_CPU_PGD
15731+ pax_open_kernel();
15732+
15733+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15734+ if (static_cpu_has(X86_FEATURE_PCID))
15735+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
15736+ else
15737+#endif
15738+
15739+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
15740+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
15741+ pax_close_kernel();
15742+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
15743+
15744+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15745+ if (static_cpu_has(X86_FEATURE_PCID)) {
15746+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
15747+ unsigned long descriptor[2];
15748+ descriptor[0] = PCID_USER;
15749+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
15750+ } else {
15751+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
15752+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
15753+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
15754+ else
15755+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
15756+ }
15757+ } else
15758+#endif
15759+
15760+ load_cr3(get_cpu_pgd(cpu, kernel));
15761+#else
15762 load_cr3(next->pgd);
15763+#endif
15764
15765 /* stop flush ipis for the previous mm */
15766 cpumask_clear_cpu(cpu, mm_cpumask(prev));
15767@@ -53,9 +106,63 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
15768 */
15769 if (unlikely(prev->context.ldt != next->context.ldt))
15770 load_LDT_nolock(&next->context);
15771- }
15772+
15773+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15774+ if (!(__supported_pte_mask & _PAGE_NX)) {
15775+ smp_mb__before_clear_bit();
15776+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
15777+ smp_mb__after_clear_bit();
15778+ cpu_set(cpu, next->context.cpu_user_cs_mask);
15779+ }
15780+#endif
15781+
15782+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15783+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
15784+ prev->context.user_cs_limit != next->context.user_cs_limit))
15785+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
15786 #ifdef CONFIG_SMP
15787+ else if (unlikely(tlbstate != TLBSTATE_OK))
15788+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
15789+#endif
15790+#endif
15791+
15792+ }
15793 else {
15794+
15795+#ifdef CONFIG_PAX_PER_CPU_PGD
15796+ pax_open_kernel();
15797+
15798+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15799+ if (static_cpu_has(X86_FEATURE_PCID))
15800+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
15801+ else
15802+#endif
15803+
15804+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
15805+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
15806+ pax_close_kernel();
15807+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
15808+
15809+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15810+ if (static_cpu_has(X86_FEATURE_PCID)) {
15811+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
15812+ unsigned long descriptor[2];
15813+ descriptor[0] = PCID_USER;
15814+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
15815+ } else {
15816+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
15817+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
15818+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
15819+ else
15820+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
15821+ }
15822+ } else
15823+#endif
15824+
15825+ load_cr3(get_cpu_pgd(cpu, kernel));
15826+#endif
15827+
15828+#ifdef CONFIG_SMP
15829 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
15830 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
15831
15832@@ -64,11 +171,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
15833 * tlb flush IPI delivery. We must reload CR3
15834 * to make sure to use no freed page tables.
15835 */
15836+
15837+#ifndef CONFIG_PAX_PER_CPU_PGD
15838 load_cr3(next->pgd);
15839+#endif
15840+
15841 load_LDT_nolock(&next->context);
15842+
15843+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15844+ if (!(__supported_pte_mask & _PAGE_NX))
15845+ cpu_set(cpu, next->context.cpu_user_cs_mask);
15846+#endif
15847+
15848+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15849+#ifdef CONFIG_PAX_PAGEEXEC
15850+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
15851+#endif
15852+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
15853+#endif
15854+
15855 }
15856+#endif
15857 }
15858-#endif
15859 }
15860
15861 #define activate_mm(prev, next) \
15862diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
15863index e3b7819..b257c64 100644
15864--- a/arch/x86/include/asm/module.h
15865+++ b/arch/x86/include/asm/module.h
15866@@ -5,6 +5,7 @@
15867
15868 #ifdef CONFIG_X86_64
15869 /* X86_64 does not define MODULE_PROC_FAMILY */
15870+#define MODULE_PROC_FAMILY ""
15871 #elif defined CONFIG_M486
15872 #define MODULE_PROC_FAMILY "486 "
15873 #elif defined CONFIG_M586
15874@@ -57,8 +58,20 @@
15875 #error unknown processor family
15876 #endif
15877
15878-#ifdef CONFIG_X86_32
15879-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
15880+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15881+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
15882+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
15883+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
15884+#else
15885+#define MODULE_PAX_KERNEXEC ""
15886 #endif
15887
15888+#ifdef CONFIG_PAX_MEMORY_UDEREF
15889+#define MODULE_PAX_UDEREF "UDEREF "
15890+#else
15891+#define MODULE_PAX_UDEREF ""
15892+#endif
15893+
15894+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
15895+
15896 #endif /* _ASM_X86_MODULE_H */
15897diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
15898index 86f9301..b365cda 100644
15899--- a/arch/x86/include/asm/nmi.h
15900+++ b/arch/x86/include/asm/nmi.h
15901@@ -40,11 +40,11 @@ struct nmiaction {
15902 nmi_handler_t handler;
15903 unsigned long flags;
15904 const char *name;
15905-};
15906+} __do_const;
15907
15908 #define register_nmi_handler(t, fn, fg, n, init...) \
15909 ({ \
15910- static struct nmiaction init fn##_na = { \
15911+ static const struct nmiaction init fn##_na = { \
15912 .handler = (fn), \
15913 .name = (n), \
15914 .flags = (fg), \
15915@@ -52,7 +52,7 @@ struct nmiaction {
15916 __register_nmi_handler((t), &fn##_na); \
15917 })
15918
15919-int __register_nmi_handler(unsigned int, struct nmiaction *);
15920+int __register_nmi_handler(unsigned int, const struct nmiaction *);
15921
15922 void unregister_nmi_handler(unsigned int, const char *);
15923
15924diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
15925index c878924..21f4889 100644
15926--- a/arch/x86/include/asm/page.h
15927+++ b/arch/x86/include/asm/page.h
15928@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
15929 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
15930
15931 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
15932+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
15933
15934 #define __boot_va(x) __va(x)
15935 #define __boot_pa(x) __pa(x)
15936diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
15937index 0f1ddee..e2fc3d1 100644
15938--- a/arch/x86/include/asm/page_64.h
15939+++ b/arch/x86/include/asm/page_64.h
15940@@ -7,9 +7,9 @@
15941
15942 /* duplicated to the one in bootmem.h */
15943 extern unsigned long max_pfn;
15944-extern unsigned long phys_base;
15945+extern const unsigned long phys_base;
15946
15947-static inline unsigned long __phys_addr_nodebug(unsigned long x)
15948+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
15949 {
15950 unsigned long y = x - __START_KERNEL_map;
15951
15952diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
15953index cfdc9ee..3f7b5d6 100644
15954--- a/arch/x86/include/asm/paravirt.h
15955+++ b/arch/x86/include/asm/paravirt.h
15956@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
15957 return (pmd_t) { ret };
15958 }
15959
15960-static inline pmdval_t pmd_val(pmd_t pmd)
15961+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
15962 {
15963 pmdval_t ret;
15964
15965@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
15966 val);
15967 }
15968
15969+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
15970+{
15971+ pgdval_t val = native_pgd_val(pgd);
15972+
15973+ if (sizeof(pgdval_t) > sizeof(long))
15974+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
15975+ val, (u64)val >> 32);
15976+ else
15977+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
15978+ val);
15979+}
15980+
15981 static inline void pgd_clear(pgd_t *pgdp)
15982 {
15983 set_pgd(pgdp, __pgd(0));
15984@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
15985 pv_mmu_ops.set_fixmap(idx, phys, flags);
15986 }
15987
15988+#ifdef CONFIG_PAX_KERNEXEC
15989+static inline unsigned long pax_open_kernel(void)
15990+{
15991+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
15992+}
15993+
15994+static inline unsigned long pax_close_kernel(void)
15995+{
15996+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
15997+}
15998+#else
15999+static inline unsigned long pax_open_kernel(void) { return 0; }
16000+static inline unsigned long pax_close_kernel(void) { return 0; }
16001+#endif
16002+
16003 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
16004
16005 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
16006@@ -926,7 +953,7 @@ extern void default_banner(void);
16007
16008 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
16009 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
16010-#define PARA_INDIRECT(addr) *%cs:addr
16011+#define PARA_INDIRECT(addr) *%ss:addr
16012 #endif
16013
16014 #define INTERRUPT_RETURN \
16015@@ -1001,6 +1028,21 @@ extern void default_banner(void);
16016 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
16017 CLBR_NONE, \
16018 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
16019+
16020+#define GET_CR0_INTO_RDI \
16021+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
16022+ mov %rax,%rdi
16023+
16024+#define SET_RDI_INTO_CR0 \
16025+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
16026+
16027+#define GET_CR3_INTO_RDI \
16028+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
16029+ mov %rax,%rdi
16030+
16031+#define SET_RDI_INTO_CR3 \
16032+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
16033+
16034 #endif /* CONFIG_X86_32 */
16035
16036 #endif /* __ASSEMBLY__ */
16037diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
16038index 0db1fca..52310cc 100644
16039--- a/arch/x86/include/asm/paravirt_types.h
16040+++ b/arch/x86/include/asm/paravirt_types.h
16041@@ -84,7 +84,7 @@ struct pv_init_ops {
16042 */
16043 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
16044 unsigned long addr, unsigned len);
16045-};
16046+} __no_const;
16047
16048
16049 struct pv_lazy_ops {
16050@@ -98,7 +98,7 @@ struct pv_time_ops {
16051 unsigned long long (*sched_clock)(void);
16052 unsigned long long (*steal_clock)(int cpu);
16053 unsigned long (*get_tsc_khz)(void);
16054-};
16055+} __no_const;
16056
16057 struct pv_cpu_ops {
16058 /* hooks for various privileged instructions */
16059@@ -192,7 +192,7 @@ struct pv_cpu_ops {
16060
16061 void (*start_context_switch)(struct task_struct *prev);
16062 void (*end_context_switch)(struct task_struct *next);
16063-};
16064+} __no_const;
16065
16066 struct pv_irq_ops {
16067 /*
16068@@ -223,7 +223,7 @@ struct pv_apic_ops {
16069 unsigned long start_eip,
16070 unsigned long start_esp);
16071 #endif
16072-};
16073+} __no_const;
16074
16075 struct pv_mmu_ops {
16076 unsigned long (*read_cr2)(void);
16077@@ -313,6 +313,7 @@ struct pv_mmu_ops {
16078 struct paravirt_callee_save make_pud;
16079
16080 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
16081+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
16082 #endif /* PAGETABLE_LEVELS == 4 */
16083 #endif /* PAGETABLE_LEVELS >= 3 */
16084
16085@@ -324,6 +325,12 @@ struct pv_mmu_ops {
16086 an mfn. We can tell which is which from the index. */
16087 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
16088 phys_addr_t phys, pgprot_t flags);
16089+
16090+#ifdef CONFIG_PAX_KERNEXEC
16091+ unsigned long (*pax_open_kernel)(void);
16092+ unsigned long (*pax_close_kernel)(void);
16093+#endif
16094+
16095 };
16096
16097 struct arch_spinlock;
16098@@ -334,7 +341,7 @@ struct pv_lock_ops {
16099 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
16100 int (*spin_trylock)(struct arch_spinlock *lock);
16101 void (*spin_unlock)(struct arch_spinlock *lock);
16102-};
16103+} __no_const;
16104
16105 /* This contains all the paravirt structures: we get a convenient
16106 * number for each function using the offset which we use to indicate
16107diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
16108index b4389a4..7024269 100644
16109--- a/arch/x86/include/asm/pgalloc.h
16110+++ b/arch/x86/include/asm/pgalloc.h
16111@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
16112 pmd_t *pmd, pte_t *pte)
16113 {
16114 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
16115+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
16116+}
16117+
16118+static inline void pmd_populate_user(struct mm_struct *mm,
16119+ pmd_t *pmd, pte_t *pte)
16120+{
16121+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
16122 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
16123 }
16124
16125@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
16126
16127 #ifdef CONFIG_X86_PAE
16128 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
16129+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
16130+{
16131+ pud_populate(mm, pudp, pmd);
16132+}
16133 #else /* !CONFIG_X86_PAE */
16134 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
16135 {
16136 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
16137 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
16138 }
16139+
16140+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
16141+{
16142+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
16143+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
16144+}
16145 #endif /* CONFIG_X86_PAE */
16146
16147 #if PAGETABLE_LEVELS > 3
16148@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
16149 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
16150 }
16151
16152+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
16153+{
16154+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
16155+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
16156+}
16157+
16158 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
16159 {
16160 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
16161diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
16162index f2b489c..4f7e2e5 100644
16163--- a/arch/x86/include/asm/pgtable-2level.h
16164+++ b/arch/x86/include/asm/pgtable-2level.h
16165@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
16166
16167 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
16168 {
16169+ pax_open_kernel();
16170 *pmdp = pmd;
16171+ pax_close_kernel();
16172 }
16173
16174 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
16175diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
16176index 4cc9f2b..5fd9226 100644
16177--- a/arch/x86/include/asm/pgtable-3level.h
16178+++ b/arch/x86/include/asm/pgtable-3level.h
16179@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
16180
16181 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
16182 {
16183+ pax_open_kernel();
16184 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
16185+ pax_close_kernel();
16186 }
16187
16188 static inline void native_set_pud(pud_t *pudp, pud_t pud)
16189 {
16190+ pax_open_kernel();
16191 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
16192+ pax_close_kernel();
16193 }
16194
16195 /*
16196diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
16197index 1e67223..92a9585 100644
16198--- a/arch/x86/include/asm/pgtable.h
16199+++ b/arch/x86/include/asm/pgtable.h
16200@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
16201
16202 #ifndef __PAGETABLE_PUD_FOLDED
16203 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
16204+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
16205 #define pgd_clear(pgd) native_pgd_clear(pgd)
16206 #endif
16207
16208@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
16209
16210 #define arch_end_context_switch(prev) do {} while(0)
16211
16212+#define pax_open_kernel() native_pax_open_kernel()
16213+#define pax_close_kernel() native_pax_close_kernel()
16214 #endif /* CONFIG_PARAVIRT */
16215
16216+#define __HAVE_ARCH_PAX_OPEN_KERNEL
16217+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
16218+
16219+#ifdef CONFIG_PAX_KERNEXEC
16220+static inline unsigned long native_pax_open_kernel(void)
16221+{
16222+ unsigned long cr0;
16223+
16224+ preempt_disable();
16225+ barrier();
16226+ cr0 = read_cr0() ^ X86_CR0_WP;
16227+ BUG_ON(cr0 & X86_CR0_WP);
16228+ write_cr0(cr0);
16229+ return cr0 ^ X86_CR0_WP;
16230+}
16231+
16232+static inline unsigned long native_pax_close_kernel(void)
16233+{
16234+ unsigned long cr0;
16235+
16236+ cr0 = read_cr0() ^ X86_CR0_WP;
16237+ BUG_ON(!(cr0 & X86_CR0_WP));
16238+ write_cr0(cr0);
16239+ barrier();
16240+ preempt_enable_no_resched();
16241+ return cr0 ^ X86_CR0_WP;
16242+}
16243+#else
16244+static inline unsigned long native_pax_open_kernel(void) { return 0; }
16245+static inline unsigned long native_pax_close_kernel(void) { return 0; }
16246+#endif
16247+
16248 /*
16249 * The following only work if pte_present() is true.
16250 * Undefined behaviour if not..
16251 */
16252+static inline int pte_user(pte_t pte)
16253+{
16254+ return pte_val(pte) & _PAGE_USER;
16255+}
16256+
16257 static inline int pte_dirty(pte_t pte)
16258 {
16259 return pte_flags(pte) & _PAGE_DIRTY;
16260@@ -147,6 +187,11 @@ static inline unsigned long pud_pfn(pud_t pud)
16261 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
16262 }
16263
16264+static inline unsigned long pgd_pfn(pgd_t pgd)
16265+{
16266+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
16267+}
16268+
16269 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
16270
16271 static inline int pmd_large(pmd_t pte)
16272@@ -200,9 +245,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
16273 return pte_clear_flags(pte, _PAGE_RW);
16274 }
16275
16276+static inline pte_t pte_mkread(pte_t pte)
16277+{
16278+ return __pte(pte_val(pte) | _PAGE_USER);
16279+}
16280+
16281 static inline pte_t pte_mkexec(pte_t pte)
16282 {
16283- return pte_clear_flags(pte, _PAGE_NX);
16284+#ifdef CONFIG_X86_PAE
16285+ if (__supported_pte_mask & _PAGE_NX)
16286+ return pte_clear_flags(pte, _PAGE_NX);
16287+ else
16288+#endif
16289+ return pte_set_flags(pte, _PAGE_USER);
16290+}
16291+
16292+static inline pte_t pte_exprotect(pte_t pte)
16293+{
16294+#ifdef CONFIG_X86_PAE
16295+ if (__supported_pte_mask & _PAGE_NX)
16296+ return pte_set_flags(pte, _PAGE_NX);
16297+ else
16298+#endif
16299+ return pte_clear_flags(pte, _PAGE_USER);
16300 }
16301
16302 static inline pte_t pte_mkdirty(pte_t pte)
16303@@ -394,6 +459,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
16304 #endif
16305
16306 #ifndef __ASSEMBLY__
16307+
16308+#ifdef CONFIG_PAX_PER_CPU_PGD
16309+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
16310+enum cpu_pgd_type {kernel = 0, user = 1};
16311+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
16312+{
16313+ return cpu_pgd[cpu][type];
16314+}
16315+#endif
16316+
16317 #include <linux/mm_types.h>
16318 #include <linux/log2.h>
16319
16320@@ -529,7 +604,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
16321 * Currently stuck as a macro due to indirect forward reference to
16322 * linux/mmzone.h's __section_mem_map_addr() definition:
16323 */
16324-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
16325+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
16326
16327 /* Find an entry in the second-level page table.. */
16328 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
16329@@ -569,7 +644,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
16330 * Currently stuck as a macro due to indirect forward reference to
16331 * linux/mmzone.h's __section_mem_map_addr() definition:
16332 */
16333-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
16334+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
16335
16336 /* to find an entry in a page-table-directory. */
16337 static inline unsigned long pud_index(unsigned long address)
16338@@ -584,7 +659,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
16339
16340 static inline int pgd_bad(pgd_t pgd)
16341 {
16342- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
16343+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
16344 }
16345
16346 static inline int pgd_none(pgd_t pgd)
16347@@ -607,7 +682,12 @@ static inline int pgd_none(pgd_t pgd)
16348 * pgd_offset() returns a (pgd_t *)
16349 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
16350 */
16351-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
16352+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
16353+
16354+#ifdef CONFIG_PAX_PER_CPU_PGD
16355+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
16356+#endif
16357+
16358 /*
16359 * a shortcut which implies the use of the kernel's pgd, instead
16360 * of a process's
16361@@ -618,6 +698,23 @@ static inline int pgd_none(pgd_t pgd)
16362 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
16363 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
16364
16365+#ifdef CONFIG_X86_32
16366+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
16367+#else
16368+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
16369+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
16370+
16371+#ifdef CONFIG_PAX_MEMORY_UDEREF
16372+#ifdef __ASSEMBLY__
16373+#define pax_user_shadow_base pax_user_shadow_base(%rip)
16374+#else
16375+extern unsigned long pax_user_shadow_base;
16376+extern pgdval_t clone_pgd_mask;
16377+#endif
16378+#endif
16379+
16380+#endif
16381+
16382 #ifndef __ASSEMBLY__
16383
16384 extern int direct_gbpages;
16385@@ -784,11 +881,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
16386 * dst and src can be on the same page, but the range must not overlap,
16387 * and must not cross a page boundary.
16388 */
16389-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
16390+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
16391 {
16392- memcpy(dst, src, count * sizeof(pgd_t));
16393+ pax_open_kernel();
16394+ while (count--)
16395+ *dst++ = *src++;
16396+ pax_close_kernel();
16397 }
16398
16399+#ifdef CONFIG_PAX_PER_CPU_PGD
16400+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
16401+#endif
16402+
16403+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16404+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
16405+#else
16406+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
16407+#endif
16408+
16409 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
16410 static inline int page_level_shift(enum pg_level level)
16411 {
16412diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
16413index 9ee3221..b979c6b 100644
16414--- a/arch/x86/include/asm/pgtable_32.h
16415+++ b/arch/x86/include/asm/pgtable_32.h
16416@@ -25,9 +25,6 @@
16417 struct mm_struct;
16418 struct vm_area_struct;
16419
16420-extern pgd_t swapper_pg_dir[1024];
16421-extern pgd_t initial_page_table[1024];
16422-
16423 static inline void pgtable_cache_init(void) { }
16424 static inline void check_pgt_cache(void) { }
16425 void paging_init(void);
16426@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
16427 # include <asm/pgtable-2level.h>
16428 #endif
16429
16430+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
16431+extern pgd_t initial_page_table[PTRS_PER_PGD];
16432+#ifdef CONFIG_X86_PAE
16433+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
16434+#endif
16435+
16436 #if defined(CONFIG_HIGHPTE)
16437 #define pte_offset_map(dir, address) \
16438 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
16439@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
16440 /* Clear a kernel PTE and flush it from the TLB */
16441 #define kpte_clear_flush(ptep, vaddr) \
16442 do { \
16443+ pax_open_kernel(); \
16444 pte_clear(&init_mm, (vaddr), (ptep)); \
16445+ pax_close_kernel(); \
16446 __flush_tlb_one((vaddr)); \
16447 } while (0)
16448
16449 #endif /* !__ASSEMBLY__ */
16450
16451+#define HAVE_ARCH_UNMAPPED_AREA
16452+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
16453+
16454 /*
16455 * kern_addr_valid() is (1) for FLATMEM and (0) for
16456 * SPARSEMEM and DISCONTIGMEM
16457diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
16458index ed5903b..c7fe163 100644
16459--- a/arch/x86/include/asm/pgtable_32_types.h
16460+++ b/arch/x86/include/asm/pgtable_32_types.h
16461@@ -8,7 +8,7 @@
16462 */
16463 #ifdef CONFIG_X86_PAE
16464 # include <asm/pgtable-3level_types.h>
16465-# define PMD_SIZE (1UL << PMD_SHIFT)
16466+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
16467 # define PMD_MASK (~(PMD_SIZE - 1))
16468 #else
16469 # include <asm/pgtable-2level_types.h>
16470@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
16471 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
16472 #endif
16473
16474+#ifdef CONFIG_PAX_KERNEXEC
16475+#ifndef __ASSEMBLY__
16476+extern unsigned char MODULES_EXEC_VADDR[];
16477+extern unsigned char MODULES_EXEC_END[];
16478+#endif
16479+#include <asm/boot.h>
16480+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
16481+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
16482+#else
16483+#define ktla_ktva(addr) (addr)
16484+#define ktva_ktla(addr) (addr)
16485+#endif
16486+
16487 #define MODULES_VADDR VMALLOC_START
16488 #define MODULES_END VMALLOC_END
16489 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
16490diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
16491index e22c1db..23a625a 100644
16492--- a/arch/x86/include/asm/pgtable_64.h
16493+++ b/arch/x86/include/asm/pgtable_64.h
16494@@ -16,10 +16,14 @@
16495
16496 extern pud_t level3_kernel_pgt[512];
16497 extern pud_t level3_ident_pgt[512];
16498+extern pud_t level3_vmalloc_start_pgt[512];
16499+extern pud_t level3_vmalloc_end_pgt[512];
16500+extern pud_t level3_vmemmap_pgt[512];
16501+extern pud_t level2_vmemmap_pgt[512];
16502 extern pmd_t level2_kernel_pgt[512];
16503 extern pmd_t level2_fixmap_pgt[512];
16504-extern pmd_t level2_ident_pgt[512];
16505-extern pgd_t init_level4_pgt[];
16506+extern pmd_t level2_ident_pgt[512*2];
16507+extern pgd_t init_level4_pgt[512];
16508
16509 #define swapper_pg_dir init_level4_pgt
16510
16511@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
16512
16513 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
16514 {
16515+ pax_open_kernel();
16516 *pmdp = pmd;
16517+ pax_close_kernel();
16518 }
16519
16520 static inline void native_pmd_clear(pmd_t *pmd)
16521@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
16522
16523 static inline void native_set_pud(pud_t *pudp, pud_t pud)
16524 {
16525+ pax_open_kernel();
16526 *pudp = pud;
16527+ pax_close_kernel();
16528 }
16529
16530 static inline void native_pud_clear(pud_t *pud)
16531@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
16532
16533 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
16534 {
16535+ pax_open_kernel();
16536+ *pgdp = pgd;
16537+ pax_close_kernel();
16538+}
16539+
16540+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
16541+{
16542 *pgdp = pgd;
16543 }
16544
16545diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
16546index 2d88344..4679fc3 100644
16547--- a/arch/x86/include/asm/pgtable_64_types.h
16548+++ b/arch/x86/include/asm/pgtable_64_types.h
16549@@ -61,6 +61,11 @@ typedef struct { pteval_t pte; } pte_t;
16550 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
16551 #define MODULES_END _AC(0xffffffffff000000, UL)
16552 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
16553+#define MODULES_EXEC_VADDR MODULES_VADDR
16554+#define MODULES_EXEC_END MODULES_END
16555+
16556+#define ktla_ktva(addr) (addr)
16557+#define ktva_ktla(addr) (addr)
16558
16559 #define EARLY_DYNAMIC_PAGE_TABLES 64
16560
16561diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
16562index e642300..0ef8f31 100644
16563--- a/arch/x86/include/asm/pgtable_types.h
16564+++ b/arch/x86/include/asm/pgtable_types.h
16565@@ -16,13 +16,12 @@
16566 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
16567 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
16568 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
16569-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
16570+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
16571 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
16572 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
16573 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
16574-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
16575-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
16576-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
16577+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
16578+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
16579 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
16580
16581 /* If _PAGE_BIT_PRESENT is clear, we use these: */
16582@@ -40,7 +39,6 @@
16583 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
16584 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
16585 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
16586-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
16587 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
16588 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
16589 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
16590@@ -57,8 +55,10 @@
16591
16592 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
16593 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
16594-#else
16595+#elif defined(CONFIG_KMEMCHECK)
16596 #define _PAGE_NX (_AT(pteval_t, 0))
16597+#else
16598+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
16599 #endif
16600
16601 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
16602@@ -116,6 +116,9 @@
16603 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
16604 _PAGE_ACCESSED)
16605
16606+#define PAGE_READONLY_NOEXEC PAGE_READONLY
16607+#define PAGE_SHARED_NOEXEC PAGE_SHARED
16608+
16609 #define __PAGE_KERNEL_EXEC \
16610 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
16611 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
16612@@ -126,7 +129,7 @@
16613 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
16614 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
16615 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
16616-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
16617+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
16618 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
16619 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
16620 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
16621@@ -188,8 +191,8 @@
16622 * bits are combined, this will alow user to access the high address mapped
16623 * VDSO in the presence of CONFIG_COMPAT_VDSO
16624 */
16625-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
16626-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
16627+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
16628+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
16629 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
16630 #endif
16631
16632@@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
16633 {
16634 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
16635 }
16636+#endif
16637
16638+#if PAGETABLE_LEVELS == 3
16639+#include <asm-generic/pgtable-nopud.h>
16640+#endif
16641+
16642+#if PAGETABLE_LEVELS == 2
16643+#include <asm-generic/pgtable-nopmd.h>
16644+#endif
16645+
16646+#ifndef __ASSEMBLY__
16647 #if PAGETABLE_LEVELS > 3
16648 typedef struct { pudval_t pud; } pud_t;
16649
16650@@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud)
16651 return pud.pud;
16652 }
16653 #else
16654-#include <asm-generic/pgtable-nopud.h>
16655-
16656 static inline pudval_t native_pud_val(pud_t pud)
16657 {
16658 return native_pgd_val(pud.pgd);
16659@@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
16660 return pmd.pmd;
16661 }
16662 #else
16663-#include <asm-generic/pgtable-nopmd.h>
16664-
16665 static inline pmdval_t native_pmd_val(pmd_t pmd)
16666 {
16667 return native_pgd_val(pmd.pud.pgd);
16668@@ -303,7 +312,6 @@ typedef struct page *pgtable_t;
16669
16670 extern pteval_t __supported_pte_mask;
16671 extern void set_nx(void);
16672-extern int nx_enabled;
16673
16674 #define pgprot_writecombine pgprot_writecombine
16675 extern pgprot_t pgprot_writecombine(pgprot_t prot);
16676diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
16677index 22224b3..b3a2f90 100644
16678--- a/arch/x86/include/asm/processor.h
16679+++ b/arch/x86/include/asm/processor.h
16680@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
16681 : "memory");
16682 }
16683
16684+/* invpcid (%rdx),%rax */
16685+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
16686+
16687+#define INVPCID_SINGLE_ADDRESS 0UL
16688+#define INVPCID_SINGLE_CONTEXT 1UL
16689+#define INVPCID_ALL_GLOBAL 2UL
16690+#define INVPCID_ALL_MONGLOBAL 3UL
16691+
16692+#define PCID_KERNEL 0UL
16693+#define PCID_USER 1UL
16694+#define PCID_NOFLUSH (1UL << 63)
16695+
16696 static inline void load_cr3(pgd_t *pgdir)
16697 {
16698- write_cr3(__pa(pgdir));
16699+ write_cr3(__pa(pgdir) | PCID_KERNEL);
16700 }
16701
16702 #ifdef CONFIG_X86_32
16703@@ -282,7 +294,7 @@ struct tss_struct {
16704
16705 } ____cacheline_aligned;
16706
16707-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
16708+extern struct tss_struct init_tss[NR_CPUS];
16709
16710 /*
16711 * Save the original ist values for checking stack pointers during debugging
16712@@ -452,6 +464,7 @@ struct thread_struct {
16713 unsigned short ds;
16714 unsigned short fsindex;
16715 unsigned short gsindex;
16716+ unsigned short ss;
16717 #endif
16718 #ifdef CONFIG_X86_32
16719 unsigned long ip;
16720@@ -552,29 +565,8 @@ static inline void load_sp0(struct tss_struct *tss,
16721 extern unsigned long mmu_cr4_features;
16722 extern u32 *trampoline_cr4_features;
16723
16724-static inline void set_in_cr4(unsigned long mask)
16725-{
16726- unsigned long cr4;
16727-
16728- mmu_cr4_features |= mask;
16729- if (trampoline_cr4_features)
16730- *trampoline_cr4_features = mmu_cr4_features;
16731- cr4 = read_cr4();
16732- cr4 |= mask;
16733- write_cr4(cr4);
16734-}
16735-
16736-static inline void clear_in_cr4(unsigned long mask)
16737-{
16738- unsigned long cr4;
16739-
16740- mmu_cr4_features &= ~mask;
16741- if (trampoline_cr4_features)
16742- *trampoline_cr4_features = mmu_cr4_features;
16743- cr4 = read_cr4();
16744- cr4 &= ~mask;
16745- write_cr4(cr4);
16746-}
16747+extern void set_in_cr4(unsigned long mask);
16748+extern void clear_in_cr4(unsigned long mask);
16749
16750 typedef struct {
16751 unsigned long seg;
16752@@ -823,11 +815,18 @@ static inline void spin_lock_prefetch(const void *x)
16753 */
16754 #define TASK_SIZE PAGE_OFFSET
16755 #define TASK_SIZE_MAX TASK_SIZE
16756+
16757+#ifdef CONFIG_PAX_SEGMEXEC
16758+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
16759+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
16760+#else
16761 #define STACK_TOP TASK_SIZE
16762-#define STACK_TOP_MAX STACK_TOP
16763+#endif
16764+
16765+#define STACK_TOP_MAX TASK_SIZE
16766
16767 #define INIT_THREAD { \
16768- .sp0 = sizeof(init_stack) + (long)&init_stack, \
16769+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
16770 .vm86_info = NULL, \
16771 .sysenter_cs = __KERNEL_CS, \
16772 .io_bitmap_ptr = NULL, \
16773@@ -841,7 +840,7 @@ static inline void spin_lock_prefetch(const void *x)
16774 */
16775 #define INIT_TSS { \
16776 .x86_tss = { \
16777- .sp0 = sizeof(init_stack) + (long)&init_stack, \
16778+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
16779 .ss0 = __KERNEL_DS, \
16780 .ss1 = __KERNEL_CS, \
16781 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
16782@@ -852,11 +851,7 @@ static inline void spin_lock_prefetch(const void *x)
16783 extern unsigned long thread_saved_pc(struct task_struct *tsk);
16784
16785 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
16786-#define KSTK_TOP(info) \
16787-({ \
16788- unsigned long *__ptr = (unsigned long *)(info); \
16789- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
16790-})
16791+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
16792
16793 /*
16794 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
16795@@ -871,7 +866,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
16796 #define task_pt_regs(task) \
16797 ({ \
16798 struct pt_regs *__regs__; \
16799- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
16800+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
16801 __regs__ - 1; \
16802 })
16803
16804@@ -881,13 +876,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
16805 /*
16806 * User space process size. 47bits minus one guard page.
16807 */
16808-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
16809+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
16810
16811 /* This decides where the kernel will search for a free chunk of vm
16812 * space during mmap's.
16813 */
16814 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
16815- 0xc0000000 : 0xFFFFe000)
16816+ 0xc0000000 : 0xFFFFf000)
16817
16818 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
16819 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
16820@@ -898,11 +893,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
16821 #define STACK_TOP_MAX TASK_SIZE_MAX
16822
16823 #define INIT_THREAD { \
16824- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
16825+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
16826 }
16827
16828 #define INIT_TSS { \
16829- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
16830+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
16831 }
16832
16833 /*
16834@@ -930,6 +925,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
16835 */
16836 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
16837
16838+#ifdef CONFIG_PAX_SEGMEXEC
16839+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
16840+#endif
16841+
16842 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
16843
16844 /* Get/set a process' ability to use the timestamp counter instruction */
16845@@ -942,7 +941,8 @@ extern int set_tsc_mode(unsigned int val);
16846 extern u16 amd_get_nb_id(int cpu);
16847
16848 struct aperfmperf {
16849- u64 aperf, mperf;
16850+ u64 aperf __intentional_overflow(0);
16851+ u64 mperf __intentional_overflow(0);
16852 };
16853
16854 static inline void get_aperfmperf(struct aperfmperf *am)
16855@@ -970,7 +970,7 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
16856 return ratio;
16857 }
16858
16859-extern unsigned long arch_align_stack(unsigned long sp);
16860+#define arch_align_stack(x) ((x) & ~0xfUL)
16861 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
16862
16863 void default_idle(void);
16864@@ -980,6 +980,6 @@ bool xen_set_default_idle(void);
16865 #define xen_set_default_idle 0
16866 #endif
16867
16868-void stop_this_cpu(void *dummy);
16869+void stop_this_cpu(void *dummy) __noreturn;
16870
16871 #endif /* _ASM_X86_PROCESSOR_H */
16872diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
16873index 942a086..6c26446 100644
16874--- a/arch/x86/include/asm/ptrace.h
16875+++ b/arch/x86/include/asm/ptrace.h
16876@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
16877 }
16878
16879 /*
16880- * user_mode_vm(regs) determines whether a register set came from user mode.
16881+ * user_mode(regs) determines whether a register set came from user mode.
16882 * This is true if V8086 mode was enabled OR if the register set was from
16883 * protected mode with RPL-3 CS value. This tricky test checks that with
16884 * one comparison. Many places in the kernel can bypass this full check
16885- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
16886+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
16887+ * be used.
16888 */
16889-static inline int user_mode(struct pt_regs *regs)
16890+static inline int user_mode_novm(struct pt_regs *regs)
16891 {
16892 #ifdef CONFIG_X86_32
16893 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
16894 #else
16895- return !!(regs->cs & 3);
16896+ return !!(regs->cs & SEGMENT_RPL_MASK);
16897 #endif
16898 }
16899
16900-static inline int user_mode_vm(struct pt_regs *regs)
16901+static inline int user_mode(struct pt_regs *regs)
16902 {
16903 #ifdef CONFIG_X86_32
16904 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
16905 USER_RPL;
16906 #else
16907- return user_mode(regs);
16908+ return user_mode_novm(regs);
16909 #endif
16910 }
16911
16912@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
16913 #ifdef CONFIG_X86_64
16914 static inline bool user_64bit_mode(struct pt_regs *regs)
16915 {
16916+ unsigned long cs = regs->cs & 0xffff;
16917 #ifndef CONFIG_PARAVIRT
16918 /*
16919 * On non-paravirt systems, this is the only long mode CPL 3
16920 * selector. We do not allow long mode selectors in the LDT.
16921 */
16922- return regs->cs == __USER_CS;
16923+ return cs == __USER_CS;
16924 #else
16925 /* Headers are too twisted for this to go in paravirt.h. */
16926- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
16927+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
16928 #endif
16929 }
16930
16931@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
16932 * Traps from the kernel do not save sp and ss.
16933 * Use the helper function to retrieve sp.
16934 */
16935- if (offset == offsetof(struct pt_regs, sp) &&
16936- regs->cs == __KERNEL_CS)
16937- return kernel_stack_pointer(regs);
16938+ if (offset == offsetof(struct pt_regs, sp)) {
16939+ unsigned long cs = regs->cs & 0xffff;
16940+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
16941+ return kernel_stack_pointer(regs);
16942+ }
16943 #endif
16944 return *(unsigned long *)((unsigned long)regs + offset);
16945 }
16946diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
16947index 9c6b890..5305f53 100644
16948--- a/arch/x86/include/asm/realmode.h
16949+++ b/arch/x86/include/asm/realmode.h
16950@@ -22,16 +22,14 @@ struct real_mode_header {
16951 #endif
16952 /* APM/BIOS reboot */
16953 u32 machine_real_restart_asm;
16954-#ifdef CONFIG_X86_64
16955 u32 machine_real_restart_seg;
16956-#endif
16957 };
16958
16959 /* This must match data at trampoline_32/64.S */
16960 struct trampoline_header {
16961 #ifdef CONFIG_X86_32
16962 u32 start;
16963- u16 gdt_pad;
16964+ u16 boot_cs;
16965 u16 gdt_limit;
16966 u32 gdt_base;
16967 #else
16968diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
16969index a82c4f1..ac45053 100644
16970--- a/arch/x86/include/asm/reboot.h
16971+++ b/arch/x86/include/asm/reboot.h
16972@@ -6,13 +6,13 @@
16973 struct pt_regs;
16974
16975 struct machine_ops {
16976- void (*restart)(char *cmd);
16977- void (*halt)(void);
16978- void (*power_off)(void);
16979+ void (* __noreturn restart)(char *cmd);
16980+ void (* __noreturn halt)(void);
16981+ void (* __noreturn power_off)(void);
16982 void (*shutdown)(void);
16983 void (*crash_shutdown)(struct pt_regs *);
16984- void (*emergency_restart)(void);
16985-};
16986+ void (* __noreturn emergency_restart)(void);
16987+} __no_const;
16988
16989 extern struct machine_ops machine_ops;
16990
16991diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
16992index cad82c9..2e5c5c1 100644
16993--- a/arch/x86/include/asm/rwsem.h
16994+++ b/arch/x86/include/asm/rwsem.h
16995@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
16996 {
16997 asm volatile("# beginning down_read\n\t"
16998 LOCK_PREFIX _ASM_INC "(%1)\n\t"
16999+
17000+#ifdef CONFIG_PAX_REFCOUNT
17001+ "jno 0f\n"
17002+ LOCK_PREFIX _ASM_DEC "(%1)\n"
17003+ "int $4\n0:\n"
17004+ _ASM_EXTABLE(0b, 0b)
17005+#endif
17006+
17007 /* adds 0x00000001 */
17008 " jns 1f\n"
17009 " call call_rwsem_down_read_failed\n"
17010@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
17011 "1:\n\t"
17012 " mov %1,%2\n\t"
17013 " add %3,%2\n\t"
17014+
17015+#ifdef CONFIG_PAX_REFCOUNT
17016+ "jno 0f\n"
17017+ "sub %3,%2\n"
17018+ "int $4\n0:\n"
17019+ _ASM_EXTABLE(0b, 0b)
17020+#endif
17021+
17022 " jle 2f\n\t"
17023 LOCK_PREFIX " cmpxchg %2,%0\n\t"
17024 " jnz 1b\n\t"
17025@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
17026 long tmp;
17027 asm volatile("# beginning down_write\n\t"
17028 LOCK_PREFIX " xadd %1,(%2)\n\t"
17029+
17030+#ifdef CONFIG_PAX_REFCOUNT
17031+ "jno 0f\n"
17032+ "mov %1,(%2)\n"
17033+ "int $4\n0:\n"
17034+ _ASM_EXTABLE(0b, 0b)
17035+#endif
17036+
17037 /* adds 0xffff0001, returns the old value */
17038 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
17039 /* was the active mask 0 before? */
17040@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
17041 long tmp;
17042 asm volatile("# beginning __up_read\n\t"
17043 LOCK_PREFIX " xadd %1,(%2)\n\t"
17044+
17045+#ifdef CONFIG_PAX_REFCOUNT
17046+ "jno 0f\n"
17047+ "mov %1,(%2)\n"
17048+ "int $4\n0:\n"
17049+ _ASM_EXTABLE(0b, 0b)
17050+#endif
17051+
17052 /* subtracts 1, returns the old value */
17053 " jns 1f\n\t"
17054 " call call_rwsem_wake\n" /* expects old value in %edx */
17055@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
17056 long tmp;
17057 asm volatile("# beginning __up_write\n\t"
17058 LOCK_PREFIX " xadd %1,(%2)\n\t"
17059+
17060+#ifdef CONFIG_PAX_REFCOUNT
17061+ "jno 0f\n"
17062+ "mov %1,(%2)\n"
17063+ "int $4\n0:\n"
17064+ _ASM_EXTABLE(0b, 0b)
17065+#endif
17066+
17067 /* subtracts 0xffff0001, returns the old value */
17068 " jns 1f\n\t"
17069 " call call_rwsem_wake\n" /* expects old value in %edx */
17070@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
17071 {
17072 asm volatile("# beginning __downgrade_write\n\t"
17073 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
17074+
17075+#ifdef CONFIG_PAX_REFCOUNT
17076+ "jno 0f\n"
17077+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
17078+ "int $4\n0:\n"
17079+ _ASM_EXTABLE(0b, 0b)
17080+#endif
17081+
17082 /*
17083 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
17084 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
17085@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
17086 */
17087 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
17088 {
17089- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
17090+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
17091+
17092+#ifdef CONFIG_PAX_REFCOUNT
17093+ "jno 0f\n"
17094+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
17095+ "int $4\n0:\n"
17096+ _ASM_EXTABLE(0b, 0b)
17097+#endif
17098+
17099 : "+m" (sem->count)
17100 : "er" (delta));
17101 }
17102@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
17103 */
17104 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
17105 {
17106- return delta + xadd(&sem->count, delta);
17107+ return delta + xadd_check_overflow(&sem->count, delta);
17108 }
17109
17110 #endif /* __KERNEL__ */
17111diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
17112index c48a950..bc40804 100644
17113--- a/arch/x86/include/asm/segment.h
17114+++ b/arch/x86/include/asm/segment.h
17115@@ -64,10 +64,15 @@
17116 * 26 - ESPFIX small SS
17117 * 27 - per-cpu [ offset to per-cpu data area ]
17118 * 28 - stack_canary-20 [ for stack protector ]
17119- * 29 - unused
17120- * 30 - unused
17121+ * 29 - PCI BIOS CS
17122+ * 30 - PCI BIOS DS
17123 * 31 - TSS for double fault handler
17124 */
17125+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
17126+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
17127+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
17128+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
17129+
17130 #define GDT_ENTRY_TLS_MIN 6
17131 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
17132
17133@@ -79,6 +84,8 @@
17134
17135 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
17136
17137+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
17138+
17139 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
17140
17141 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
17142@@ -104,6 +111,12 @@
17143 #define __KERNEL_STACK_CANARY 0
17144 #endif
17145
17146+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
17147+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
17148+
17149+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
17150+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
17151+
17152 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
17153
17154 /*
17155@@ -141,7 +154,7 @@
17156 */
17157
17158 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
17159-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
17160+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
17161
17162
17163 #else
17164@@ -165,6 +178,8 @@
17165 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
17166 #define __USER32_DS __USER_DS
17167
17168+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
17169+
17170 #define GDT_ENTRY_TSS 8 /* needs two entries */
17171 #define GDT_ENTRY_LDT 10 /* needs two entries */
17172 #define GDT_ENTRY_TLS_MIN 12
17173@@ -173,6 +188,8 @@
17174 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
17175 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
17176
17177+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
17178+
17179 /* TLS indexes for 64bit - hardcoded in arch_prctl */
17180 #define FS_TLS 0
17181 #define GS_TLS 1
17182@@ -180,12 +197,14 @@
17183 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
17184 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
17185
17186-#define GDT_ENTRIES 16
17187+#define GDT_ENTRIES 17
17188
17189 #endif
17190
17191 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
17192+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
17193 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
17194+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
17195 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
17196 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
17197 #ifndef CONFIG_PARAVIRT
17198@@ -265,7 +284,7 @@ static inline unsigned long get_limit(unsigned long segment)
17199 {
17200 unsigned long __limit;
17201 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
17202- return __limit + 1;
17203+ return __limit;
17204 }
17205
17206 #endif /* !__ASSEMBLY__ */
17207diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
17208index 8d3120f..352b440 100644
17209--- a/arch/x86/include/asm/smap.h
17210+++ b/arch/x86/include/asm/smap.h
17211@@ -25,11 +25,40 @@
17212
17213 #include <asm/alternative-asm.h>
17214
17215+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17216+#define ASM_PAX_OPEN_USERLAND \
17217+ 661: jmp 663f; \
17218+ .pushsection .altinstr_replacement, "a" ; \
17219+ 662: pushq %rax; nop; \
17220+ .popsection ; \
17221+ .pushsection .altinstructions, "a" ; \
17222+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
17223+ .popsection ; \
17224+ call __pax_open_userland; \
17225+ popq %rax; \
17226+ 663:
17227+
17228+#define ASM_PAX_CLOSE_USERLAND \
17229+ 661: jmp 663f; \
17230+ .pushsection .altinstr_replacement, "a" ; \
17231+ 662: pushq %rax; nop; \
17232+ .popsection; \
17233+ .pushsection .altinstructions, "a" ; \
17234+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
17235+ .popsection; \
17236+ call __pax_close_userland; \
17237+ popq %rax; \
17238+ 663:
17239+#else
17240+#define ASM_PAX_OPEN_USERLAND
17241+#define ASM_PAX_CLOSE_USERLAND
17242+#endif
17243+
17244 #ifdef CONFIG_X86_SMAP
17245
17246 #define ASM_CLAC \
17247 661: ASM_NOP3 ; \
17248- .pushsection .altinstr_replacement, "ax" ; \
17249+ .pushsection .altinstr_replacement, "a" ; \
17250 662: __ASM_CLAC ; \
17251 .popsection ; \
17252 .pushsection .altinstructions, "a" ; \
17253@@ -38,7 +67,7 @@
17254
17255 #define ASM_STAC \
17256 661: ASM_NOP3 ; \
17257- .pushsection .altinstr_replacement, "ax" ; \
17258+ .pushsection .altinstr_replacement, "a" ; \
17259 662: __ASM_STAC ; \
17260 .popsection ; \
17261 .pushsection .altinstructions, "a" ; \
17262@@ -56,6 +85,37 @@
17263
17264 #include <asm/alternative.h>
17265
17266+#define __HAVE_ARCH_PAX_OPEN_USERLAND
17267+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
17268+
17269+extern void __pax_open_userland(void);
17270+static __always_inline unsigned long pax_open_userland(void)
17271+{
17272+
17273+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17274+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
17275+ :
17276+ : [open] "i" (__pax_open_userland)
17277+ : "memory", "rax");
17278+#endif
17279+
17280+ return 0;
17281+}
17282+
17283+extern void __pax_close_userland(void);
17284+static __always_inline unsigned long pax_close_userland(void)
17285+{
17286+
17287+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17288+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
17289+ :
17290+ : [close] "i" (__pax_close_userland)
17291+ : "memory", "rax");
17292+#endif
17293+
17294+ return 0;
17295+}
17296+
17297 #ifdef CONFIG_X86_SMAP
17298
17299 static __always_inline void clac(void)
17300diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
17301index b073aae..39f9bdd 100644
17302--- a/arch/x86/include/asm/smp.h
17303+++ b/arch/x86/include/asm/smp.h
17304@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
17305 /* cpus sharing the last level cache: */
17306 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
17307 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
17308-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
17309+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
17310
17311 static inline struct cpumask *cpu_sibling_mask(int cpu)
17312 {
17313@@ -79,7 +79,7 @@ struct smp_ops {
17314
17315 void (*send_call_func_ipi)(const struct cpumask *mask);
17316 void (*send_call_func_single_ipi)(int cpu);
17317-};
17318+} __no_const;
17319
17320 /* Globals due to paravirt */
17321 extern void set_cpu_sibling_map(int cpu);
17322@@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata;
17323 extern int safe_smp_processor_id(void);
17324
17325 #elif defined(CONFIG_X86_64_SMP)
17326-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
17327-
17328-#define stack_smp_processor_id() \
17329-({ \
17330- struct thread_info *ti; \
17331- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
17332- ti->cpu; \
17333-})
17334+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
17335+#define stack_smp_processor_id() raw_smp_processor_id()
17336 #define safe_smp_processor_id() smp_processor_id()
17337
17338 #endif
17339diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
17340index 33692ea..350a534 100644
17341--- a/arch/x86/include/asm/spinlock.h
17342+++ b/arch/x86/include/asm/spinlock.h
17343@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
17344 static inline void arch_read_lock(arch_rwlock_t *rw)
17345 {
17346 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
17347+
17348+#ifdef CONFIG_PAX_REFCOUNT
17349+ "jno 0f\n"
17350+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
17351+ "int $4\n0:\n"
17352+ _ASM_EXTABLE(0b, 0b)
17353+#endif
17354+
17355 "jns 1f\n"
17356 "call __read_lock_failed\n\t"
17357 "1:\n"
17358@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
17359 static inline void arch_write_lock(arch_rwlock_t *rw)
17360 {
17361 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
17362+
17363+#ifdef CONFIG_PAX_REFCOUNT
17364+ "jno 0f\n"
17365+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
17366+ "int $4\n0:\n"
17367+ _ASM_EXTABLE(0b, 0b)
17368+#endif
17369+
17370 "jz 1f\n"
17371 "call __write_lock_failed\n\t"
17372 "1:\n"
17373@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
17374
17375 static inline void arch_read_unlock(arch_rwlock_t *rw)
17376 {
17377- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
17378+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
17379+
17380+#ifdef CONFIG_PAX_REFCOUNT
17381+ "jno 0f\n"
17382+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
17383+ "int $4\n0:\n"
17384+ _ASM_EXTABLE(0b, 0b)
17385+#endif
17386+
17387 :"+m" (rw->lock) : : "memory");
17388 }
17389
17390 static inline void arch_write_unlock(arch_rwlock_t *rw)
17391 {
17392- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
17393+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
17394+
17395+#ifdef CONFIG_PAX_REFCOUNT
17396+ "jno 0f\n"
17397+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
17398+ "int $4\n0:\n"
17399+ _ASM_EXTABLE(0b, 0b)
17400+#endif
17401+
17402 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
17403 }
17404
17405diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
17406index 6a99859..03cb807 100644
17407--- a/arch/x86/include/asm/stackprotector.h
17408+++ b/arch/x86/include/asm/stackprotector.h
17409@@ -47,7 +47,7 @@
17410 * head_32 for boot CPU and setup_per_cpu_areas() for others.
17411 */
17412 #define GDT_STACK_CANARY_INIT \
17413- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
17414+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
17415
17416 /*
17417 * Initialize the stackprotector canary value.
17418@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
17419
17420 static inline void load_stack_canary_segment(void)
17421 {
17422-#ifdef CONFIG_X86_32
17423+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
17424 asm volatile ("mov %0, %%gs" : : "r" (0));
17425 #endif
17426 }
17427diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
17428index 70bbe39..4ae2bd4 100644
17429--- a/arch/x86/include/asm/stacktrace.h
17430+++ b/arch/x86/include/asm/stacktrace.h
17431@@ -11,28 +11,20 @@
17432
17433 extern int kstack_depth_to_print;
17434
17435-struct thread_info;
17436+struct task_struct;
17437 struct stacktrace_ops;
17438
17439-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
17440- unsigned long *stack,
17441- unsigned long bp,
17442- const struct stacktrace_ops *ops,
17443- void *data,
17444- unsigned long *end,
17445- int *graph);
17446+typedef unsigned long walk_stack_t(struct task_struct *task,
17447+ void *stack_start,
17448+ unsigned long *stack,
17449+ unsigned long bp,
17450+ const struct stacktrace_ops *ops,
17451+ void *data,
17452+ unsigned long *end,
17453+ int *graph);
17454
17455-extern unsigned long
17456-print_context_stack(struct thread_info *tinfo,
17457- unsigned long *stack, unsigned long bp,
17458- const struct stacktrace_ops *ops, void *data,
17459- unsigned long *end, int *graph);
17460-
17461-extern unsigned long
17462-print_context_stack_bp(struct thread_info *tinfo,
17463- unsigned long *stack, unsigned long bp,
17464- const struct stacktrace_ops *ops, void *data,
17465- unsigned long *end, int *graph);
17466+extern walk_stack_t print_context_stack;
17467+extern walk_stack_t print_context_stack_bp;
17468
17469 /* Generic stack tracer with callbacks */
17470
17471@@ -40,7 +32,7 @@ struct stacktrace_ops {
17472 void (*address)(void *data, unsigned long address, int reliable);
17473 /* On negative return stop dumping */
17474 int (*stack)(void *data, char *name);
17475- walk_stack_t walk_stack;
17476+ walk_stack_t *walk_stack;
17477 };
17478
17479 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
17480diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
17481index 4ec45b3..a4f0a8a 100644
17482--- a/arch/x86/include/asm/switch_to.h
17483+++ b/arch/x86/include/asm/switch_to.h
17484@@ -108,7 +108,7 @@ do { \
17485 "call __switch_to\n\t" \
17486 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
17487 __switch_canary \
17488- "movq %P[thread_info](%%rsi),%%r8\n\t" \
17489+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
17490 "movq %%rax,%%rdi\n\t" \
17491 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
17492 "jnz ret_from_fork\n\t" \
17493@@ -119,7 +119,7 @@ do { \
17494 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
17495 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
17496 [_tif_fork] "i" (_TIF_FORK), \
17497- [thread_info] "i" (offsetof(struct task_struct, stack)), \
17498+ [thread_info] "m" (current_tinfo), \
17499 [current_task] "m" (current_task) \
17500 __switch_canary_iparam \
17501 : "memory", "cc" __EXTRA_CLOBBER)
17502diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
17503index a1df6e8..e002940 100644
17504--- a/arch/x86/include/asm/thread_info.h
17505+++ b/arch/x86/include/asm/thread_info.h
17506@@ -10,6 +10,7 @@
17507 #include <linux/compiler.h>
17508 #include <asm/page.h>
17509 #include <asm/types.h>
17510+#include <asm/percpu.h>
17511
17512 /*
17513 * low level task data that entry.S needs immediate access to
17514@@ -23,7 +24,6 @@ struct exec_domain;
17515 #include <linux/atomic.h>
17516
17517 struct thread_info {
17518- struct task_struct *task; /* main task structure */
17519 struct exec_domain *exec_domain; /* execution domain */
17520 __u32 flags; /* low level flags */
17521 __u32 status; /* thread synchronous flags */
17522@@ -33,19 +33,13 @@ struct thread_info {
17523 mm_segment_t addr_limit;
17524 struct restart_block restart_block;
17525 void __user *sysenter_return;
17526-#ifdef CONFIG_X86_32
17527- unsigned long previous_esp; /* ESP of the previous stack in
17528- case of nested (IRQ) stacks
17529- */
17530- __u8 supervisor_stack[0];
17531-#endif
17532+ unsigned long lowest_stack;
17533 unsigned int sig_on_uaccess_error:1;
17534 unsigned int uaccess_err:1; /* uaccess failed */
17535 };
17536
17537-#define INIT_THREAD_INFO(tsk) \
17538+#define INIT_THREAD_INFO \
17539 { \
17540- .task = &tsk, \
17541 .exec_domain = &default_exec_domain, \
17542 .flags = 0, \
17543 .cpu = 0, \
17544@@ -56,7 +50,7 @@ struct thread_info {
17545 }, \
17546 }
17547
17548-#define init_thread_info (init_thread_union.thread_info)
17549+#define init_thread_info (init_thread_union.stack)
17550 #define init_stack (init_thread_union.stack)
17551
17552 #else /* !__ASSEMBLY__ */
17553@@ -97,6 +91,7 @@ struct thread_info {
17554 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
17555 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
17556 #define TIF_X32 30 /* 32-bit native x86-64 binary */
17557+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
17558
17559 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
17560 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
17561@@ -121,17 +116,18 @@ struct thread_info {
17562 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
17563 #define _TIF_ADDR32 (1 << TIF_ADDR32)
17564 #define _TIF_X32 (1 << TIF_X32)
17565+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
17566
17567 /* work to do in syscall_trace_enter() */
17568 #define _TIF_WORK_SYSCALL_ENTRY \
17569 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
17570 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
17571- _TIF_NOHZ)
17572+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
17573
17574 /* work to do in syscall_trace_leave() */
17575 #define _TIF_WORK_SYSCALL_EXIT \
17576 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
17577- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
17578+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
17579
17580 /* work to do on interrupt/exception return */
17581 #define _TIF_WORK_MASK \
17582@@ -142,7 +138,7 @@ struct thread_info {
17583 /* work to do on any return to user space */
17584 #define _TIF_ALLWORK_MASK \
17585 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
17586- _TIF_NOHZ)
17587+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
17588
17589 /* Only used for 64 bit */
17590 #define _TIF_DO_NOTIFY_MASK \
17591@@ -158,45 +154,40 @@ struct thread_info {
17592
17593 #define PREEMPT_ACTIVE 0x10000000
17594
17595-#ifdef CONFIG_X86_32
17596-
17597-#define STACK_WARN (THREAD_SIZE/8)
17598-/*
17599- * macros/functions for gaining access to the thread information structure
17600- *
17601- * preempt_count needs to be 1 initially, until the scheduler is functional.
17602- */
17603-#ifndef __ASSEMBLY__
17604-
17605-
17606-/* how to get the current stack pointer from C */
17607-register unsigned long current_stack_pointer asm("esp") __used;
17608-
17609-/* how to get the thread information struct from C */
17610-static inline struct thread_info *current_thread_info(void)
17611-{
17612- return (struct thread_info *)
17613- (current_stack_pointer & ~(THREAD_SIZE - 1));
17614-}
17615-
17616-#else /* !__ASSEMBLY__ */
17617-
17618+#ifdef __ASSEMBLY__
17619 /* how to get the thread information struct from ASM */
17620 #define GET_THREAD_INFO(reg) \
17621- movl $-THREAD_SIZE, reg; \
17622- andl %esp, reg
17623+ mov PER_CPU_VAR(current_tinfo), reg
17624
17625 /* use this one if reg already contains %esp */
17626-#define GET_THREAD_INFO_WITH_ESP(reg) \
17627- andl $-THREAD_SIZE, reg
17628+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
17629+#else
17630+/* how to get the thread information struct from C */
17631+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
17632+
17633+static __always_inline struct thread_info *current_thread_info(void)
17634+{
17635+ return this_cpu_read_stable(current_tinfo);
17636+}
17637+#endif
17638+
17639+#ifdef CONFIG_X86_32
17640+
17641+#define STACK_WARN (THREAD_SIZE/8)
17642+/*
17643+ * macros/functions for gaining access to the thread information structure
17644+ *
17645+ * preempt_count needs to be 1 initially, until the scheduler is functional.
17646+ */
17647+#ifndef __ASSEMBLY__
17648+
17649+/* how to get the current stack pointer from C */
17650+register unsigned long current_stack_pointer asm("esp") __used;
17651
17652 #endif
17653
17654 #else /* X86_32 */
17655
17656-#include <asm/percpu.h>
17657-#define KERNEL_STACK_OFFSET (5*8)
17658-
17659 /*
17660 * macros/functions for gaining access to the thread information structure
17661 * preempt_count needs to be 1 initially, until the scheduler is functional.
17662@@ -204,27 +195,8 @@ static inline struct thread_info *current_thread_info(void)
17663 #ifndef __ASSEMBLY__
17664 DECLARE_PER_CPU(unsigned long, kernel_stack);
17665
17666-static inline struct thread_info *current_thread_info(void)
17667-{
17668- struct thread_info *ti;
17669- ti = (void *)(this_cpu_read_stable(kernel_stack) +
17670- KERNEL_STACK_OFFSET - THREAD_SIZE);
17671- return ti;
17672-}
17673-
17674-#else /* !__ASSEMBLY__ */
17675-
17676-/* how to get the thread information struct from ASM */
17677-#define GET_THREAD_INFO(reg) \
17678- movq PER_CPU_VAR(kernel_stack),reg ; \
17679- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
17680-
17681-/*
17682- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
17683- * a certain register (to be used in assembler memory operands).
17684- */
17685-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
17686-
17687+/* how to get the current stack pointer from C */
17688+register unsigned long current_stack_pointer asm("rsp") __used;
17689 #endif
17690
17691 #endif /* !X86_32 */
17692@@ -283,5 +255,12 @@ static inline bool is_ia32_task(void)
17693 extern void arch_task_cache_init(void);
17694 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
17695 extern void arch_release_task_struct(struct task_struct *tsk);
17696+
17697+#define __HAVE_THREAD_FUNCTIONS
17698+#define task_thread_info(task) (&(task)->tinfo)
17699+#define task_stack_page(task) ((task)->stack)
17700+#define setup_thread_stack(p, org) do {} while (0)
17701+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
17702+
17703 #endif
17704 #endif /* _ASM_X86_THREAD_INFO_H */
17705diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
17706index 50a7fc0..45844c0 100644
17707--- a/arch/x86/include/asm/tlbflush.h
17708+++ b/arch/x86/include/asm/tlbflush.h
17709@@ -17,18 +17,44 @@
17710
17711 static inline void __native_flush_tlb(void)
17712 {
17713+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17714+ unsigned long descriptor[2];
17715+
17716+ descriptor[0] = PCID_KERNEL;
17717+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
17718+ return;
17719+ }
17720+
17721+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17722+ if (static_cpu_has(X86_FEATURE_PCID)) {
17723+ unsigned int cpu = raw_get_cpu();
17724+
17725+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17726+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17727+ raw_put_cpu_no_resched();
17728+ return;
17729+ }
17730+#endif
17731+
17732 native_write_cr3(native_read_cr3());
17733 }
17734
17735 static inline void __native_flush_tlb_global_irq_disabled(void)
17736 {
17737- unsigned long cr4;
17738+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17739+ unsigned long descriptor[2];
17740
17741- cr4 = native_read_cr4();
17742- /* clear PGE */
17743- native_write_cr4(cr4 & ~X86_CR4_PGE);
17744- /* write old PGE again and flush TLBs */
17745- native_write_cr4(cr4);
17746+ descriptor[0] = PCID_KERNEL;
17747+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
17748+ } else {
17749+ unsigned long cr4;
17750+
17751+ cr4 = native_read_cr4();
17752+ /* clear PGE */
17753+ native_write_cr4(cr4 & ~X86_CR4_PGE);
17754+ /* write old PGE again and flush TLBs */
17755+ native_write_cr4(cr4);
17756+ }
17757 }
17758
17759 static inline void __native_flush_tlb_global(void)
17760@@ -49,6 +75,42 @@ static inline void __native_flush_tlb_global(void)
17761
17762 static inline void __native_flush_tlb_single(unsigned long addr)
17763 {
17764+
17765+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17766+ unsigned long descriptor[2];
17767+
17768+ descriptor[0] = PCID_KERNEL;
17769+ descriptor[1] = addr;
17770+
17771+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17772+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
17773+ if (addr < TASK_SIZE_MAX)
17774+ descriptor[1] += pax_user_shadow_base;
17775+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
17776+ }
17777+
17778+ descriptor[0] = PCID_USER;
17779+ descriptor[1] = addr;
17780+#endif
17781+
17782+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
17783+ return;
17784+ }
17785+
17786+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17787+ if (static_cpu_has(X86_FEATURE_PCID)) {
17788+ unsigned int cpu = raw_get_cpu();
17789+
17790+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
17791+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
17792+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17793+ raw_put_cpu_no_resched();
17794+
17795+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
17796+ addr += pax_user_shadow_base;
17797+ }
17798+#endif
17799+
17800 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
17801 }
17802
17803diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
17804index 5ee2687..74590b9 100644
17805--- a/arch/x86/include/asm/uaccess.h
17806+++ b/arch/x86/include/asm/uaccess.h
17807@@ -7,6 +7,7 @@
17808 #include <linux/compiler.h>
17809 #include <linux/thread_info.h>
17810 #include <linux/string.h>
17811+#include <linux/sched.h>
17812 #include <asm/asm.h>
17813 #include <asm/page.h>
17814 #include <asm/smap.h>
17815@@ -29,7 +30,12 @@
17816
17817 #define get_ds() (KERNEL_DS)
17818 #define get_fs() (current_thread_info()->addr_limit)
17819+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17820+void __set_fs(mm_segment_t x);
17821+void set_fs(mm_segment_t x);
17822+#else
17823 #define set_fs(x) (current_thread_info()->addr_limit = (x))
17824+#endif
17825
17826 #define segment_eq(a, b) ((a).seg == (b).seg)
17827
17828@@ -77,8 +83,33 @@
17829 * checks that the pointer is in the user space range - after calling
17830 * this function, memory access functions may still return -EFAULT.
17831 */
17832-#define access_ok(type, addr, size) \
17833- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
17834+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
17835+#define access_ok(type, addr, size) \
17836+({ \
17837+ long __size = size; \
17838+ unsigned long __addr = (unsigned long)addr; \
17839+ unsigned long __addr_ao = __addr & PAGE_MASK; \
17840+ unsigned long __end_ao = __addr + __size - 1; \
17841+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
17842+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
17843+ while(__addr_ao <= __end_ao) { \
17844+ char __c_ao; \
17845+ __addr_ao += PAGE_SIZE; \
17846+ if (__size > PAGE_SIZE) \
17847+ cond_resched(); \
17848+ if (__get_user(__c_ao, (char __user *)__addr)) \
17849+ break; \
17850+ if (type != VERIFY_WRITE) { \
17851+ __addr = __addr_ao; \
17852+ continue; \
17853+ } \
17854+ if (__put_user(__c_ao, (char __user *)__addr)) \
17855+ break; \
17856+ __addr = __addr_ao; \
17857+ } \
17858+ } \
17859+ __ret_ao; \
17860+})
17861
17862 /*
17863 * The exception table consists of pairs of addresses relative to the
17864@@ -165,10 +196,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
17865 register __inttype(*(ptr)) __val_gu asm("%edx"); \
17866 __chk_user_ptr(ptr); \
17867 might_fault(); \
17868+ pax_open_userland(); \
17869 asm volatile("call __get_user_%P3" \
17870 : "=a" (__ret_gu), "=r" (__val_gu) \
17871 : "0" (ptr), "i" (sizeof(*(ptr)))); \
17872 (x) = (__typeof__(*(ptr))) __val_gu; \
17873+ pax_close_userland(); \
17874 __ret_gu; \
17875 })
17876
17877@@ -176,13 +209,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
17878 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
17879 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
17880
17881-
17882+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17883+#define __copyuser_seg "gs;"
17884+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
17885+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
17886+#else
17887+#define __copyuser_seg
17888+#define __COPYUSER_SET_ES
17889+#define __COPYUSER_RESTORE_ES
17890+#endif
17891
17892 #ifdef CONFIG_X86_32
17893 #define __put_user_asm_u64(x, addr, err, errret) \
17894 asm volatile(ASM_STAC "\n" \
17895- "1: movl %%eax,0(%2)\n" \
17896- "2: movl %%edx,4(%2)\n" \
17897+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
17898+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
17899 "3: " ASM_CLAC "\n" \
17900 ".section .fixup,\"ax\"\n" \
17901 "4: movl %3,%0\n" \
17902@@ -195,8 +236,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
17903
17904 #define __put_user_asm_ex_u64(x, addr) \
17905 asm volatile(ASM_STAC "\n" \
17906- "1: movl %%eax,0(%1)\n" \
17907- "2: movl %%edx,4(%1)\n" \
17908+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
17909+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
17910 "3: " ASM_CLAC "\n" \
17911 _ASM_EXTABLE_EX(1b, 2b) \
17912 _ASM_EXTABLE_EX(2b, 3b) \
17913@@ -246,7 +287,8 @@ extern void __put_user_8(void);
17914 __typeof__(*(ptr)) __pu_val; \
17915 __chk_user_ptr(ptr); \
17916 might_fault(); \
17917- __pu_val = x; \
17918+ __pu_val = (x); \
17919+ pax_open_userland(); \
17920 switch (sizeof(*(ptr))) { \
17921 case 1: \
17922 __put_user_x(1, __pu_val, ptr, __ret_pu); \
17923@@ -264,6 +306,7 @@ extern void __put_user_8(void);
17924 __put_user_x(X, __pu_val, ptr, __ret_pu); \
17925 break; \
17926 } \
17927+ pax_close_userland(); \
17928 __ret_pu; \
17929 })
17930
17931@@ -344,8 +387,10 @@ do { \
17932 } while (0)
17933
17934 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
17935+do { \
17936+ pax_open_userland(); \
17937 asm volatile(ASM_STAC "\n" \
17938- "1: mov"itype" %2,%"rtype"1\n" \
17939+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
17940 "2: " ASM_CLAC "\n" \
17941 ".section .fixup,\"ax\"\n" \
17942 "3: mov %3,%0\n" \
17943@@ -353,8 +398,10 @@ do { \
17944 " jmp 2b\n" \
17945 ".previous\n" \
17946 _ASM_EXTABLE(1b, 3b) \
17947- : "=r" (err), ltype(x) \
17948- : "m" (__m(addr)), "i" (errret), "0" (err))
17949+ : "=r" (err), ltype (x) \
17950+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
17951+ pax_close_userland(); \
17952+} while (0)
17953
17954 #define __get_user_size_ex(x, ptr, size) \
17955 do { \
17956@@ -378,7 +425,7 @@ do { \
17957 } while (0)
17958
17959 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
17960- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
17961+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
17962 "2:\n" \
17963 _ASM_EXTABLE_EX(1b, 2b) \
17964 : ltype(x) : "m" (__m(addr)))
17965@@ -395,13 +442,24 @@ do { \
17966 int __gu_err; \
17967 unsigned long __gu_val; \
17968 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
17969- (x) = (__force __typeof__(*(ptr)))__gu_val; \
17970+ (x) = (__typeof__(*(ptr)))__gu_val; \
17971 __gu_err; \
17972 })
17973
17974 /* FIXME: this hack is definitely wrong -AK */
17975 struct __large_struct { unsigned long buf[100]; };
17976-#define __m(x) (*(struct __large_struct __user *)(x))
17977+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17978+#define ____m(x) \
17979+({ \
17980+ unsigned long ____x = (unsigned long)(x); \
17981+ if (____x < pax_user_shadow_base) \
17982+ ____x += pax_user_shadow_base; \
17983+ (typeof(x))____x; \
17984+})
17985+#else
17986+#define ____m(x) (x)
17987+#endif
17988+#define __m(x) (*(struct __large_struct __user *)____m(x))
17989
17990 /*
17991 * Tell gcc we read from memory instead of writing: this is because
17992@@ -409,8 +467,10 @@ struct __large_struct { unsigned long buf[100]; };
17993 * aliasing issues.
17994 */
17995 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
17996+do { \
17997+ pax_open_userland(); \
17998 asm volatile(ASM_STAC "\n" \
17999- "1: mov"itype" %"rtype"1,%2\n" \
18000+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
18001 "2: " ASM_CLAC "\n" \
18002 ".section .fixup,\"ax\"\n" \
18003 "3: mov %3,%0\n" \
18004@@ -418,10 +478,12 @@ struct __large_struct { unsigned long buf[100]; };
18005 ".previous\n" \
18006 _ASM_EXTABLE(1b, 3b) \
18007 : "=r"(err) \
18008- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
18009+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
18010+ pax_close_userland(); \
18011+} while (0)
18012
18013 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
18014- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
18015+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
18016 "2:\n" \
18017 _ASM_EXTABLE_EX(1b, 2b) \
18018 : : ltype(x), "m" (__m(addr)))
18019@@ -431,11 +493,13 @@ struct __large_struct { unsigned long buf[100]; };
18020 */
18021 #define uaccess_try do { \
18022 current_thread_info()->uaccess_err = 0; \
18023+ pax_open_userland(); \
18024 stac(); \
18025 barrier();
18026
18027 #define uaccess_catch(err) \
18028 clac(); \
18029+ pax_close_userland(); \
18030 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
18031 } while (0)
18032
18033@@ -460,8 +524,12 @@ struct __large_struct { unsigned long buf[100]; };
18034 * On error, the variable @x is set to zero.
18035 */
18036
18037+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18038+#define __get_user(x, ptr) get_user((x), (ptr))
18039+#else
18040 #define __get_user(x, ptr) \
18041 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
18042+#endif
18043
18044 /**
18045 * __put_user: - Write a simple value into user space, with less checking.
18046@@ -483,8 +551,12 @@ struct __large_struct { unsigned long buf[100]; };
18047 * Returns zero on success, or -EFAULT on error.
18048 */
18049
18050+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18051+#define __put_user(x, ptr) put_user((x), (ptr))
18052+#else
18053 #define __put_user(x, ptr) \
18054 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
18055+#endif
18056
18057 #define __get_user_unaligned __get_user
18058 #define __put_user_unaligned __put_user
18059@@ -502,7 +574,7 @@ struct __large_struct { unsigned long buf[100]; };
18060 #define get_user_ex(x, ptr) do { \
18061 unsigned long __gue_val; \
18062 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
18063- (x) = (__force __typeof__(*(ptr)))__gue_val; \
18064+ (x) = (__typeof__(*(ptr)))__gue_val; \
18065 } while (0)
18066
18067 #define put_user_try uaccess_try
18068@@ -519,8 +591,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
18069 extern __must_check long strlen_user(const char __user *str);
18070 extern __must_check long strnlen_user(const char __user *str, long n);
18071
18072-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
18073-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
18074+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
18075+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
18076
18077 /*
18078 * movsl can be slow when source and dest are not both 8-byte aligned
18079diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
18080index 7f760a9..04b1c65 100644
18081--- a/arch/x86/include/asm/uaccess_32.h
18082+++ b/arch/x86/include/asm/uaccess_32.h
18083@@ -11,15 +11,15 @@
18084 #include <asm/page.h>
18085
18086 unsigned long __must_check __copy_to_user_ll
18087- (void __user *to, const void *from, unsigned long n);
18088+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
18089 unsigned long __must_check __copy_from_user_ll
18090- (void *to, const void __user *from, unsigned long n);
18091+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
18092 unsigned long __must_check __copy_from_user_ll_nozero
18093- (void *to, const void __user *from, unsigned long n);
18094+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
18095 unsigned long __must_check __copy_from_user_ll_nocache
18096- (void *to, const void __user *from, unsigned long n);
18097+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
18098 unsigned long __must_check __copy_from_user_ll_nocache_nozero
18099- (void *to, const void __user *from, unsigned long n);
18100+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
18101
18102 /**
18103 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
18104@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
18105 static __always_inline unsigned long __must_check
18106 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
18107 {
18108+ if ((long)n < 0)
18109+ return n;
18110+
18111+ check_object_size(from, n, true);
18112+
18113 if (__builtin_constant_p(n)) {
18114 unsigned long ret;
18115
18116@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
18117 __copy_to_user(void __user *to, const void *from, unsigned long n)
18118 {
18119 might_fault();
18120+
18121 return __copy_to_user_inatomic(to, from, n);
18122 }
18123
18124 static __always_inline unsigned long
18125 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
18126 {
18127+ if ((long)n < 0)
18128+ return n;
18129+
18130 /* Avoid zeroing the tail if the copy fails..
18131 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
18132 * but as the zeroing behaviour is only significant when n is not
18133@@ -137,6 +146,12 @@ static __always_inline unsigned long
18134 __copy_from_user(void *to, const void __user *from, unsigned long n)
18135 {
18136 might_fault();
18137+
18138+ if ((long)n < 0)
18139+ return n;
18140+
18141+ check_object_size(to, n, false);
18142+
18143 if (__builtin_constant_p(n)) {
18144 unsigned long ret;
18145
18146@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
18147 const void __user *from, unsigned long n)
18148 {
18149 might_fault();
18150+
18151+ if ((long)n < 0)
18152+ return n;
18153+
18154 if (__builtin_constant_p(n)) {
18155 unsigned long ret;
18156
18157@@ -181,15 +200,19 @@ static __always_inline unsigned long
18158 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
18159 unsigned long n)
18160 {
18161- return __copy_from_user_ll_nocache_nozero(to, from, n);
18162+ if ((long)n < 0)
18163+ return n;
18164+
18165+ return __copy_from_user_ll_nocache_nozero(to, from, n);
18166 }
18167
18168-unsigned long __must_check copy_to_user(void __user *to,
18169- const void *from, unsigned long n);
18170-unsigned long __must_check _copy_from_user(void *to,
18171- const void __user *from,
18172- unsigned long n);
18173-
18174+extern void copy_to_user_overflow(void)
18175+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
18176+ __compiletime_error("copy_to_user() buffer size is not provably correct")
18177+#else
18178+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
18179+#endif
18180+;
18181
18182 extern void copy_from_user_overflow(void)
18183 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
18184@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
18185 #endif
18186 ;
18187
18188-static inline unsigned long __must_check copy_from_user(void *to,
18189- const void __user *from,
18190- unsigned long n)
18191+/**
18192+ * copy_to_user: - Copy a block of data into user space.
18193+ * @to: Destination address, in user space.
18194+ * @from: Source address, in kernel space.
18195+ * @n: Number of bytes to copy.
18196+ *
18197+ * Context: User context only. This function may sleep.
18198+ *
18199+ * Copy data from kernel space to user space.
18200+ *
18201+ * Returns number of bytes that could not be copied.
18202+ * On success, this will be zero.
18203+ */
18204+static inline unsigned long __must_check
18205+copy_to_user(void __user *to, const void *from, unsigned long n)
18206 {
18207- int sz = __compiletime_object_size(to);
18208+ size_t sz = __compiletime_object_size(from);
18209
18210- if (likely(sz == -1 || sz >= n))
18211- n = _copy_from_user(to, from, n);
18212- else
18213+ if (unlikely(sz != (size_t)-1 && sz < n))
18214+ copy_to_user_overflow();
18215+ else if (access_ok(VERIFY_WRITE, to, n))
18216+ n = __copy_to_user(to, from, n);
18217+ return n;
18218+}
18219+
18220+/**
18221+ * copy_from_user: - Copy a block of data from user space.
18222+ * @to: Destination address, in kernel space.
18223+ * @from: Source address, in user space.
18224+ * @n: Number of bytes to copy.
18225+ *
18226+ * Context: User context only. This function may sleep.
18227+ *
18228+ * Copy data from user space to kernel space.
18229+ *
18230+ * Returns number of bytes that could not be copied.
18231+ * On success, this will be zero.
18232+ *
18233+ * If some data could not be copied, this function will pad the copied
18234+ * data to the requested size using zero bytes.
18235+ */
18236+static inline unsigned long __must_check
18237+copy_from_user(void *to, const void __user *from, unsigned long n)
18238+{
18239+ size_t sz = __compiletime_object_size(to);
18240+
18241+ check_object_size(to, n, false);
18242+
18243+ if (unlikely(sz != (size_t)-1 && sz < n))
18244 copy_from_user_overflow();
18245-
18246+ else if (access_ok(VERIFY_READ, from, n))
18247+ n = __copy_from_user(to, from, n);
18248+ else if ((long)n > 0)
18249+ memset(to, 0, n);
18250 return n;
18251 }
18252
18253diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
18254index 142810c..1dbe82f 100644
18255--- a/arch/x86/include/asm/uaccess_64.h
18256+++ b/arch/x86/include/asm/uaccess_64.h
18257@@ -10,6 +10,9 @@
18258 #include <asm/alternative.h>
18259 #include <asm/cpufeature.h>
18260 #include <asm/page.h>
18261+#include <asm/pgtable.h>
18262+
18263+#define set_fs(x) (current_thread_info()->addr_limit = (x))
18264
18265 /*
18266 * Copy To/From Userspace
18267@@ -17,13 +20,13 @@
18268
18269 /* Handles exceptions in both to and from, but doesn't do access_ok */
18270 __must_check unsigned long
18271-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
18272+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
18273 __must_check unsigned long
18274-copy_user_generic_string(void *to, const void *from, unsigned len);
18275+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
18276 __must_check unsigned long
18277-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
18278+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
18279
18280-static __always_inline __must_check unsigned long
18281+static __always_inline __must_check __size_overflow(3) unsigned long
18282 copy_user_generic(void *to, const void *from, unsigned len)
18283 {
18284 unsigned ret;
18285@@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
18286 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
18287 "=d" (len)),
18288 "1" (to), "2" (from), "3" (len)
18289- : "memory", "rcx", "r8", "r9", "r10", "r11");
18290+ : "memory", "rcx", "r8", "r9", "r11");
18291 return ret;
18292 }
18293
18294+static __always_inline __must_check unsigned long
18295+__copy_to_user(void __user *to, const void *from, unsigned long len);
18296+static __always_inline __must_check unsigned long
18297+__copy_from_user(void *to, const void __user *from, unsigned long len);
18298 __must_check unsigned long
18299-_copy_to_user(void __user *to, const void *from, unsigned len);
18300-__must_check unsigned long
18301-_copy_from_user(void *to, const void __user *from, unsigned len);
18302-__must_check unsigned long
18303-copy_in_user(void __user *to, const void __user *from, unsigned len);
18304+copy_in_user(void __user *to, const void __user *from, unsigned long len);
18305+
18306+extern void copy_to_user_overflow(void)
18307+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
18308+ __compiletime_error("copy_to_user() buffer size is not provably correct")
18309+#else
18310+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
18311+#endif
18312+;
18313+
18314+extern void copy_from_user_overflow(void)
18315+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
18316+ __compiletime_error("copy_from_user() buffer size is not provably correct")
18317+#else
18318+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
18319+#endif
18320+;
18321
18322 static inline unsigned long __must_check copy_from_user(void *to,
18323 const void __user *from,
18324 unsigned long n)
18325 {
18326- int sz = __compiletime_object_size(to);
18327-
18328 might_fault();
18329- if (likely(sz == -1 || sz >= n))
18330- n = _copy_from_user(to, from, n);
18331-#ifdef CONFIG_DEBUG_VM
18332- else
18333- WARN(1, "Buffer overflow detected!\n");
18334-#endif
18335+
18336+ check_object_size(to, n, false);
18337+
18338+ if (access_ok(VERIFY_READ, from, n))
18339+ n = __copy_from_user(to, from, n);
18340+ else if (n < INT_MAX)
18341+ memset(to, 0, n);
18342 return n;
18343 }
18344
18345 static __always_inline __must_check
18346-int copy_to_user(void __user *dst, const void *src, unsigned size)
18347+int copy_to_user(void __user *dst, const void *src, unsigned long size)
18348 {
18349 might_fault();
18350
18351- return _copy_to_user(dst, src, size);
18352+ if (access_ok(VERIFY_WRITE, dst, size))
18353+ size = __copy_to_user(dst, src, size);
18354+ return size;
18355 }
18356
18357 static __always_inline __must_check
18358-int __copy_from_user(void *dst, const void __user *src, unsigned size)
18359+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
18360 {
18361- int ret = 0;
18362+ size_t sz = __compiletime_object_size(dst);
18363+ unsigned ret = 0;
18364
18365 might_fault();
18366+
18367+ if (size > INT_MAX)
18368+ return size;
18369+
18370+ check_object_size(dst, size, false);
18371+
18372+#ifdef CONFIG_PAX_MEMORY_UDEREF
18373+ if (!__access_ok(VERIFY_READ, src, size))
18374+ return size;
18375+#endif
18376+
18377+ if (unlikely(sz != (size_t)-1 && sz < size)) {
18378+ copy_from_user_overflow();
18379+ return size;
18380+ }
18381+
18382 if (!__builtin_constant_p(size))
18383- return copy_user_generic(dst, (__force void *)src, size);
18384+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
18385 switch (size) {
18386- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
18387+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
18388 ret, "b", "b", "=q", 1);
18389 return ret;
18390- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
18391+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
18392 ret, "w", "w", "=r", 2);
18393 return ret;
18394- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
18395+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
18396 ret, "l", "k", "=r", 4);
18397 return ret;
18398- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
18399+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
18400 ret, "q", "", "=r", 8);
18401 return ret;
18402 case 10:
18403- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
18404+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
18405 ret, "q", "", "=r", 10);
18406 if (unlikely(ret))
18407 return ret;
18408 __get_user_asm(*(u16 *)(8 + (char *)dst),
18409- (u16 __user *)(8 + (char __user *)src),
18410+ (const u16 __user *)(8 + (const char __user *)src),
18411 ret, "w", "w", "=r", 2);
18412 return ret;
18413 case 16:
18414- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
18415+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
18416 ret, "q", "", "=r", 16);
18417 if (unlikely(ret))
18418 return ret;
18419 __get_user_asm(*(u64 *)(8 + (char *)dst),
18420- (u64 __user *)(8 + (char __user *)src),
18421+ (const u64 __user *)(8 + (const char __user *)src),
18422 ret, "q", "", "=r", 8);
18423 return ret;
18424 default:
18425- return copy_user_generic(dst, (__force void *)src, size);
18426+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
18427 }
18428 }
18429
18430 static __always_inline __must_check
18431-int __copy_to_user(void __user *dst, const void *src, unsigned size)
18432+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
18433 {
18434- int ret = 0;
18435+ size_t sz = __compiletime_object_size(src);
18436+ unsigned ret = 0;
18437
18438 might_fault();
18439+
18440+ if (size > INT_MAX)
18441+ return size;
18442+
18443+ check_object_size(src, size, true);
18444+
18445+#ifdef CONFIG_PAX_MEMORY_UDEREF
18446+ if (!__access_ok(VERIFY_WRITE, dst, size))
18447+ return size;
18448+#endif
18449+
18450+ if (unlikely(sz != (size_t)-1 && sz < size)) {
18451+ copy_to_user_overflow();
18452+ return size;
18453+ }
18454+
18455 if (!__builtin_constant_p(size))
18456- return copy_user_generic((__force void *)dst, src, size);
18457+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
18458 switch (size) {
18459- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
18460+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
18461 ret, "b", "b", "iq", 1);
18462 return ret;
18463- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
18464+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
18465 ret, "w", "w", "ir", 2);
18466 return ret;
18467- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
18468+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
18469 ret, "l", "k", "ir", 4);
18470 return ret;
18471- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
18472+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
18473 ret, "q", "", "er", 8);
18474 return ret;
18475 case 10:
18476- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
18477+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
18478 ret, "q", "", "er", 10);
18479 if (unlikely(ret))
18480 return ret;
18481 asm("":::"memory");
18482- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
18483+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
18484 ret, "w", "w", "ir", 2);
18485 return ret;
18486 case 16:
18487- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
18488+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
18489 ret, "q", "", "er", 16);
18490 if (unlikely(ret))
18491 return ret;
18492 asm("":::"memory");
18493- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
18494+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
18495 ret, "q", "", "er", 8);
18496 return ret;
18497 default:
18498- return copy_user_generic((__force void *)dst, src, size);
18499+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
18500 }
18501 }
18502
18503 static __always_inline __must_check
18504-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
18505+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
18506 {
18507- int ret = 0;
18508+ unsigned ret = 0;
18509
18510 might_fault();
18511+
18512+ if (size > INT_MAX)
18513+ return size;
18514+
18515+#ifdef CONFIG_PAX_MEMORY_UDEREF
18516+ if (!__access_ok(VERIFY_READ, src, size))
18517+ return size;
18518+ if (!__access_ok(VERIFY_WRITE, dst, size))
18519+ return size;
18520+#endif
18521+
18522 if (!__builtin_constant_p(size))
18523- return copy_user_generic((__force void *)dst,
18524- (__force void *)src, size);
18525+ return copy_user_generic((__force_kernel void *)____m(dst),
18526+ (__force_kernel const void *)____m(src), size);
18527 switch (size) {
18528 case 1: {
18529 u8 tmp;
18530- __get_user_asm(tmp, (u8 __user *)src,
18531+ __get_user_asm(tmp, (const u8 __user *)src,
18532 ret, "b", "b", "=q", 1);
18533 if (likely(!ret))
18534 __put_user_asm(tmp, (u8 __user *)dst,
18535@@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
18536 }
18537 case 2: {
18538 u16 tmp;
18539- __get_user_asm(tmp, (u16 __user *)src,
18540+ __get_user_asm(tmp, (const u16 __user *)src,
18541 ret, "w", "w", "=r", 2);
18542 if (likely(!ret))
18543 __put_user_asm(tmp, (u16 __user *)dst,
18544@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
18545
18546 case 4: {
18547 u32 tmp;
18548- __get_user_asm(tmp, (u32 __user *)src,
18549+ __get_user_asm(tmp, (const u32 __user *)src,
18550 ret, "l", "k", "=r", 4);
18551 if (likely(!ret))
18552 __put_user_asm(tmp, (u32 __user *)dst,
18553@@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
18554 }
18555 case 8: {
18556 u64 tmp;
18557- __get_user_asm(tmp, (u64 __user *)src,
18558+ __get_user_asm(tmp, (const u64 __user *)src,
18559 ret, "q", "", "=r", 8);
18560 if (likely(!ret))
18561 __put_user_asm(tmp, (u64 __user *)dst,
18562@@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
18563 return ret;
18564 }
18565 default:
18566- return copy_user_generic((__force void *)dst,
18567- (__force void *)src, size);
18568+ return copy_user_generic((__force_kernel void *)____m(dst),
18569+ (__force_kernel const void *)____m(src), size);
18570 }
18571 }
18572
18573-static __must_check __always_inline int
18574-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
18575+static __must_check __always_inline unsigned long
18576+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
18577 {
18578- return copy_user_generic(dst, (__force const void *)src, size);
18579+ if (size > INT_MAX)
18580+ return size;
18581+
18582+#ifdef CONFIG_PAX_MEMORY_UDEREF
18583+ if (!__access_ok(VERIFY_READ, src, size))
18584+ return size;
18585+#endif
18586+
18587+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
18588 }
18589
18590-static __must_check __always_inline int
18591-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
18592+static __must_check __always_inline unsigned long
18593+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
18594 {
18595- return copy_user_generic((__force void *)dst, src, size);
18596+ if (size > INT_MAX)
18597+ return size;
18598+
18599+#ifdef CONFIG_PAX_MEMORY_UDEREF
18600+ if (!__access_ok(VERIFY_WRITE, dst, size))
18601+ return size;
18602+#endif
18603+
18604+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
18605 }
18606
18607-extern long __copy_user_nocache(void *dst, const void __user *src,
18608- unsigned size, int zerorest);
18609+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
18610+ unsigned long size, int zerorest) __size_overflow(3);
18611
18612-static inline int
18613-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
18614+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
18615 {
18616 might_sleep();
18617+
18618+ if (size > INT_MAX)
18619+ return size;
18620+
18621+#ifdef CONFIG_PAX_MEMORY_UDEREF
18622+ if (!__access_ok(VERIFY_READ, src, size))
18623+ return size;
18624+#endif
18625+
18626 return __copy_user_nocache(dst, src, size, 1);
18627 }
18628
18629-static inline int
18630-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
18631- unsigned size)
18632+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
18633+ unsigned long size)
18634 {
18635+ if (size > INT_MAX)
18636+ return size;
18637+
18638+#ifdef CONFIG_PAX_MEMORY_UDEREF
18639+ if (!__access_ok(VERIFY_READ, src, size))
18640+ return size;
18641+#endif
18642+
18643 return __copy_user_nocache(dst, src, size, 0);
18644 }
18645
18646-unsigned long
18647-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
18648+extern unsigned long
18649+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
18650
18651 #endif /* _ASM_X86_UACCESS_64_H */
18652diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
18653index 5b238981..77fdd78 100644
18654--- a/arch/x86/include/asm/word-at-a-time.h
18655+++ b/arch/x86/include/asm/word-at-a-time.h
18656@@ -11,7 +11,7 @@
18657 * and shift, for example.
18658 */
18659 struct word_at_a_time {
18660- const unsigned long one_bits, high_bits;
18661+ unsigned long one_bits, high_bits;
18662 };
18663
18664 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
18665diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
18666index d8d9922..bf6cecb 100644
18667--- a/arch/x86/include/asm/x86_init.h
18668+++ b/arch/x86/include/asm/x86_init.h
18669@@ -129,7 +129,7 @@ struct x86_init_ops {
18670 struct x86_init_timers timers;
18671 struct x86_init_iommu iommu;
18672 struct x86_init_pci pci;
18673-};
18674+} __no_const;
18675
18676 /**
18677 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
18678@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
18679 void (*setup_percpu_clockev)(void);
18680 void (*early_percpu_clock_init)(void);
18681 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
18682-};
18683+} __no_const;
18684
18685 /**
18686 * struct x86_platform_ops - platform specific runtime functions
18687@@ -166,7 +166,7 @@ struct x86_platform_ops {
18688 void (*save_sched_clock_state)(void);
18689 void (*restore_sched_clock_state)(void);
18690 void (*apic_post_init)(void);
18691-};
18692+} __no_const;
18693
18694 struct pci_dev;
18695 struct msi_msg;
18696@@ -180,7 +180,7 @@ struct x86_msi_ops {
18697 void (*teardown_msi_irqs)(struct pci_dev *dev);
18698 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
18699 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
18700-};
18701+} __no_const;
18702
18703 struct IO_APIC_route_entry;
18704 struct io_apic_irq_attr;
18705@@ -201,7 +201,7 @@ struct x86_io_apic_ops {
18706 unsigned int destination, int vector,
18707 struct io_apic_irq_attr *attr);
18708 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
18709-};
18710+} __no_const;
18711
18712 extern struct x86_init_ops x86_init;
18713 extern struct x86_cpuinit_ops x86_cpuinit;
18714diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
18715index 0415cda..3b22adc 100644
18716--- a/arch/x86/include/asm/xsave.h
18717+++ b/arch/x86/include/asm/xsave.h
18718@@ -70,8 +70,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
18719 if (unlikely(err))
18720 return -EFAULT;
18721
18722+ pax_open_userland();
18723 __asm__ __volatile__(ASM_STAC "\n"
18724- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
18725+ "1:"
18726+ __copyuser_seg
18727+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
18728 "2: " ASM_CLAC "\n"
18729 ".section .fixup,\"ax\"\n"
18730 "3: movl $-1,%[err]\n"
18731@@ -81,18 +84,22 @@ static inline int xsave_user(struct xsave_struct __user *buf)
18732 : [err] "=r" (err)
18733 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
18734 : "memory");
18735+ pax_close_userland();
18736 return err;
18737 }
18738
18739 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
18740 {
18741 int err;
18742- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
18743+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
18744 u32 lmask = mask;
18745 u32 hmask = mask >> 32;
18746
18747+ pax_open_userland();
18748 __asm__ __volatile__(ASM_STAC "\n"
18749- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
18750+ "1:"
18751+ __copyuser_seg
18752+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
18753 "2: " ASM_CLAC "\n"
18754 ".section .fixup,\"ax\"\n"
18755 "3: movl $-1,%[err]\n"
18756@@ -102,6 +109,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
18757 : [err] "=r" (err)
18758 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
18759 : "memory"); /* memory required? */
18760+ pax_close_userland();
18761 return err;
18762 }
18763
18764diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
18765index bbae024..e1528f9 100644
18766--- a/arch/x86/include/uapi/asm/e820.h
18767+++ b/arch/x86/include/uapi/asm/e820.h
18768@@ -63,7 +63,7 @@ struct e820map {
18769 #define ISA_START_ADDRESS 0xa0000
18770 #define ISA_END_ADDRESS 0x100000
18771
18772-#define BIOS_BEGIN 0x000a0000
18773+#define BIOS_BEGIN 0x000c0000
18774 #define BIOS_END 0x00100000
18775
18776 #define BIOS_ROM_BASE 0xffe00000
18777diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
18778index 7bd3bd3..5dac791 100644
18779--- a/arch/x86/kernel/Makefile
18780+++ b/arch/x86/kernel/Makefile
18781@@ -22,7 +22,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
18782 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
18783 obj-$(CONFIG_IRQ_WORK) += irq_work.o
18784 obj-y += probe_roms.o
18785-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
18786+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
18787 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
18788 obj-y += syscall_$(BITS).o
18789 obj-$(CONFIG_X86_64) += vsyscall_64.o
18790diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
18791index 230c8ea..f915130 100644
18792--- a/arch/x86/kernel/acpi/boot.c
18793+++ b/arch/x86/kernel/acpi/boot.c
18794@@ -1361,7 +1361,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
18795 * If your system is blacklisted here, but you find that acpi=force
18796 * works for you, please contact linux-acpi@vger.kernel.org
18797 */
18798-static struct dmi_system_id __initdata acpi_dmi_table[] = {
18799+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
18800 /*
18801 * Boxes that need ACPI disabled
18802 */
18803@@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
18804 };
18805
18806 /* second table for DMI checks that should run after early-quirks */
18807-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
18808+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
18809 /*
18810 * HP laptops which use a DSDT reporting as HP/SB400/10000,
18811 * which includes some code which overrides all temperature
18812diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
18813index ec94e11..7fbbec0 100644
18814--- a/arch/x86/kernel/acpi/sleep.c
18815+++ b/arch/x86/kernel/acpi/sleep.c
18816@@ -88,8 +88,12 @@ int acpi_suspend_lowlevel(void)
18817 #else /* CONFIG_64BIT */
18818 #ifdef CONFIG_SMP
18819 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
18820+
18821+ pax_open_kernel();
18822 early_gdt_descr.address =
18823 (unsigned long)get_cpu_gdt_table(smp_processor_id());
18824+ pax_close_kernel();
18825+
18826 initial_gs = per_cpu_offset(smp_processor_id());
18827 #endif
18828 initial_code = (unsigned long)wakeup_long64;
18829diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
18830index d1daa66..59fecba 100644
18831--- a/arch/x86/kernel/acpi/wakeup_32.S
18832+++ b/arch/x86/kernel/acpi/wakeup_32.S
18833@@ -29,13 +29,11 @@ wakeup_pmode_return:
18834 # and restore the stack ... but you need gdt for this to work
18835 movl saved_context_esp, %esp
18836
18837- movl %cs:saved_magic, %eax
18838- cmpl $0x12345678, %eax
18839+ cmpl $0x12345678, saved_magic
18840 jne bogus_magic
18841
18842 # jump to place where we left off
18843- movl saved_eip, %eax
18844- jmp *%eax
18845+ jmp *(saved_eip)
18846
18847 bogus_magic:
18848 jmp bogus_magic
18849diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
18850index c15cf9a..0e63558 100644
18851--- a/arch/x86/kernel/alternative.c
18852+++ b/arch/x86/kernel/alternative.c
18853@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
18854 */
18855 for (a = start; a < end; a++) {
18856 instr = (u8 *)&a->instr_offset + a->instr_offset;
18857+
18858+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18859+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18860+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
18861+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18862+#endif
18863+
18864 replacement = (u8 *)&a->repl_offset + a->repl_offset;
18865 BUG_ON(a->replacementlen > a->instrlen);
18866 BUG_ON(a->instrlen > sizeof(insnbuf));
18867@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
18868 for (poff = start; poff < end; poff++) {
18869 u8 *ptr = (u8 *)poff + *poff;
18870
18871+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18872+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18873+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
18874+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18875+#endif
18876+
18877 if (!*poff || ptr < text || ptr >= text_end)
18878 continue;
18879 /* turn DS segment override prefix into lock prefix */
18880- if (*ptr == 0x3e)
18881+ if (*ktla_ktva(ptr) == 0x3e)
18882 text_poke(ptr, ((unsigned char []){0xf0}), 1);
18883 }
18884 mutex_unlock(&text_mutex);
18885@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
18886 for (poff = start; poff < end; poff++) {
18887 u8 *ptr = (u8 *)poff + *poff;
18888
18889+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18890+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18891+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
18892+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18893+#endif
18894+
18895 if (!*poff || ptr < text || ptr >= text_end)
18896 continue;
18897 /* turn lock prefix into DS segment override prefix */
18898- if (*ptr == 0xf0)
18899+ if (*ktla_ktva(ptr) == 0xf0)
18900 text_poke(ptr, ((unsigned char []){0x3E}), 1);
18901 }
18902 mutex_unlock(&text_mutex);
18903@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
18904
18905 BUG_ON(p->len > MAX_PATCH_LEN);
18906 /* prep the buffer with the original instructions */
18907- memcpy(insnbuf, p->instr, p->len);
18908+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
18909 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
18910 (unsigned long)p->instr, p->len);
18911
18912@@ -515,7 +534,7 @@ void __init alternative_instructions(void)
18913 if (!uniproc_patched || num_possible_cpus() == 1)
18914 free_init_pages("SMP alternatives",
18915 (unsigned long)__smp_locks,
18916- (unsigned long)__smp_locks_end);
18917+ PAGE_ALIGN((unsigned long)__smp_locks_end));
18918 #endif
18919
18920 apply_paravirt(__parainstructions, __parainstructions_end);
18921@@ -535,13 +554,17 @@ void __init alternative_instructions(void)
18922 * instructions. And on the local CPU you need to be protected again NMI or MCE
18923 * handlers seeing an inconsistent instruction while you patch.
18924 */
18925-void *__init_or_module text_poke_early(void *addr, const void *opcode,
18926+void *__kprobes text_poke_early(void *addr, const void *opcode,
18927 size_t len)
18928 {
18929 unsigned long flags;
18930 local_irq_save(flags);
18931- memcpy(addr, opcode, len);
18932+
18933+ pax_open_kernel();
18934+ memcpy(ktla_ktva(addr), opcode, len);
18935 sync_core();
18936+ pax_close_kernel();
18937+
18938 local_irq_restore(flags);
18939 /* Could also do a CLFLUSH here to speed up CPU recovery; but
18940 that causes hangs on some VIA CPUs. */
18941@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
18942 */
18943 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
18944 {
18945- unsigned long flags;
18946- char *vaddr;
18947+ unsigned char *vaddr = ktla_ktva(addr);
18948 struct page *pages[2];
18949- int i;
18950+ size_t i;
18951
18952 if (!core_kernel_text((unsigned long)addr)) {
18953- pages[0] = vmalloc_to_page(addr);
18954- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
18955+ pages[0] = vmalloc_to_page(vaddr);
18956+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
18957 } else {
18958- pages[0] = virt_to_page(addr);
18959+ pages[0] = virt_to_page(vaddr);
18960 WARN_ON(!PageReserved(pages[0]));
18961- pages[1] = virt_to_page(addr + PAGE_SIZE);
18962+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
18963 }
18964 BUG_ON(!pages[0]);
18965- local_irq_save(flags);
18966- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
18967- if (pages[1])
18968- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
18969- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
18970- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
18971- clear_fixmap(FIX_TEXT_POKE0);
18972- if (pages[1])
18973- clear_fixmap(FIX_TEXT_POKE1);
18974- local_flush_tlb();
18975- sync_core();
18976- /* Could also do a CLFLUSH here to speed up CPU recovery; but
18977- that causes hangs on some VIA CPUs. */
18978+ text_poke_early(addr, opcode, len);
18979 for (i = 0; i < len; i++)
18980- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
18981- local_irq_restore(flags);
18982+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
18983 return addr;
18984 }
18985
18986diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
18987index 904611b..004dde6 100644
18988--- a/arch/x86/kernel/apic/apic.c
18989+++ b/arch/x86/kernel/apic/apic.c
18990@@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
18991 /*
18992 * Debug level, exported for io_apic.c
18993 */
18994-unsigned int apic_verbosity;
18995+int apic_verbosity;
18996
18997 int pic_mode;
18998
18999@@ -1955,7 +1955,7 @@ void smp_error_interrupt(struct pt_regs *regs)
19000 apic_write(APIC_ESR, 0);
19001 v1 = apic_read(APIC_ESR);
19002 ack_APIC_irq();
19003- atomic_inc(&irq_err_count);
19004+ atomic_inc_unchecked(&irq_err_count);
19005
19006 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
19007 smp_processor_id(), v0 , v1);
19008diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
19009index 00c77cf..2dc6a2d 100644
19010--- a/arch/x86/kernel/apic/apic_flat_64.c
19011+++ b/arch/x86/kernel/apic/apic_flat_64.c
19012@@ -157,7 +157,7 @@ static int flat_probe(void)
19013 return 1;
19014 }
19015
19016-static struct apic apic_flat = {
19017+static struct apic apic_flat __read_only = {
19018 .name = "flat",
19019 .probe = flat_probe,
19020 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
19021@@ -271,7 +271,7 @@ static int physflat_probe(void)
19022 return 0;
19023 }
19024
19025-static struct apic apic_physflat = {
19026+static struct apic apic_physflat __read_only = {
19027
19028 .name = "physical flat",
19029 .probe = physflat_probe,
19030diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
19031index e145f28..2752888 100644
19032--- a/arch/x86/kernel/apic/apic_noop.c
19033+++ b/arch/x86/kernel/apic/apic_noop.c
19034@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
19035 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
19036 }
19037
19038-struct apic apic_noop = {
19039+struct apic apic_noop __read_only = {
19040 .name = "noop",
19041 .probe = noop_probe,
19042 .acpi_madt_oem_check = NULL,
19043diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
19044index d50e364..543bee3 100644
19045--- a/arch/x86/kernel/apic/bigsmp_32.c
19046+++ b/arch/x86/kernel/apic/bigsmp_32.c
19047@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
19048 return dmi_bigsmp;
19049 }
19050
19051-static struct apic apic_bigsmp = {
19052+static struct apic apic_bigsmp __read_only = {
19053
19054 .name = "bigsmp",
19055 .probe = probe_bigsmp,
19056diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
19057index 0874799..a7a7892 100644
19058--- a/arch/x86/kernel/apic/es7000_32.c
19059+++ b/arch/x86/kernel/apic/es7000_32.c
19060@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
19061 return ret && es7000_apic_is_cluster();
19062 }
19063
19064-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
19065-static struct apic __refdata apic_es7000_cluster = {
19066+static struct apic apic_es7000_cluster __read_only = {
19067
19068 .name = "es7000",
19069 .probe = probe_es7000,
19070@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
19071 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
19072 };
19073
19074-static struct apic __refdata apic_es7000 = {
19075+static struct apic apic_es7000 __read_only = {
19076
19077 .name = "es7000",
19078 .probe = probe_es7000,
19079diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
19080index 9ed796c..e930fe4 100644
19081--- a/arch/x86/kernel/apic/io_apic.c
19082+++ b/arch/x86/kernel/apic/io_apic.c
19083@@ -1060,7 +1060,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
19084 }
19085 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
19086
19087-void lock_vector_lock(void)
19088+void lock_vector_lock(void) __acquires(vector_lock)
19089 {
19090 /* Used to the online set of cpus does not change
19091 * during assign_irq_vector.
19092@@ -1068,7 +1068,7 @@ void lock_vector_lock(void)
19093 raw_spin_lock(&vector_lock);
19094 }
19095
19096-void unlock_vector_lock(void)
19097+void unlock_vector_lock(void) __releases(vector_lock)
19098 {
19099 raw_spin_unlock(&vector_lock);
19100 }
19101@@ -2362,7 +2362,7 @@ static void ack_apic_edge(struct irq_data *data)
19102 ack_APIC_irq();
19103 }
19104
19105-atomic_t irq_mis_count;
19106+atomic_unchecked_t irq_mis_count;
19107
19108 #ifdef CONFIG_GENERIC_PENDING_IRQ
19109 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
19110@@ -2503,7 +2503,7 @@ static void ack_apic_level(struct irq_data *data)
19111 * at the cpu.
19112 */
19113 if (!(v & (1 << (i & 0x1f)))) {
19114- atomic_inc(&irq_mis_count);
19115+ atomic_inc_unchecked(&irq_mis_count);
19116
19117 eoi_ioapic_irq(irq, cfg);
19118 }
19119diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
19120index d661ee9..791fd33 100644
19121--- a/arch/x86/kernel/apic/numaq_32.c
19122+++ b/arch/x86/kernel/apic/numaq_32.c
19123@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
19124 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
19125 }
19126
19127-/* Use __refdata to keep false positive warning calm. */
19128-static struct apic __refdata apic_numaq = {
19129+static struct apic apic_numaq __read_only = {
19130
19131 .name = "NUMAQ",
19132 .probe = probe_numaq,
19133diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
19134index eb35ef9..f184a21 100644
19135--- a/arch/x86/kernel/apic/probe_32.c
19136+++ b/arch/x86/kernel/apic/probe_32.c
19137@@ -72,7 +72,7 @@ static int probe_default(void)
19138 return 1;
19139 }
19140
19141-static struct apic apic_default = {
19142+static struct apic apic_default __read_only = {
19143
19144 .name = "default",
19145 .probe = probe_default,
19146diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
19147index 77c95c0..434f8a4 100644
19148--- a/arch/x86/kernel/apic/summit_32.c
19149+++ b/arch/x86/kernel/apic/summit_32.c
19150@@ -486,7 +486,7 @@ void setup_summit(void)
19151 }
19152 #endif
19153
19154-static struct apic apic_summit = {
19155+static struct apic apic_summit __read_only = {
19156
19157 .name = "summit",
19158 .probe = probe_summit,
19159diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
19160index c88baa4..757aee1 100644
19161--- a/arch/x86/kernel/apic/x2apic_cluster.c
19162+++ b/arch/x86/kernel/apic/x2apic_cluster.c
19163@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
19164 return notifier_from_errno(err);
19165 }
19166
19167-static struct notifier_block __refdata x2apic_cpu_notifier = {
19168+static struct notifier_block x2apic_cpu_notifier = {
19169 .notifier_call = update_clusterinfo,
19170 };
19171
19172@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
19173 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
19174 }
19175
19176-static struct apic apic_x2apic_cluster = {
19177+static struct apic apic_x2apic_cluster __read_only = {
19178
19179 .name = "cluster x2apic",
19180 .probe = x2apic_cluster_probe,
19181diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
19182index 562a76d..a003c0f 100644
19183--- a/arch/x86/kernel/apic/x2apic_phys.c
19184+++ b/arch/x86/kernel/apic/x2apic_phys.c
19185@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
19186 return apic == &apic_x2apic_phys;
19187 }
19188
19189-static struct apic apic_x2apic_phys = {
19190+static struct apic apic_x2apic_phys __read_only = {
19191
19192 .name = "physical x2apic",
19193 .probe = x2apic_phys_probe,
19194diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
19195index 794f6eb..67e1db2 100644
19196--- a/arch/x86/kernel/apic/x2apic_uv_x.c
19197+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
19198@@ -342,7 +342,7 @@ static int uv_probe(void)
19199 return apic == &apic_x2apic_uv_x;
19200 }
19201
19202-static struct apic __refdata apic_x2apic_uv_x = {
19203+static struct apic apic_x2apic_uv_x __read_only = {
19204
19205 .name = "UV large system",
19206 .probe = uv_probe,
19207diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
19208index 53a4e27..038760a 100644
19209--- a/arch/x86/kernel/apm_32.c
19210+++ b/arch/x86/kernel/apm_32.c
19211@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
19212 * This is for buggy BIOS's that refer to (real mode) segment 0x40
19213 * even though they are called in protected mode.
19214 */
19215-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
19216+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
19217 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
19218
19219 static const char driver_version[] = "1.16ac"; /* no spaces */
19220@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
19221 BUG_ON(cpu != 0);
19222 gdt = get_cpu_gdt_table(cpu);
19223 save_desc_40 = gdt[0x40 / 8];
19224+
19225+ pax_open_kernel();
19226 gdt[0x40 / 8] = bad_bios_desc;
19227+ pax_close_kernel();
19228
19229 apm_irq_save(flags);
19230 APM_DO_SAVE_SEGS;
19231@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
19232 &call->esi);
19233 APM_DO_RESTORE_SEGS;
19234 apm_irq_restore(flags);
19235+
19236+ pax_open_kernel();
19237 gdt[0x40 / 8] = save_desc_40;
19238+ pax_close_kernel();
19239+
19240 put_cpu();
19241
19242 return call->eax & 0xff;
19243@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
19244 BUG_ON(cpu != 0);
19245 gdt = get_cpu_gdt_table(cpu);
19246 save_desc_40 = gdt[0x40 / 8];
19247+
19248+ pax_open_kernel();
19249 gdt[0x40 / 8] = bad_bios_desc;
19250+ pax_close_kernel();
19251
19252 apm_irq_save(flags);
19253 APM_DO_SAVE_SEGS;
19254@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
19255 &call->eax);
19256 APM_DO_RESTORE_SEGS;
19257 apm_irq_restore(flags);
19258+
19259+ pax_open_kernel();
19260 gdt[0x40 / 8] = save_desc_40;
19261+ pax_close_kernel();
19262+
19263 put_cpu();
19264 return error;
19265 }
19266@@ -2362,12 +2376,15 @@ static int __init apm_init(void)
19267 * code to that CPU.
19268 */
19269 gdt = get_cpu_gdt_table(0);
19270+
19271+ pax_open_kernel();
19272 set_desc_base(&gdt[APM_CS >> 3],
19273 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
19274 set_desc_base(&gdt[APM_CS_16 >> 3],
19275 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
19276 set_desc_base(&gdt[APM_DS >> 3],
19277 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
19278+ pax_close_kernel();
19279
19280 proc_create("apm", 0, NULL, &apm_file_ops);
19281
19282diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
19283index 2861082..6d4718e 100644
19284--- a/arch/x86/kernel/asm-offsets.c
19285+++ b/arch/x86/kernel/asm-offsets.c
19286@@ -33,6 +33,8 @@ void common(void) {
19287 OFFSET(TI_status, thread_info, status);
19288 OFFSET(TI_addr_limit, thread_info, addr_limit);
19289 OFFSET(TI_preempt_count, thread_info, preempt_count);
19290+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
19291+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
19292
19293 BLANK();
19294 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
19295@@ -53,8 +55,26 @@ void common(void) {
19296 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
19297 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
19298 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
19299+
19300+#ifdef CONFIG_PAX_KERNEXEC
19301+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
19302 #endif
19303
19304+#ifdef CONFIG_PAX_MEMORY_UDEREF
19305+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
19306+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
19307+#ifdef CONFIG_X86_64
19308+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
19309+#endif
19310+#endif
19311+
19312+#endif
19313+
19314+ BLANK();
19315+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
19316+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
19317+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
19318+
19319 #ifdef CONFIG_XEN
19320 BLANK();
19321 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
19322diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
19323index e7c798b..2b2019b 100644
19324--- a/arch/x86/kernel/asm-offsets_64.c
19325+++ b/arch/x86/kernel/asm-offsets_64.c
19326@@ -77,6 +77,7 @@ int main(void)
19327 BLANK();
19328 #undef ENTRY
19329
19330+ DEFINE(TSS_size, sizeof(struct tss_struct));
19331 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
19332 BLANK();
19333
19334diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
19335index b0684e4..22ccfd7 100644
19336--- a/arch/x86/kernel/cpu/Makefile
19337+++ b/arch/x86/kernel/cpu/Makefile
19338@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
19339 CFLAGS_REMOVE_perf_event.o = -pg
19340 endif
19341
19342-# Make sure load_percpu_segment has no stackprotector
19343-nostackp := $(call cc-option, -fno-stack-protector)
19344-CFLAGS_common.o := $(nostackp)
19345-
19346 obj-y := intel_cacheinfo.o scattered.o topology.o
19347 obj-y += proc.o capflags.o powerflags.o common.o
19348 obj-y += rdrand.o
19349diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
19350index 5013a48..0782c53 100644
19351--- a/arch/x86/kernel/cpu/amd.c
19352+++ b/arch/x86/kernel/cpu/amd.c
19353@@ -744,7 +744,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
19354 unsigned int size)
19355 {
19356 /* AMD errata T13 (order #21922) */
19357- if ((c->x86 == 6)) {
19358+ if (c->x86 == 6) {
19359 /* Duron Rev A0 */
19360 if (c->x86_model == 3 && c->x86_mask == 0)
19361 size = 64;
19362diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
19363index 22018f7..df77e23 100644
19364--- a/arch/x86/kernel/cpu/common.c
19365+++ b/arch/x86/kernel/cpu/common.c
19366@@ -88,60 +88,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
19367
19368 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
19369
19370-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
19371-#ifdef CONFIG_X86_64
19372- /*
19373- * We need valid kernel segments for data and code in long mode too
19374- * IRET will check the segment types kkeil 2000/10/28
19375- * Also sysret mandates a special GDT layout
19376- *
19377- * TLS descriptors are currently at a different place compared to i386.
19378- * Hopefully nobody expects them at a fixed place (Wine?)
19379- */
19380- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
19381- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
19382- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
19383- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
19384- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
19385- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
19386-#else
19387- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
19388- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
19389- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
19390- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
19391- /*
19392- * Segments used for calling PnP BIOS have byte granularity.
19393- * They code segments and data segments have fixed 64k limits,
19394- * the transfer segment sizes are set at run time.
19395- */
19396- /* 32-bit code */
19397- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
19398- /* 16-bit code */
19399- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
19400- /* 16-bit data */
19401- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
19402- /* 16-bit data */
19403- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
19404- /* 16-bit data */
19405- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
19406- /*
19407- * The APM segments have byte granularity and their bases
19408- * are set at run time. All have 64k limits.
19409- */
19410- /* 32-bit code */
19411- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
19412- /* 16-bit code */
19413- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
19414- /* data */
19415- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
19416-
19417- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
19418- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
19419- GDT_STACK_CANARY_INIT
19420-#endif
19421-} };
19422-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
19423-
19424 static int __init x86_xsave_setup(char *s)
19425 {
19426 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
19427@@ -288,6 +234,57 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
19428 set_in_cr4(X86_CR4_SMAP);
19429 }
19430
19431+#ifdef CONFIG_X86_64
19432+static __init int setup_disable_pcid(char *arg)
19433+{
19434+ setup_clear_cpu_cap(X86_FEATURE_PCID);
19435+
19436+#ifdef CONFIG_PAX_MEMORY_UDEREF
19437+ if (clone_pgd_mask != ~(pgdval_t)0UL)
19438+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
19439+#endif
19440+
19441+ return 1;
19442+}
19443+__setup("nopcid", setup_disable_pcid);
19444+
19445+static void setup_pcid(struct cpuinfo_x86 *c)
19446+{
19447+ if (!cpu_has(c, X86_FEATURE_PCID)) {
19448+
19449+#ifdef CONFIG_PAX_MEMORY_UDEREF
19450+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
19451+ pax_open_kernel();
19452+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
19453+ pax_close_kernel();
19454+ printk("PAX: slow and weak UDEREF enabled\n");
19455+ } else
19456+ printk("PAX: UDEREF disabled\n");
19457+#endif
19458+
19459+ return;
19460+ }
19461+
19462+ printk("PAX: PCID detected\n");
19463+ set_in_cr4(X86_CR4_PCIDE);
19464+
19465+#ifdef CONFIG_PAX_MEMORY_UDEREF
19466+ pax_open_kernel();
19467+ clone_pgd_mask = ~(pgdval_t)0UL;
19468+ pax_close_kernel();
19469+ if (pax_user_shadow_base)
19470+ printk("PAX: weak UDEREF enabled\n");
19471+ else {
19472+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
19473+ printk("PAX: strong UDEREF enabled\n");
19474+ }
19475+#endif
19476+
19477+ if (cpu_has(c, X86_FEATURE_INVPCID))
19478+ printk("PAX: INVPCID detected\n");
19479+}
19480+#endif
19481+
19482 /*
19483 * Some CPU features depend on higher CPUID levels, which may not always
19484 * be available due to CPUID level capping or broken virtualization
19485@@ -386,7 +383,7 @@ void switch_to_new_gdt(int cpu)
19486 {
19487 struct desc_ptr gdt_descr;
19488
19489- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
19490+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19491 gdt_descr.size = GDT_SIZE - 1;
19492 load_gdt(&gdt_descr);
19493 /* Reload the per-cpu base */
19494@@ -874,6 +871,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
19495 setup_smep(c);
19496 setup_smap(c);
19497
19498+#ifdef CONFIG_X86_64
19499+ setup_pcid(c);
19500+#endif
19501+
19502 /*
19503 * The vendor-specific functions might have changed features.
19504 * Now we do "generic changes."
19505@@ -882,6 +883,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
19506 /* Filter out anything that depends on CPUID levels we don't have */
19507 filter_cpuid_features(c, true);
19508
19509+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
19510+ setup_clear_cpu_cap(X86_FEATURE_SEP);
19511+#endif
19512+
19513 /* If the model name is still unset, do table lookup. */
19514 if (!c->x86_model_id[0]) {
19515 const char *p;
19516@@ -1069,10 +1074,12 @@ static __init int setup_disablecpuid(char *arg)
19517 }
19518 __setup("clearcpuid=", setup_disablecpuid);
19519
19520+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
19521+EXPORT_PER_CPU_SYMBOL(current_tinfo);
19522+
19523 #ifdef CONFIG_X86_64
19524 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
19525-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
19526- (unsigned long) nmi_idt_table };
19527+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
19528
19529 DEFINE_PER_CPU_FIRST(union irq_stack_union,
19530 irq_stack_union) __aligned(PAGE_SIZE);
19531@@ -1086,7 +1093,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
19532 EXPORT_PER_CPU_SYMBOL(current_task);
19533
19534 DEFINE_PER_CPU(unsigned long, kernel_stack) =
19535- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
19536+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
19537 EXPORT_PER_CPU_SYMBOL(kernel_stack);
19538
19539 DEFINE_PER_CPU(char *, irq_stack_ptr) =
19540@@ -1231,7 +1238,7 @@ void __cpuinit cpu_init(void)
19541 load_ucode_ap();
19542
19543 cpu = stack_smp_processor_id();
19544- t = &per_cpu(init_tss, cpu);
19545+ t = init_tss + cpu;
19546 oist = &per_cpu(orig_ist, cpu);
19547
19548 #ifdef CONFIG_NUMA
19549@@ -1257,7 +1264,7 @@ void __cpuinit cpu_init(void)
19550 switch_to_new_gdt(cpu);
19551 loadsegment(fs, 0);
19552
19553- load_idt((const struct desc_ptr *)&idt_descr);
19554+ load_idt(&idt_descr);
19555
19556 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
19557 syscall_init();
19558@@ -1266,7 +1273,6 @@ void __cpuinit cpu_init(void)
19559 wrmsrl(MSR_KERNEL_GS_BASE, 0);
19560 barrier();
19561
19562- x86_configure_nx();
19563 enable_x2apic();
19564
19565 /*
19566@@ -1318,7 +1324,7 @@ void __cpuinit cpu_init(void)
19567 {
19568 int cpu = smp_processor_id();
19569 struct task_struct *curr = current;
19570- struct tss_struct *t = &per_cpu(init_tss, cpu);
19571+ struct tss_struct *t = init_tss + cpu;
19572 struct thread_struct *thread = &curr->thread;
19573
19574 show_ucode_info_early();
19575diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
19576index 7c6f7d5..8cac382 100644
19577--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
19578+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
19579@@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = {
19580 };
19581
19582 #ifdef CONFIG_AMD_NB
19583+static struct attribute *default_attrs_amd_nb[] = {
19584+ &type.attr,
19585+ &level.attr,
19586+ &coherency_line_size.attr,
19587+ &physical_line_partition.attr,
19588+ &ways_of_associativity.attr,
19589+ &number_of_sets.attr,
19590+ &size.attr,
19591+ &shared_cpu_map.attr,
19592+ &shared_cpu_list.attr,
19593+ NULL,
19594+ NULL,
19595+ NULL,
19596+ NULL
19597+};
19598+
19599 static struct attribute ** __cpuinit amd_l3_attrs(void)
19600 {
19601 static struct attribute **attrs;
19602@@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
19603
19604 n = ARRAY_SIZE(default_attrs);
19605
19606- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
19607- n += 2;
19608-
19609- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
19610- n += 1;
19611-
19612- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
19613- if (attrs == NULL)
19614- return attrs = default_attrs;
19615-
19616- for (n = 0; default_attrs[n]; n++)
19617- attrs[n] = default_attrs[n];
19618+ attrs = default_attrs_amd_nb;
19619
19620 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
19621 attrs[n++] = &cache_disable_0.attr;
19622@@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = {
19623 .default_attrs = default_attrs,
19624 };
19625
19626+#ifdef CONFIG_AMD_NB
19627+static struct kobj_type ktype_cache_amd_nb = {
19628+ .sysfs_ops = &sysfs_ops,
19629+ .default_attrs = default_attrs_amd_nb,
19630+};
19631+#endif
19632+
19633 static struct kobj_type ktype_percpu_entry = {
19634 .sysfs_ops = &sysfs_ops,
19635 };
19636@@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
19637 return retval;
19638 }
19639
19640+#ifdef CONFIG_AMD_NB
19641+ amd_l3_attrs();
19642+#endif
19643+
19644 for (i = 0; i < num_cache_leaves; i++) {
19645+ struct kobj_type *ktype;
19646+
19647 this_object = INDEX_KOBJECT_PTR(cpu, i);
19648 this_object->cpu = cpu;
19649 this_object->index = i;
19650
19651 this_leaf = CPUID4_INFO_IDX(cpu, i);
19652
19653- ktype_cache.default_attrs = default_attrs;
19654+ ktype = &ktype_cache;
19655 #ifdef CONFIG_AMD_NB
19656 if (this_leaf->base.nb)
19657- ktype_cache.default_attrs = amd_l3_attrs();
19658+ ktype = &ktype_cache_amd_nb;
19659 #endif
19660 retval = kobject_init_and_add(&(this_object->kobj),
19661- &ktype_cache,
19662+ ktype,
19663 per_cpu(ici_cache_kobject, cpu),
19664 "index%1lu", i);
19665 if (unlikely(retval)) {
19666@@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
19667 return NOTIFY_OK;
19668 }
19669
19670-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
19671+static struct notifier_block cacheinfo_cpu_notifier = {
19672 .notifier_call = cacheinfo_cpu_callback,
19673 };
19674
19675diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
19676index 9239504..b2471ce 100644
19677--- a/arch/x86/kernel/cpu/mcheck/mce.c
19678+++ b/arch/x86/kernel/cpu/mcheck/mce.c
19679@@ -45,6 +45,7 @@
19680 #include <asm/processor.h>
19681 #include <asm/mce.h>
19682 #include <asm/msr.h>
19683+#include <asm/local.h>
19684
19685 #include "mce-internal.h"
19686
19687@@ -246,7 +247,7 @@ static void print_mce(struct mce *m)
19688 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
19689 m->cs, m->ip);
19690
19691- if (m->cs == __KERNEL_CS)
19692+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
19693 print_symbol("{%s}", m->ip);
19694 pr_cont("\n");
19695 }
19696@@ -279,10 +280,10 @@ static void print_mce(struct mce *m)
19697
19698 #define PANIC_TIMEOUT 5 /* 5 seconds */
19699
19700-static atomic_t mce_paniced;
19701+static atomic_unchecked_t mce_paniced;
19702
19703 static int fake_panic;
19704-static atomic_t mce_fake_paniced;
19705+static atomic_unchecked_t mce_fake_paniced;
19706
19707 /* Panic in progress. Enable interrupts and wait for final IPI */
19708 static void wait_for_panic(void)
19709@@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
19710 /*
19711 * Make sure only one CPU runs in machine check panic
19712 */
19713- if (atomic_inc_return(&mce_paniced) > 1)
19714+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
19715 wait_for_panic();
19716 barrier();
19717
19718@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
19719 console_verbose();
19720 } else {
19721 /* Don't log too much for fake panic */
19722- if (atomic_inc_return(&mce_fake_paniced) > 1)
19723+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
19724 return;
19725 }
19726 /* First print corrected ones that are still unlogged */
19727@@ -353,7 +354,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
19728 if (!fake_panic) {
19729 if (panic_timeout == 0)
19730 panic_timeout = mca_cfg.panic_timeout;
19731- panic(msg);
19732+ panic("%s", msg);
19733 } else
19734 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
19735 }
19736@@ -683,7 +684,7 @@ static int mce_timed_out(u64 *t)
19737 * might have been modified by someone else.
19738 */
19739 rmb();
19740- if (atomic_read(&mce_paniced))
19741+ if (atomic_read_unchecked(&mce_paniced))
19742 wait_for_panic();
19743 if (!mca_cfg.monarch_timeout)
19744 goto out;
19745@@ -1654,7 +1655,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
19746 }
19747
19748 /* Call the installed machine check handler for this CPU setup. */
19749-void (*machine_check_vector)(struct pt_regs *, long error_code) =
19750+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
19751 unexpected_machine_check;
19752
19753 /*
19754@@ -1677,7 +1678,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
19755 return;
19756 }
19757
19758+ pax_open_kernel();
19759 machine_check_vector = do_machine_check;
19760+ pax_close_kernel();
19761
19762 __mcheck_cpu_init_generic();
19763 __mcheck_cpu_init_vendor(c);
19764@@ -1691,7 +1694,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
19765 */
19766
19767 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
19768-static int mce_chrdev_open_count; /* #times opened */
19769+static local_t mce_chrdev_open_count; /* #times opened */
19770 static int mce_chrdev_open_exclu; /* already open exclusive? */
19771
19772 static int mce_chrdev_open(struct inode *inode, struct file *file)
19773@@ -1699,7 +1702,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
19774 spin_lock(&mce_chrdev_state_lock);
19775
19776 if (mce_chrdev_open_exclu ||
19777- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
19778+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
19779 spin_unlock(&mce_chrdev_state_lock);
19780
19781 return -EBUSY;
19782@@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
19783
19784 if (file->f_flags & O_EXCL)
19785 mce_chrdev_open_exclu = 1;
19786- mce_chrdev_open_count++;
19787+ local_inc(&mce_chrdev_open_count);
19788
19789 spin_unlock(&mce_chrdev_state_lock);
19790
19791@@ -1718,7 +1721,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
19792 {
19793 spin_lock(&mce_chrdev_state_lock);
19794
19795- mce_chrdev_open_count--;
19796+ local_dec(&mce_chrdev_open_count);
19797 mce_chrdev_open_exclu = 0;
19798
19799 spin_unlock(&mce_chrdev_state_lock);
19800@@ -2364,7 +2367,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
19801 return NOTIFY_OK;
19802 }
19803
19804-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
19805+static struct notifier_block mce_cpu_notifier = {
19806 .notifier_call = mce_cpu_callback,
19807 };
19808
19809@@ -2374,7 +2377,7 @@ static __init void mce_init_banks(void)
19810
19811 for (i = 0; i < mca_cfg.banks; i++) {
19812 struct mce_bank *b = &mce_banks[i];
19813- struct device_attribute *a = &b->attr;
19814+ device_attribute_no_const *a = &b->attr;
19815
19816 sysfs_attr_init(&a->attr);
19817 a->attr.name = b->attrname;
19818@@ -2442,7 +2445,7 @@ struct dentry *mce_get_debugfs_dir(void)
19819 static void mce_reset(void)
19820 {
19821 cpu_missing = 0;
19822- atomic_set(&mce_fake_paniced, 0);
19823+ atomic_set_unchecked(&mce_fake_paniced, 0);
19824 atomic_set(&mce_executing, 0);
19825 atomic_set(&mce_callin, 0);
19826 atomic_set(&global_nwo, 0);
19827diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
19828index 1c044b1..37a2a43 100644
19829--- a/arch/x86/kernel/cpu/mcheck/p5.c
19830+++ b/arch/x86/kernel/cpu/mcheck/p5.c
19831@@ -11,6 +11,7 @@
19832 #include <asm/processor.h>
19833 #include <asm/mce.h>
19834 #include <asm/msr.h>
19835+#include <asm/pgtable.h>
19836
19837 /* By default disabled */
19838 int mce_p5_enabled __read_mostly;
19839@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
19840 if (!cpu_has(c, X86_FEATURE_MCE))
19841 return;
19842
19843+ pax_open_kernel();
19844 machine_check_vector = pentium_machine_check;
19845+ pax_close_kernel();
19846 /* Make sure the vector pointer is visible before we enable MCEs: */
19847 wmb();
19848
19849diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
19850index 47a1870..8c019a7 100644
19851--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
19852+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
19853@@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
19854 return notifier_from_errno(err);
19855 }
19856
19857-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
19858+static struct notifier_block thermal_throttle_cpu_notifier =
19859 {
19860 .notifier_call = thermal_throttle_cpu_callback,
19861 };
19862diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
19863index e9a701a..35317d6 100644
19864--- a/arch/x86/kernel/cpu/mcheck/winchip.c
19865+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
19866@@ -10,6 +10,7 @@
19867 #include <asm/processor.h>
19868 #include <asm/mce.h>
19869 #include <asm/msr.h>
19870+#include <asm/pgtable.h>
19871
19872 /* Machine check handler for WinChip C6: */
19873 static void winchip_machine_check(struct pt_regs *regs, long error_code)
19874@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
19875 {
19876 u32 lo, hi;
19877
19878+ pax_open_kernel();
19879 machine_check_vector = winchip_machine_check;
19880+ pax_close_kernel();
19881 /* Make sure the vector pointer is visible before we enable MCEs: */
19882 wmb();
19883
19884diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
19885index ca22b73..9987afe 100644
19886--- a/arch/x86/kernel/cpu/mtrr/main.c
19887+++ b/arch/x86/kernel/cpu/mtrr/main.c
19888@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
19889 u64 size_or_mask, size_and_mask;
19890 static bool mtrr_aps_delayed_init;
19891
19892-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
19893+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
19894
19895 const struct mtrr_ops *mtrr_if;
19896
19897diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
19898index df5e41f..816c719 100644
19899--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
19900+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
19901@@ -25,7 +25,7 @@ struct mtrr_ops {
19902 int (*validate_add_page)(unsigned long base, unsigned long size,
19903 unsigned int type);
19904 int (*have_wrcomb)(void);
19905-};
19906+} __do_const;
19907
19908 extern int generic_get_free_region(unsigned long base, unsigned long size,
19909 int replace_reg);
19910diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
19911index 1025f3c..824f677 100644
19912--- a/arch/x86/kernel/cpu/perf_event.c
19913+++ b/arch/x86/kernel/cpu/perf_event.c
19914@@ -1311,7 +1311,7 @@ static void __init pmu_check_apic(void)
19915 pr_info("no hardware sampling interrupt available.\n");
19916 }
19917
19918-static struct attribute_group x86_pmu_format_group = {
19919+static attribute_group_no_const x86_pmu_format_group = {
19920 .name = "format",
19921 .attrs = NULL,
19922 };
19923@@ -1410,7 +1410,7 @@ static struct attribute *events_attr[] = {
19924 NULL,
19925 };
19926
19927-static struct attribute_group x86_pmu_events_group = {
19928+static attribute_group_no_const x86_pmu_events_group = {
19929 .name = "events",
19930 .attrs = events_attr,
19931 };
19932@@ -1920,7 +1920,7 @@ static unsigned long get_segment_base(unsigned int segment)
19933 if (idx > GDT_ENTRIES)
19934 return 0;
19935
19936- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
19937+ desc = get_cpu_gdt_table(smp_processor_id());
19938 }
19939
19940 return get_desc_base(desc + idx);
19941@@ -2010,7 +2010,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
19942 break;
19943
19944 perf_callchain_store(entry, frame.return_address);
19945- fp = frame.next_frame;
19946+ fp = (const void __force_user *)frame.next_frame;
19947 }
19948 }
19949
19950diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
19951index a9e2207..d70c83a 100644
19952--- a/arch/x86/kernel/cpu/perf_event_intel.c
19953+++ b/arch/x86/kernel/cpu/perf_event_intel.c
19954@@ -2022,10 +2022,10 @@ __init int intel_pmu_init(void)
19955 * v2 and above have a perf capabilities MSR
19956 */
19957 if (version > 1) {
19958- u64 capabilities;
19959+ u64 capabilities = x86_pmu.intel_cap.capabilities;
19960
19961- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
19962- x86_pmu.intel_cap.capabilities = capabilities;
19963+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
19964+ x86_pmu.intel_cap.capabilities = capabilities;
19965 }
19966
19967 intel_ds_init();
19968diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
19969index 8aac56b..588fb13 100644
19970--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
19971+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
19972@@ -3093,7 +3093,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
19973 static int __init uncore_type_init(struct intel_uncore_type *type)
19974 {
19975 struct intel_uncore_pmu *pmus;
19976- struct attribute_group *attr_group;
19977+ attribute_group_no_const *attr_group;
19978 struct attribute **attrs;
19979 int i, j;
19980
19981@@ -3518,7 +3518,7 @@ static int
19982 return NOTIFY_OK;
19983 }
19984
19985-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
19986+static struct notifier_block uncore_cpu_nb = {
19987 .notifier_call = uncore_cpu_notifier,
19988 /*
19989 * to migrate uncore events, our notifier should be executed
19990diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
19991index f952891..4722ad4 100644
19992--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
19993+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
19994@@ -488,7 +488,7 @@ struct intel_uncore_box {
19995 struct uncore_event_desc {
19996 struct kobj_attribute attr;
19997 const char *config;
19998-};
19999+} __do_const;
20000
20001 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
20002 { \
20003diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
20004index 1e4dbcf..b9a34c2 100644
20005--- a/arch/x86/kernel/cpuid.c
20006+++ b/arch/x86/kernel/cpuid.c
20007@@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
20008 return notifier_from_errno(err);
20009 }
20010
20011-static struct notifier_block __refdata cpuid_class_cpu_notifier =
20012+static struct notifier_block cpuid_class_cpu_notifier =
20013 {
20014 .notifier_call = cpuid_class_cpu_callback,
20015 };
20016diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
20017index 74467fe..18793d5 100644
20018--- a/arch/x86/kernel/crash.c
20019+++ b/arch/x86/kernel/crash.c
20020@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
20021 {
20022 #ifdef CONFIG_X86_32
20023 struct pt_regs fixed_regs;
20024-#endif
20025
20026-#ifdef CONFIG_X86_32
20027- if (!user_mode_vm(regs)) {
20028+ if (!user_mode(regs)) {
20029 crash_fixup_ss_esp(&fixed_regs, regs);
20030 regs = &fixed_regs;
20031 }
20032diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
20033index afa64ad..dce67dd 100644
20034--- a/arch/x86/kernel/crash_dump_64.c
20035+++ b/arch/x86/kernel/crash_dump_64.c
20036@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
20037 return -ENOMEM;
20038
20039 if (userbuf) {
20040- if (copy_to_user(buf, vaddr + offset, csize)) {
20041+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
20042 iounmap(vaddr);
20043 return -EFAULT;
20044 }
20045diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
20046index 155a13f..1672b9b 100644
20047--- a/arch/x86/kernel/doublefault_32.c
20048+++ b/arch/x86/kernel/doublefault_32.c
20049@@ -11,7 +11,7 @@
20050
20051 #define DOUBLEFAULT_STACKSIZE (1024)
20052 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
20053-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
20054+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
20055
20056 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
20057
20058@@ -21,7 +21,7 @@ static void doublefault_fn(void)
20059 unsigned long gdt, tss;
20060
20061 native_store_gdt(&gdt_desc);
20062- gdt = gdt_desc.address;
20063+ gdt = (unsigned long)gdt_desc.address;
20064
20065 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
20066
20067@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
20068 /* 0x2 bit is always set */
20069 .flags = X86_EFLAGS_SF | 0x2,
20070 .sp = STACK_START,
20071- .es = __USER_DS,
20072+ .es = __KERNEL_DS,
20073 .cs = __KERNEL_CS,
20074 .ss = __KERNEL_DS,
20075- .ds = __USER_DS,
20076+ .ds = __KERNEL_DS,
20077 .fs = __KERNEL_PERCPU,
20078
20079 .__cr3 = __pa_nodebug(swapper_pg_dir),
20080diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
20081index deb6421..76bbc12 100644
20082--- a/arch/x86/kernel/dumpstack.c
20083+++ b/arch/x86/kernel/dumpstack.c
20084@@ -2,6 +2,9 @@
20085 * Copyright (C) 1991, 1992 Linus Torvalds
20086 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
20087 */
20088+#ifdef CONFIG_GRKERNSEC_HIDESYM
20089+#define __INCLUDED_BY_HIDESYM 1
20090+#endif
20091 #include <linux/kallsyms.h>
20092 #include <linux/kprobes.h>
20093 #include <linux/uaccess.h>
20094@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
20095 static void
20096 print_ftrace_graph_addr(unsigned long addr, void *data,
20097 const struct stacktrace_ops *ops,
20098- struct thread_info *tinfo, int *graph)
20099+ struct task_struct *task, int *graph)
20100 {
20101- struct task_struct *task;
20102 unsigned long ret_addr;
20103 int index;
20104
20105 if (addr != (unsigned long)return_to_handler)
20106 return;
20107
20108- task = tinfo->task;
20109 index = task->curr_ret_stack;
20110
20111 if (!task->ret_stack || index < *graph)
20112@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
20113 static inline void
20114 print_ftrace_graph_addr(unsigned long addr, void *data,
20115 const struct stacktrace_ops *ops,
20116- struct thread_info *tinfo, int *graph)
20117+ struct task_struct *task, int *graph)
20118 { }
20119 #endif
20120
20121@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
20122 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
20123 */
20124
20125-static inline int valid_stack_ptr(struct thread_info *tinfo,
20126- void *p, unsigned int size, void *end)
20127+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
20128 {
20129- void *t = tinfo;
20130 if (end) {
20131 if (p < end && p >= (end-THREAD_SIZE))
20132 return 1;
20133@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
20134 }
20135
20136 unsigned long
20137-print_context_stack(struct thread_info *tinfo,
20138+print_context_stack(struct task_struct *task, void *stack_start,
20139 unsigned long *stack, unsigned long bp,
20140 const struct stacktrace_ops *ops, void *data,
20141 unsigned long *end, int *graph)
20142 {
20143 struct stack_frame *frame = (struct stack_frame *)bp;
20144
20145- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
20146+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
20147 unsigned long addr;
20148
20149 addr = *stack;
20150@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
20151 } else {
20152 ops->address(data, addr, 0);
20153 }
20154- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
20155+ print_ftrace_graph_addr(addr, data, ops, task, graph);
20156 }
20157 stack++;
20158 }
20159@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
20160 EXPORT_SYMBOL_GPL(print_context_stack);
20161
20162 unsigned long
20163-print_context_stack_bp(struct thread_info *tinfo,
20164+print_context_stack_bp(struct task_struct *task, void *stack_start,
20165 unsigned long *stack, unsigned long bp,
20166 const struct stacktrace_ops *ops, void *data,
20167 unsigned long *end, int *graph)
20168@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
20169 struct stack_frame *frame = (struct stack_frame *)bp;
20170 unsigned long *ret_addr = &frame->return_address;
20171
20172- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
20173+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
20174 unsigned long addr = *ret_addr;
20175
20176 if (!__kernel_text_address(addr))
20177@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
20178 ops->address(data, addr, 1);
20179 frame = frame->next_frame;
20180 ret_addr = &frame->return_address;
20181- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
20182+ print_ftrace_graph_addr(addr, data, ops, task, graph);
20183 }
20184
20185 return (unsigned long)frame;
20186@@ -150,7 +149,7 @@ static int print_trace_stack(void *data, char *name)
20187 static void print_trace_address(void *data, unsigned long addr, int reliable)
20188 {
20189 touch_nmi_watchdog();
20190- printk(data);
20191+ printk("%s", (char *)data);
20192 printk_address(addr, reliable);
20193 }
20194
20195@@ -219,6 +218,8 @@ unsigned __kprobes long oops_begin(void)
20196 }
20197 EXPORT_SYMBOL_GPL(oops_begin);
20198
20199+extern void gr_handle_kernel_exploit(void);
20200+
20201 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
20202 {
20203 if (regs && kexec_should_crash(current))
20204@@ -240,7 +241,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
20205 panic("Fatal exception in interrupt");
20206 if (panic_on_oops)
20207 panic("Fatal exception");
20208- do_exit(signr);
20209+
20210+ gr_handle_kernel_exploit();
20211+
20212+ do_group_exit(signr);
20213 }
20214
20215 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
20216@@ -268,7 +272,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
20217 print_modules();
20218 show_regs(regs);
20219 #ifdef CONFIG_X86_32
20220- if (user_mode_vm(regs)) {
20221+ if (user_mode(regs)) {
20222 sp = regs->sp;
20223 ss = regs->ss & 0xffff;
20224 } else {
20225@@ -296,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
20226 unsigned long flags = oops_begin();
20227 int sig = SIGSEGV;
20228
20229- if (!user_mode_vm(regs))
20230+ if (!user_mode(regs))
20231 report_bug(regs->ip, regs);
20232
20233 if (__die(str, regs, err))
20234diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
20235index f2a1770..540657f 100644
20236--- a/arch/x86/kernel/dumpstack_32.c
20237+++ b/arch/x86/kernel/dumpstack_32.c
20238@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20239 bp = stack_frame(task, regs);
20240
20241 for (;;) {
20242- struct thread_info *context;
20243+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
20244
20245- context = (struct thread_info *)
20246- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
20247- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
20248+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
20249
20250- stack = (unsigned long *)context->previous_esp;
20251- if (!stack)
20252+ if (stack_start == task_stack_page(task))
20253 break;
20254+ stack = *(unsigned long **)stack_start;
20255 if (ops->stack(data, "IRQ") < 0)
20256 break;
20257 touch_nmi_watchdog();
20258@@ -87,27 +85,28 @@ void show_regs(struct pt_regs *regs)
20259 int i;
20260
20261 show_regs_print_info(KERN_EMERG);
20262- __show_regs(regs, !user_mode_vm(regs));
20263+ __show_regs(regs, !user_mode(regs));
20264
20265 /*
20266 * When in-kernel, we also print out the stack and code at the
20267 * time of the fault..
20268 */
20269- if (!user_mode_vm(regs)) {
20270+ if (!user_mode(regs)) {
20271 unsigned int code_prologue = code_bytes * 43 / 64;
20272 unsigned int code_len = code_bytes;
20273 unsigned char c;
20274 u8 *ip;
20275+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
20276
20277 pr_emerg("Stack:\n");
20278 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
20279
20280 pr_emerg("Code:");
20281
20282- ip = (u8 *)regs->ip - code_prologue;
20283+ ip = (u8 *)regs->ip - code_prologue + cs_base;
20284 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
20285 /* try starting at IP */
20286- ip = (u8 *)regs->ip;
20287+ ip = (u8 *)regs->ip + cs_base;
20288 code_len = code_len - code_prologue + 1;
20289 }
20290 for (i = 0; i < code_len; i++, ip++) {
20291@@ -116,7 +115,7 @@ void show_regs(struct pt_regs *regs)
20292 pr_cont(" Bad EIP value.");
20293 break;
20294 }
20295- if (ip == (u8 *)regs->ip)
20296+ if (ip == (u8 *)regs->ip + cs_base)
20297 pr_cont(" <%02x>", c);
20298 else
20299 pr_cont(" %02x", c);
20300@@ -129,6 +128,7 @@ int is_valid_bugaddr(unsigned long ip)
20301 {
20302 unsigned short ud2;
20303
20304+ ip = ktla_ktva(ip);
20305 if (ip < PAGE_OFFSET)
20306 return 0;
20307 if (probe_kernel_address((unsigned short *)ip, ud2))
20308@@ -136,3 +136,15 @@ int is_valid_bugaddr(unsigned long ip)
20309
20310 return ud2 == 0x0b0f;
20311 }
20312+
20313+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
20314+void pax_check_alloca(unsigned long size)
20315+{
20316+ unsigned long sp = (unsigned long)&sp, stack_left;
20317+
20318+ /* all kernel stacks are of the same size */
20319+ stack_left = sp & (THREAD_SIZE - 1);
20320+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
20321+}
20322+EXPORT_SYMBOL(pax_check_alloca);
20323+#endif
20324diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
20325index addb207..99635fa 100644
20326--- a/arch/x86/kernel/dumpstack_64.c
20327+++ b/arch/x86/kernel/dumpstack_64.c
20328@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20329 unsigned long *irq_stack_end =
20330 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
20331 unsigned used = 0;
20332- struct thread_info *tinfo;
20333 int graph = 0;
20334 unsigned long dummy;
20335+ void *stack_start;
20336
20337 if (!task)
20338 task = current;
20339@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20340 * current stack address. If the stacks consist of nested
20341 * exceptions
20342 */
20343- tinfo = task_thread_info(task);
20344 for (;;) {
20345 char *id;
20346 unsigned long *estack_end;
20347+
20348 estack_end = in_exception_stack(cpu, (unsigned long)stack,
20349 &used, &id);
20350
20351@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20352 if (ops->stack(data, id) < 0)
20353 break;
20354
20355- bp = ops->walk_stack(tinfo, stack, bp, ops,
20356+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
20357 data, estack_end, &graph);
20358 ops->stack(data, "<EOE>");
20359 /*
20360@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20361 * second-to-last pointer (index -2 to end) in the
20362 * exception stack:
20363 */
20364+ if ((u16)estack_end[-1] != __KERNEL_DS)
20365+ goto out;
20366 stack = (unsigned long *) estack_end[-2];
20367 continue;
20368 }
20369@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20370 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
20371 if (ops->stack(data, "IRQ") < 0)
20372 break;
20373- bp = ops->walk_stack(tinfo, stack, bp,
20374+ bp = ops->walk_stack(task, irq_stack, stack, bp,
20375 ops, data, irq_stack_end, &graph);
20376 /*
20377 * We link to the next stack (which would be
20378@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20379 /*
20380 * This handles the process stack:
20381 */
20382- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
20383+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
20384+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
20385+out:
20386 put_cpu();
20387 }
20388 EXPORT_SYMBOL(dump_trace);
20389@@ -300,3 +304,50 @@ int is_valid_bugaddr(unsigned long ip)
20390
20391 return ud2 == 0x0b0f;
20392 }
20393+
20394+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
20395+void pax_check_alloca(unsigned long size)
20396+{
20397+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
20398+ unsigned cpu, used;
20399+ char *id;
20400+
20401+ /* check the process stack first */
20402+ stack_start = (unsigned long)task_stack_page(current);
20403+ stack_end = stack_start + THREAD_SIZE;
20404+ if (likely(stack_start <= sp && sp < stack_end)) {
20405+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
20406+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
20407+ return;
20408+ }
20409+
20410+ cpu = get_cpu();
20411+
20412+ /* check the irq stacks */
20413+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
20414+ stack_start = stack_end - IRQ_STACK_SIZE;
20415+ if (stack_start <= sp && sp < stack_end) {
20416+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
20417+ put_cpu();
20418+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
20419+ return;
20420+ }
20421+
20422+ /* check the exception stacks */
20423+ used = 0;
20424+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
20425+ stack_start = stack_end - EXCEPTION_STKSZ;
20426+ if (stack_end && stack_start <= sp && sp < stack_end) {
20427+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
20428+ put_cpu();
20429+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
20430+ return;
20431+ }
20432+
20433+ put_cpu();
20434+
20435+ /* unknown stack */
20436+ BUG();
20437+}
20438+EXPORT_SYMBOL(pax_check_alloca);
20439+#endif
20440diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
20441index d32abea..74daf4f 100644
20442--- a/arch/x86/kernel/e820.c
20443+++ b/arch/x86/kernel/e820.c
20444@@ -800,8 +800,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
20445
20446 static void early_panic(char *msg)
20447 {
20448- early_printk(msg);
20449- panic(msg);
20450+ early_printk("%s", msg);
20451+ panic("%s", msg);
20452 }
20453
20454 static int userdef __initdata;
20455diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
20456index d15f575..d692043 100644
20457--- a/arch/x86/kernel/early_printk.c
20458+++ b/arch/x86/kernel/early_printk.c
20459@@ -7,6 +7,7 @@
20460 #include <linux/pci_regs.h>
20461 #include <linux/pci_ids.h>
20462 #include <linux/errno.h>
20463+#include <linux/sched.h>
20464 #include <asm/io.h>
20465 #include <asm/processor.h>
20466 #include <asm/fcntl.h>
20467diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
20468index 8f3e2de..6b71e39 100644
20469--- a/arch/x86/kernel/entry_32.S
20470+++ b/arch/x86/kernel/entry_32.S
20471@@ -177,13 +177,153 @@
20472 /*CFI_REL_OFFSET gs, PT_GS*/
20473 .endm
20474 .macro SET_KERNEL_GS reg
20475+
20476+#ifdef CONFIG_CC_STACKPROTECTOR
20477 movl $(__KERNEL_STACK_CANARY), \reg
20478+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
20479+ movl $(__USER_DS), \reg
20480+#else
20481+ xorl \reg, \reg
20482+#endif
20483+
20484 movl \reg, %gs
20485 .endm
20486
20487 #endif /* CONFIG_X86_32_LAZY_GS */
20488
20489-.macro SAVE_ALL
20490+.macro pax_enter_kernel
20491+#ifdef CONFIG_PAX_KERNEXEC
20492+ call pax_enter_kernel
20493+#endif
20494+.endm
20495+
20496+.macro pax_exit_kernel
20497+#ifdef CONFIG_PAX_KERNEXEC
20498+ call pax_exit_kernel
20499+#endif
20500+.endm
20501+
20502+#ifdef CONFIG_PAX_KERNEXEC
20503+ENTRY(pax_enter_kernel)
20504+#ifdef CONFIG_PARAVIRT
20505+ pushl %eax
20506+ pushl %ecx
20507+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
20508+ mov %eax, %esi
20509+#else
20510+ mov %cr0, %esi
20511+#endif
20512+ bts $16, %esi
20513+ jnc 1f
20514+ mov %cs, %esi
20515+ cmp $__KERNEL_CS, %esi
20516+ jz 3f
20517+ ljmp $__KERNEL_CS, $3f
20518+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
20519+2:
20520+#ifdef CONFIG_PARAVIRT
20521+ mov %esi, %eax
20522+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
20523+#else
20524+ mov %esi, %cr0
20525+#endif
20526+3:
20527+#ifdef CONFIG_PARAVIRT
20528+ popl %ecx
20529+ popl %eax
20530+#endif
20531+ ret
20532+ENDPROC(pax_enter_kernel)
20533+
20534+ENTRY(pax_exit_kernel)
20535+#ifdef CONFIG_PARAVIRT
20536+ pushl %eax
20537+ pushl %ecx
20538+#endif
20539+ mov %cs, %esi
20540+ cmp $__KERNEXEC_KERNEL_CS, %esi
20541+ jnz 2f
20542+#ifdef CONFIG_PARAVIRT
20543+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
20544+ mov %eax, %esi
20545+#else
20546+ mov %cr0, %esi
20547+#endif
20548+ btr $16, %esi
20549+ ljmp $__KERNEL_CS, $1f
20550+1:
20551+#ifdef CONFIG_PARAVIRT
20552+ mov %esi, %eax
20553+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
20554+#else
20555+ mov %esi, %cr0
20556+#endif
20557+2:
20558+#ifdef CONFIG_PARAVIRT
20559+ popl %ecx
20560+ popl %eax
20561+#endif
20562+ ret
20563+ENDPROC(pax_exit_kernel)
20564+#endif
20565+
20566+ .macro pax_erase_kstack
20567+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
20568+ call pax_erase_kstack
20569+#endif
20570+ .endm
20571+
20572+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
20573+/*
20574+ * ebp: thread_info
20575+ */
20576+ENTRY(pax_erase_kstack)
20577+ pushl %edi
20578+ pushl %ecx
20579+ pushl %eax
20580+
20581+ mov TI_lowest_stack(%ebp), %edi
20582+ mov $-0xBEEF, %eax
20583+ std
20584+
20585+1: mov %edi, %ecx
20586+ and $THREAD_SIZE_asm - 1, %ecx
20587+ shr $2, %ecx
20588+ repne scasl
20589+ jecxz 2f
20590+
20591+ cmp $2*16, %ecx
20592+ jc 2f
20593+
20594+ mov $2*16, %ecx
20595+ repe scasl
20596+ jecxz 2f
20597+ jne 1b
20598+
20599+2: cld
20600+ mov %esp, %ecx
20601+ sub %edi, %ecx
20602+
20603+ cmp $THREAD_SIZE_asm, %ecx
20604+ jb 3f
20605+ ud2
20606+3:
20607+
20608+ shr $2, %ecx
20609+ rep stosl
20610+
20611+ mov TI_task_thread_sp0(%ebp), %edi
20612+ sub $128, %edi
20613+ mov %edi, TI_lowest_stack(%ebp)
20614+
20615+ popl %eax
20616+ popl %ecx
20617+ popl %edi
20618+ ret
20619+ENDPROC(pax_erase_kstack)
20620+#endif
20621+
20622+.macro __SAVE_ALL _DS
20623 cld
20624 PUSH_GS
20625 pushl_cfi %fs
20626@@ -206,7 +346,7 @@
20627 CFI_REL_OFFSET ecx, 0
20628 pushl_cfi %ebx
20629 CFI_REL_OFFSET ebx, 0
20630- movl $(__USER_DS), %edx
20631+ movl $\_DS, %edx
20632 movl %edx, %ds
20633 movl %edx, %es
20634 movl $(__KERNEL_PERCPU), %edx
20635@@ -214,6 +354,15 @@
20636 SET_KERNEL_GS %edx
20637 .endm
20638
20639+.macro SAVE_ALL
20640+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
20641+ __SAVE_ALL __KERNEL_DS
20642+ pax_enter_kernel
20643+#else
20644+ __SAVE_ALL __USER_DS
20645+#endif
20646+.endm
20647+
20648 .macro RESTORE_INT_REGS
20649 popl_cfi %ebx
20650 CFI_RESTORE ebx
20651@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
20652 popfl_cfi
20653 jmp syscall_exit
20654 CFI_ENDPROC
20655-END(ret_from_fork)
20656+ENDPROC(ret_from_fork)
20657
20658 ENTRY(ret_from_kernel_thread)
20659 CFI_STARTPROC
20660@@ -344,7 +493,15 @@ ret_from_intr:
20661 andl $SEGMENT_RPL_MASK, %eax
20662 #endif
20663 cmpl $USER_RPL, %eax
20664+
20665+#ifdef CONFIG_PAX_KERNEXEC
20666+ jae resume_userspace
20667+
20668+ pax_exit_kernel
20669+ jmp resume_kernel
20670+#else
20671 jb resume_kernel # not returning to v8086 or userspace
20672+#endif
20673
20674 ENTRY(resume_userspace)
20675 LOCKDEP_SYS_EXIT
20676@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
20677 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
20678 # int/exception return?
20679 jne work_pending
20680- jmp restore_all
20681-END(ret_from_exception)
20682+ jmp restore_all_pax
20683+ENDPROC(ret_from_exception)
20684
20685 #ifdef CONFIG_PREEMPT
20686 ENTRY(resume_kernel)
20687@@ -372,7 +529,7 @@ need_resched:
20688 jz restore_all
20689 call preempt_schedule_irq
20690 jmp need_resched
20691-END(resume_kernel)
20692+ENDPROC(resume_kernel)
20693 #endif
20694 CFI_ENDPROC
20695 /*
20696@@ -406,30 +563,45 @@ sysenter_past_esp:
20697 /*CFI_REL_OFFSET cs, 0*/
20698 /*
20699 * Push current_thread_info()->sysenter_return to the stack.
20700- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
20701- * pushed above; +8 corresponds to copy_thread's esp0 setting.
20702 */
20703- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
20704+ pushl_cfi $0
20705 CFI_REL_OFFSET eip, 0
20706
20707 pushl_cfi %eax
20708 SAVE_ALL
20709+ GET_THREAD_INFO(%ebp)
20710+ movl TI_sysenter_return(%ebp),%ebp
20711+ movl %ebp,PT_EIP(%esp)
20712 ENABLE_INTERRUPTS(CLBR_NONE)
20713
20714 /*
20715 * Load the potential sixth argument from user stack.
20716 * Careful about security.
20717 */
20718+ movl PT_OLDESP(%esp),%ebp
20719+
20720+#ifdef CONFIG_PAX_MEMORY_UDEREF
20721+ mov PT_OLDSS(%esp),%ds
20722+1: movl %ds:(%ebp),%ebp
20723+ push %ss
20724+ pop %ds
20725+#else
20726 cmpl $__PAGE_OFFSET-3,%ebp
20727 jae syscall_fault
20728 ASM_STAC
20729 1: movl (%ebp),%ebp
20730 ASM_CLAC
20731+#endif
20732+
20733 movl %ebp,PT_EBP(%esp)
20734 _ASM_EXTABLE(1b,syscall_fault)
20735
20736 GET_THREAD_INFO(%ebp)
20737
20738+#ifdef CONFIG_PAX_RANDKSTACK
20739+ pax_erase_kstack
20740+#endif
20741+
20742 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
20743 jnz sysenter_audit
20744 sysenter_do_call:
20745@@ -444,12 +616,24 @@ sysenter_do_call:
20746 testl $_TIF_ALLWORK_MASK, %ecx
20747 jne sysexit_audit
20748 sysenter_exit:
20749+
20750+#ifdef CONFIG_PAX_RANDKSTACK
20751+ pushl_cfi %eax
20752+ movl %esp, %eax
20753+ call pax_randomize_kstack
20754+ popl_cfi %eax
20755+#endif
20756+
20757+ pax_erase_kstack
20758+
20759 /* if something modifies registers it must also disable sysexit */
20760 movl PT_EIP(%esp), %edx
20761 movl PT_OLDESP(%esp), %ecx
20762 xorl %ebp,%ebp
20763 TRACE_IRQS_ON
20764 1: mov PT_FS(%esp), %fs
20765+2: mov PT_DS(%esp), %ds
20766+3: mov PT_ES(%esp), %es
20767 PTGS_TO_GS
20768 ENABLE_INTERRUPTS_SYSEXIT
20769
20770@@ -466,6 +650,9 @@ sysenter_audit:
20771 movl %eax,%edx /* 2nd arg: syscall number */
20772 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
20773 call __audit_syscall_entry
20774+
20775+ pax_erase_kstack
20776+
20777 pushl_cfi %ebx
20778 movl PT_EAX(%esp),%eax /* reload syscall number */
20779 jmp sysenter_do_call
20780@@ -491,10 +678,16 @@ sysexit_audit:
20781
20782 CFI_ENDPROC
20783 .pushsection .fixup,"ax"
20784-2: movl $0,PT_FS(%esp)
20785+4: movl $0,PT_FS(%esp)
20786+ jmp 1b
20787+5: movl $0,PT_DS(%esp)
20788+ jmp 1b
20789+6: movl $0,PT_ES(%esp)
20790 jmp 1b
20791 .popsection
20792- _ASM_EXTABLE(1b,2b)
20793+ _ASM_EXTABLE(1b,4b)
20794+ _ASM_EXTABLE(2b,5b)
20795+ _ASM_EXTABLE(3b,6b)
20796 PTGS_TO_GS_EX
20797 ENDPROC(ia32_sysenter_target)
20798
20799@@ -509,6 +702,11 @@ ENTRY(system_call)
20800 pushl_cfi %eax # save orig_eax
20801 SAVE_ALL
20802 GET_THREAD_INFO(%ebp)
20803+
20804+#ifdef CONFIG_PAX_RANDKSTACK
20805+ pax_erase_kstack
20806+#endif
20807+
20808 # system call tracing in operation / emulation
20809 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
20810 jnz syscall_trace_entry
20811@@ -527,6 +725,15 @@ syscall_exit:
20812 testl $_TIF_ALLWORK_MASK, %ecx # current->work
20813 jne syscall_exit_work
20814
20815+restore_all_pax:
20816+
20817+#ifdef CONFIG_PAX_RANDKSTACK
20818+ movl %esp, %eax
20819+ call pax_randomize_kstack
20820+#endif
20821+
20822+ pax_erase_kstack
20823+
20824 restore_all:
20825 TRACE_IRQS_IRET
20826 restore_all_notrace:
20827@@ -583,14 +790,34 @@ ldt_ss:
20828 * compensating for the offset by changing to the ESPFIX segment with
20829 * a base address that matches for the difference.
20830 */
20831-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
20832+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
20833 mov %esp, %edx /* load kernel esp */
20834 mov PT_OLDESP(%esp), %eax /* load userspace esp */
20835 mov %dx, %ax /* eax: new kernel esp */
20836 sub %eax, %edx /* offset (low word is 0) */
20837+#ifdef CONFIG_SMP
20838+ movl PER_CPU_VAR(cpu_number), %ebx
20839+ shll $PAGE_SHIFT_asm, %ebx
20840+ addl $cpu_gdt_table, %ebx
20841+#else
20842+ movl $cpu_gdt_table, %ebx
20843+#endif
20844 shr $16, %edx
20845- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
20846- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
20847+
20848+#ifdef CONFIG_PAX_KERNEXEC
20849+ mov %cr0, %esi
20850+ btr $16, %esi
20851+ mov %esi, %cr0
20852+#endif
20853+
20854+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
20855+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
20856+
20857+#ifdef CONFIG_PAX_KERNEXEC
20858+ bts $16, %esi
20859+ mov %esi, %cr0
20860+#endif
20861+
20862 pushl_cfi $__ESPFIX_SS
20863 pushl_cfi %eax /* new kernel esp */
20864 /* Disable interrupts, but do not irqtrace this section: we
20865@@ -619,20 +846,18 @@ work_resched:
20866 movl TI_flags(%ebp), %ecx
20867 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
20868 # than syscall tracing?
20869- jz restore_all
20870+ jz restore_all_pax
20871 testb $_TIF_NEED_RESCHED, %cl
20872 jnz work_resched
20873
20874 work_notifysig: # deal with pending signals and
20875 # notify-resume requests
20876+ movl %esp, %eax
20877 #ifdef CONFIG_VM86
20878 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
20879- movl %esp, %eax
20880 jne work_notifysig_v86 # returning to kernel-space or
20881 # vm86-space
20882 1:
20883-#else
20884- movl %esp, %eax
20885 #endif
20886 TRACE_IRQS_ON
20887 ENABLE_INTERRUPTS(CLBR_NONE)
20888@@ -653,7 +878,7 @@ work_notifysig_v86:
20889 movl %eax, %esp
20890 jmp 1b
20891 #endif
20892-END(work_pending)
20893+ENDPROC(work_pending)
20894
20895 # perform syscall exit tracing
20896 ALIGN
20897@@ -661,11 +886,14 @@ syscall_trace_entry:
20898 movl $-ENOSYS,PT_EAX(%esp)
20899 movl %esp, %eax
20900 call syscall_trace_enter
20901+
20902+ pax_erase_kstack
20903+
20904 /* What it returned is what we'll actually use. */
20905 cmpl $(NR_syscalls), %eax
20906 jnae syscall_call
20907 jmp syscall_exit
20908-END(syscall_trace_entry)
20909+ENDPROC(syscall_trace_entry)
20910
20911 # perform syscall exit tracing
20912 ALIGN
20913@@ -678,21 +906,25 @@ syscall_exit_work:
20914 movl %esp, %eax
20915 call syscall_trace_leave
20916 jmp resume_userspace
20917-END(syscall_exit_work)
20918+ENDPROC(syscall_exit_work)
20919 CFI_ENDPROC
20920
20921 RING0_INT_FRAME # can't unwind into user space anyway
20922 syscall_fault:
20923+#ifdef CONFIG_PAX_MEMORY_UDEREF
20924+ push %ss
20925+ pop %ds
20926+#endif
20927 ASM_CLAC
20928 GET_THREAD_INFO(%ebp)
20929 movl $-EFAULT,PT_EAX(%esp)
20930 jmp resume_userspace
20931-END(syscall_fault)
20932+ENDPROC(syscall_fault)
20933
20934 syscall_badsys:
20935 movl $-ENOSYS,PT_EAX(%esp)
20936 jmp resume_userspace
20937-END(syscall_badsys)
20938+ENDPROC(syscall_badsys)
20939 CFI_ENDPROC
20940 /*
20941 * End of kprobes section
20942@@ -708,8 +940,15 @@ END(syscall_badsys)
20943 * normal stack and adjusts ESP with the matching offset.
20944 */
20945 /* fixup the stack */
20946- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
20947- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
20948+#ifdef CONFIG_SMP
20949+ movl PER_CPU_VAR(cpu_number), %ebx
20950+ shll $PAGE_SHIFT_asm, %ebx
20951+ addl $cpu_gdt_table, %ebx
20952+#else
20953+ movl $cpu_gdt_table, %ebx
20954+#endif
20955+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
20956+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
20957 shl $16, %eax
20958 addl %esp, %eax /* the adjusted stack pointer */
20959 pushl_cfi $__KERNEL_DS
20960@@ -762,7 +1001,7 @@ vector=vector+1
20961 .endr
20962 2: jmp common_interrupt
20963 .endr
20964-END(irq_entries_start)
20965+ENDPROC(irq_entries_start)
20966
20967 .previous
20968 END(interrupt)
20969@@ -813,7 +1052,7 @@ ENTRY(coprocessor_error)
20970 pushl_cfi $do_coprocessor_error
20971 jmp error_code
20972 CFI_ENDPROC
20973-END(coprocessor_error)
20974+ENDPROC(coprocessor_error)
20975
20976 ENTRY(simd_coprocessor_error)
20977 RING0_INT_FRAME
20978@@ -826,7 +1065,7 @@ ENTRY(simd_coprocessor_error)
20979 .section .altinstructions,"a"
20980 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
20981 .previous
20982-.section .altinstr_replacement,"ax"
20983+.section .altinstr_replacement,"a"
20984 663: pushl $do_simd_coprocessor_error
20985 664:
20986 .previous
20987@@ -835,7 +1074,7 @@ ENTRY(simd_coprocessor_error)
20988 #endif
20989 jmp error_code
20990 CFI_ENDPROC
20991-END(simd_coprocessor_error)
20992+ENDPROC(simd_coprocessor_error)
20993
20994 ENTRY(device_not_available)
20995 RING0_INT_FRAME
20996@@ -844,18 +1083,18 @@ ENTRY(device_not_available)
20997 pushl_cfi $do_device_not_available
20998 jmp error_code
20999 CFI_ENDPROC
21000-END(device_not_available)
21001+ENDPROC(device_not_available)
21002
21003 #ifdef CONFIG_PARAVIRT
21004 ENTRY(native_iret)
21005 iret
21006 _ASM_EXTABLE(native_iret, iret_exc)
21007-END(native_iret)
21008+ENDPROC(native_iret)
21009
21010 ENTRY(native_irq_enable_sysexit)
21011 sti
21012 sysexit
21013-END(native_irq_enable_sysexit)
21014+ENDPROC(native_irq_enable_sysexit)
21015 #endif
21016
21017 ENTRY(overflow)
21018@@ -865,7 +1104,7 @@ ENTRY(overflow)
21019 pushl_cfi $do_overflow
21020 jmp error_code
21021 CFI_ENDPROC
21022-END(overflow)
21023+ENDPROC(overflow)
21024
21025 ENTRY(bounds)
21026 RING0_INT_FRAME
21027@@ -874,7 +1113,7 @@ ENTRY(bounds)
21028 pushl_cfi $do_bounds
21029 jmp error_code
21030 CFI_ENDPROC
21031-END(bounds)
21032+ENDPROC(bounds)
21033
21034 ENTRY(invalid_op)
21035 RING0_INT_FRAME
21036@@ -883,7 +1122,7 @@ ENTRY(invalid_op)
21037 pushl_cfi $do_invalid_op
21038 jmp error_code
21039 CFI_ENDPROC
21040-END(invalid_op)
21041+ENDPROC(invalid_op)
21042
21043 ENTRY(coprocessor_segment_overrun)
21044 RING0_INT_FRAME
21045@@ -892,7 +1131,7 @@ ENTRY(coprocessor_segment_overrun)
21046 pushl_cfi $do_coprocessor_segment_overrun
21047 jmp error_code
21048 CFI_ENDPROC
21049-END(coprocessor_segment_overrun)
21050+ENDPROC(coprocessor_segment_overrun)
21051
21052 ENTRY(invalid_TSS)
21053 RING0_EC_FRAME
21054@@ -900,7 +1139,7 @@ ENTRY(invalid_TSS)
21055 pushl_cfi $do_invalid_TSS
21056 jmp error_code
21057 CFI_ENDPROC
21058-END(invalid_TSS)
21059+ENDPROC(invalid_TSS)
21060
21061 ENTRY(segment_not_present)
21062 RING0_EC_FRAME
21063@@ -908,7 +1147,7 @@ ENTRY(segment_not_present)
21064 pushl_cfi $do_segment_not_present
21065 jmp error_code
21066 CFI_ENDPROC
21067-END(segment_not_present)
21068+ENDPROC(segment_not_present)
21069
21070 ENTRY(stack_segment)
21071 RING0_EC_FRAME
21072@@ -916,7 +1155,7 @@ ENTRY(stack_segment)
21073 pushl_cfi $do_stack_segment
21074 jmp error_code
21075 CFI_ENDPROC
21076-END(stack_segment)
21077+ENDPROC(stack_segment)
21078
21079 ENTRY(alignment_check)
21080 RING0_EC_FRAME
21081@@ -924,7 +1163,7 @@ ENTRY(alignment_check)
21082 pushl_cfi $do_alignment_check
21083 jmp error_code
21084 CFI_ENDPROC
21085-END(alignment_check)
21086+ENDPROC(alignment_check)
21087
21088 ENTRY(divide_error)
21089 RING0_INT_FRAME
21090@@ -933,7 +1172,7 @@ ENTRY(divide_error)
21091 pushl_cfi $do_divide_error
21092 jmp error_code
21093 CFI_ENDPROC
21094-END(divide_error)
21095+ENDPROC(divide_error)
21096
21097 #ifdef CONFIG_X86_MCE
21098 ENTRY(machine_check)
21099@@ -943,7 +1182,7 @@ ENTRY(machine_check)
21100 pushl_cfi machine_check_vector
21101 jmp error_code
21102 CFI_ENDPROC
21103-END(machine_check)
21104+ENDPROC(machine_check)
21105 #endif
21106
21107 ENTRY(spurious_interrupt_bug)
21108@@ -953,7 +1192,7 @@ ENTRY(spurious_interrupt_bug)
21109 pushl_cfi $do_spurious_interrupt_bug
21110 jmp error_code
21111 CFI_ENDPROC
21112-END(spurious_interrupt_bug)
21113+ENDPROC(spurious_interrupt_bug)
21114 /*
21115 * End of kprobes section
21116 */
21117@@ -1063,7 +1302,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
21118
21119 ENTRY(mcount)
21120 ret
21121-END(mcount)
21122+ENDPROC(mcount)
21123
21124 ENTRY(ftrace_caller)
21125 cmpl $0, function_trace_stop
21126@@ -1096,7 +1335,7 @@ ftrace_graph_call:
21127 .globl ftrace_stub
21128 ftrace_stub:
21129 ret
21130-END(ftrace_caller)
21131+ENDPROC(ftrace_caller)
21132
21133 ENTRY(ftrace_regs_caller)
21134 pushf /* push flags before compare (in cs location) */
21135@@ -1197,7 +1436,7 @@ trace:
21136 popl %ecx
21137 popl %eax
21138 jmp ftrace_stub
21139-END(mcount)
21140+ENDPROC(mcount)
21141 #endif /* CONFIG_DYNAMIC_FTRACE */
21142 #endif /* CONFIG_FUNCTION_TRACER */
21143
21144@@ -1215,7 +1454,7 @@ ENTRY(ftrace_graph_caller)
21145 popl %ecx
21146 popl %eax
21147 ret
21148-END(ftrace_graph_caller)
21149+ENDPROC(ftrace_graph_caller)
21150
21151 .globl return_to_handler
21152 return_to_handler:
21153@@ -1271,15 +1510,18 @@ error_code:
21154 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
21155 REG_TO_PTGS %ecx
21156 SET_KERNEL_GS %ecx
21157- movl $(__USER_DS), %ecx
21158+ movl $(__KERNEL_DS), %ecx
21159 movl %ecx, %ds
21160 movl %ecx, %es
21161+
21162+ pax_enter_kernel
21163+
21164 TRACE_IRQS_OFF
21165 movl %esp,%eax # pt_regs pointer
21166 call *%edi
21167 jmp ret_from_exception
21168 CFI_ENDPROC
21169-END(page_fault)
21170+ENDPROC(page_fault)
21171
21172 /*
21173 * Debug traps and NMI can happen at the one SYSENTER instruction
21174@@ -1322,7 +1564,7 @@ debug_stack_correct:
21175 call do_debug
21176 jmp ret_from_exception
21177 CFI_ENDPROC
21178-END(debug)
21179+ENDPROC(debug)
21180
21181 /*
21182 * NMI is doubly nasty. It can happen _while_ we're handling
21183@@ -1360,6 +1602,9 @@ nmi_stack_correct:
21184 xorl %edx,%edx # zero error code
21185 movl %esp,%eax # pt_regs pointer
21186 call do_nmi
21187+
21188+ pax_exit_kernel
21189+
21190 jmp restore_all_notrace
21191 CFI_ENDPROC
21192
21193@@ -1396,12 +1641,15 @@ nmi_espfix_stack:
21194 FIXUP_ESPFIX_STACK # %eax == %esp
21195 xorl %edx,%edx # zero error code
21196 call do_nmi
21197+
21198+ pax_exit_kernel
21199+
21200 RESTORE_REGS
21201 lss 12+4(%esp), %esp # back to espfix stack
21202 CFI_ADJUST_CFA_OFFSET -24
21203 jmp irq_return
21204 CFI_ENDPROC
21205-END(nmi)
21206+ENDPROC(nmi)
21207
21208 ENTRY(int3)
21209 RING0_INT_FRAME
21210@@ -1414,14 +1662,14 @@ ENTRY(int3)
21211 call do_int3
21212 jmp ret_from_exception
21213 CFI_ENDPROC
21214-END(int3)
21215+ENDPROC(int3)
21216
21217 ENTRY(general_protection)
21218 RING0_EC_FRAME
21219 pushl_cfi $do_general_protection
21220 jmp error_code
21221 CFI_ENDPROC
21222-END(general_protection)
21223+ENDPROC(general_protection)
21224
21225 #ifdef CONFIG_KVM_GUEST
21226 ENTRY(async_page_fault)
21227@@ -1430,7 +1678,7 @@ ENTRY(async_page_fault)
21228 pushl_cfi $do_async_page_fault
21229 jmp error_code
21230 CFI_ENDPROC
21231-END(async_page_fault)
21232+ENDPROC(async_page_fault)
21233 #endif
21234
21235 /*
21236diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
21237index 7272089..0b74104 100644
21238--- a/arch/x86/kernel/entry_64.S
21239+++ b/arch/x86/kernel/entry_64.S
21240@@ -59,6 +59,8 @@
21241 #include <asm/context_tracking.h>
21242 #include <asm/smap.h>
21243 #include <linux/err.h>
21244+#include <asm/pgtable.h>
21245+#include <asm/alternative-asm.h>
21246
21247 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
21248 #include <linux/elf-em.h>
21249@@ -80,8 +82,9 @@
21250 #ifdef CONFIG_DYNAMIC_FTRACE
21251
21252 ENTRY(function_hook)
21253+ pax_force_retaddr
21254 retq
21255-END(function_hook)
21256+ENDPROC(function_hook)
21257
21258 /* skip is set if stack has been adjusted */
21259 .macro ftrace_caller_setup skip=0
21260@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
21261 #endif
21262
21263 GLOBAL(ftrace_stub)
21264+ pax_force_retaddr
21265 retq
21266-END(ftrace_caller)
21267+ENDPROC(ftrace_caller)
21268
21269 ENTRY(ftrace_regs_caller)
21270 /* Save the current flags before compare (in SS location)*/
21271@@ -191,7 +195,7 @@ ftrace_restore_flags:
21272 popfq
21273 jmp ftrace_stub
21274
21275-END(ftrace_regs_caller)
21276+ENDPROC(ftrace_regs_caller)
21277
21278
21279 #else /* ! CONFIG_DYNAMIC_FTRACE */
21280@@ -212,6 +216,7 @@ ENTRY(function_hook)
21281 #endif
21282
21283 GLOBAL(ftrace_stub)
21284+ pax_force_retaddr
21285 retq
21286
21287 trace:
21288@@ -225,12 +230,13 @@ trace:
21289 #endif
21290 subq $MCOUNT_INSN_SIZE, %rdi
21291
21292+ pax_force_fptr ftrace_trace_function
21293 call *ftrace_trace_function
21294
21295 MCOUNT_RESTORE_FRAME
21296
21297 jmp ftrace_stub
21298-END(function_hook)
21299+ENDPROC(function_hook)
21300 #endif /* CONFIG_DYNAMIC_FTRACE */
21301 #endif /* CONFIG_FUNCTION_TRACER */
21302
21303@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
21304
21305 MCOUNT_RESTORE_FRAME
21306
21307+ pax_force_retaddr
21308 retq
21309-END(ftrace_graph_caller)
21310+ENDPROC(ftrace_graph_caller)
21311
21312 GLOBAL(return_to_handler)
21313 subq $24, %rsp
21314@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
21315 movq 8(%rsp), %rdx
21316 movq (%rsp), %rax
21317 addq $24, %rsp
21318+ pax_force_fptr %rdi
21319 jmp *%rdi
21320+ENDPROC(return_to_handler)
21321 #endif
21322
21323
21324@@ -284,6 +293,430 @@ ENTRY(native_usergs_sysret64)
21325 ENDPROC(native_usergs_sysret64)
21326 #endif /* CONFIG_PARAVIRT */
21327
21328+ .macro ljmpq sel, off
21329+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
21330+ .byte 0x48; ljmp *1234f(%rip)
21331+ .pushsection .rodata
21332+ .align 16
21333+ 1234: .quad \off; .word \sel
21334+ .popsection
21335+#else
21336+ pushq $\sel
21337+ pushq $\off
21338+ lretq
21339+#endif
21340+ .endm
21341+
21342+ .macro pax_enter_kernel
21343+ pax_set_fptr_mask
21344+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21345+ call pax_enter_kernel
21346+#endif
21347+ .endm
21348+
21349+ .macro pax_exit_kernel
21350+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21351+ call pax_exit_kernel
21352+#endif
21353+
21354+ .endm
21355+
21356+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21357+ENTRY(pax_enter_kernel)
21358+ pushq %rdi
21359+
21360+#ifdef CONFIG_PARAVIRT
21361+ PV_SAVE_REGS(CLBR_RDI)
21362+#endif
21363+
21364+#ifdef CONFIG_PAX_KERNEXEC
21365+ GET_CR0_INTO_RDI
21366+ bts $16,%rdi
21367+ jnc 3f
21368+ mov %cs,%edi
21369+ cmp $__KERNEL_CS,%edi
21370+ jnz 2f
21371+1:
21372+#endif
21373+
21374+#ifdef CONFIG_PAX_MEMORY_UDEREF
21375+ 661: jmp 111f
21376+ .pushsection .altinstr_replacement, "a"
21377+ 662: ASM_NOP2
21378+ .popsection
21379+ .pushsection .altinstructions, "a"
21380+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
21381+ .popsection
21382+ GET_CR3_INTO_RDI
21383+ cmp $0,%dil
21384+ jnz 112f
21385+ mov $__KERNEL_DS,%edi
21386+ mov %edi,%ss
21387+ jmp 111f
21388+112: cmp $1,%dil
21389+ jz 113f
21390+ ud2
21391+113: sub $4097,%rdi
21392+ bts $63,%rdi
21393+ SET_RDI_INTO_CR3
21394+ mov $__UDEREF_KERNEL_DS,%edi
21395+ mov %edi,%ss
21396+111:
21397+#endif
21398+
21399+#ifdef CONFIG_PARAVIRT
21400+ PV_RESTORE_REGS(CLBR_RDI)
21401+#endif
21402+
21403+ popq %rdi
21404+ pax_force_retaddr
21405+ retq
21406+
21407+#ifdef CONFIG_PAX_KERNEXEC
21408+2: ljmpq __KERNEL_CS,1b
21409+3: ljmpq __KERNEXEC_KERNEL_CS,4f
21410+4: SET_RDI_INTO_CR0
21411+ jmp 1b
21412+#endif
21413+ENDPROC(pax_enter_kernel)
21414+
21415+ENTRY(pax_exit_kernel)
21416+ pushq %rdi
21417+
21418+#ifdef CONFIG_PARAVIRT
21419+ PV_SAVE_REGS(CLBR_RDI)
21420+#endif
21421+
21422+#ifdef CONFIG_PAX_KERNEXEC
21423+ mov %cs,%rdi
21424+ cmp $__KERNEXEC_KERNEL_CS,%edi
21425+ jz 2f
21426+ GET_CR0_INTO_RDI
21427+ bts $16,%rdi
21428+ jnc 4f
21429+1:
21430+#endif
21431+
21432+#ifdef CONFIG_PAX_MEMORY_UDEREF
21433+ 661: jmp 111f
21434+ .pushsection .altinstr_replacement, "a"
21435+ 662: ASM_NOP2
21436+ .popsection
21437+ .pushsection .altinstructions, "a"
21438+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
21439+ .popsection
21440+ mov %ss,%edi
21441+ cmp $__UDEREF_KERNEL_DS,%edi
21442+ jnz 111f
21443+ GET_CR3_INTO_RDI
21444+ cmp $0,%dil
21445+ jz 112f
21446+ ud2
21447+112: add $4097,%rdi
21448+ bts $63,%rdi
21449+ SET_RDI_INTO_CR3
21450+ mov $__KERNEL_DS,%edi
21451+ mov %edi,%ss
21452+111:
21453+#endif
21454+
21455+#ifdef CONFIG_PARAVIRT
21456+ PV_RESTORE_REGS(CLBR_RDI);
21457+#endif
21458+
21459+ popq %rdi
21460+ pax_force_retaddr
21461+ retq
21462+
21463+#ifdef CONFIG_PAX_KERNEXEC
21464+2: GET_CR0_INTO_RDI
21465+ btr $16,%rdi
21466+ jnc 4f
21467+ ljmpq __KERNEL_CS,3f
21468+3: SET_RDI_INTO_CR0
21469+ jmp 1b
21470+4: ud2
21471+ jmp 4b
21472+#endif
21473+ENDPROC(pax_exit_kernel)
21474+#endif
21475+
21476+ .macro pax_enter_kernel_user
21477+ pax_set_fptr_mask
21478+#ifdef CONFIG_PAX_MEMORY_UDEREF
21479+ call pax_enter_kernel_user
21480+#endif
21481+ .endm
21482+
21483+ .macro pax_exit_kernel_user
21484+#ifdef CONFIG_PAX_MEMORY_UDEREF
21485+ call pax_exit_kernel_user
21486+#endif
21487+#ifdef CONFIG_PAX_RANDKSTACK
21488+ pushq %rax
21489+ pushq %r11
21490+ call pax_randomize_kstack
21491+ popq %r11
21492+ popq %rax
21493+#endif
21494+ .endm
21495+
21496+#ifdef CONFIG_PAX_MEMORY_UDEREF
21497+ENTRY(pax_enter_kernel_user)
21498+ pushq %rdi
21499+ pushq %rbx
21500+
21501+#ifdef CONFIG_PARAVIRT
21502+ PV_SAVE_REGS(CLBR_RDI)
21503+#endif
21504+
21505+ 661: jmp 111f
21506+ .pushsection .altinstr_replacement, "a"
21507+ 662: ASM_NOP2
21508+ .popsection
21509+ .pushsection .altinstructions, "a"
21510+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
21511+ .popsection
21512+ GET_CR3_INTO_RDI
21513+ cmp $1,%dil
21514+ jnz 4f
21515+ sub $4097,%rdi
21516+ bts $63,%rdi
21517+ SET_RDI_INTO_CR3
21518+ jmp 3f
21519+111:
21520+
21521+ GET_CR3_INTO_RDI
21522+ mov %rdi,%rbx
21523+ add $__START_KERNEL_map,%rbx
21524+ sub phys_base(%rip),%rbx
21525+
21526+#ifdef CONFIG_PARAVIRT
21527+ cmpl $0, pv_info+PARAVIRT_enabled
21528+ jz 1f
21529+ pushq %rdi
21530+ i = 0
21531+ .rept USER_PGD_PTRS
21532+ mov i*8(%rbx),%rsi
21533+ mov $0,%sil
21534+ lea i*8(%rbx),%rdi
21535+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
21536+ i = i + 1
21537+ .endr
21538+ popq %rdi
21539+ jmp 2f
21540+1:
21541+#endif
21542+
21543+ i = 0
21544+ .rept USER_PGD_PTRS
21545+ movb $0,i*8(%rbx)
21546+ i = i + 1
21547+ .endr
21548+
21549+2: SET_RDI_INTO_CR3
21550+
21551+#ifdef CONFIG_PAX_KERNEXEC
21552+ GET_CR0_INTO_RDI
21553+ bts $16,%rdi
21554+ SET_RDI_INTO_CR0
21555+#endif
21556+
21557+3:
21558+
21559+#ifdef CONFIG_PARAVIRT
21560+ PV_RESTORE_REGS(CLBR_RDI)
21561+#endif
21562+
21563+ popq %rbx
21564+ popq %rdi
21565+ pax_force_retaddr
21566+ retq
21567+4: ud2
21568+ENDPROC(pax_enter_kernel_user)
21569+
21570+ENTRY(pax_exit_kernel_user)
21571+ pushq %rdi
21572+ pushq %rbx
21573+
21574+#ifdef CONFIG_PARAVIRT
21575+ PV_SAVE_REGS(CLBR_RDI)
21576+#endif
21577+
21578+ GET_CR3_INTO_RDI
21579+ 661: jmp 1f
21580+ .pushsection .altinstr_replacement, "a"
21581+ 662: ASM_NOP2
21582+ .popsection
21583+ .pushsection .altinstructions, "a"
21584+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
21585+ .popsection
21586+ cmp $0,%dil
21587+ jnz 3f
21588+ add $4097,%rdi
21589+ bts $63,%rdi
21590+ SET_RDI_INTO_CR3
21591+ jmp 2f
21592+1:
21593+
21594+ mov %rdi,%rbx
21595+
21596+#ifdef CONFIG_PAX_KERNEXEC
21597+ GET_CR0_INTO_RDI
21598+ btr $16,%rdi
21599+ jnc 3f
21600+ SET_RDI_INTO_CR0
21601+#endif
21602+
21603+ add $__START_KERNEL_map,%rbx
21604+ sub phys_base(%rip),%rbx
21605+
21606+#ifdef CONFIG_PARAVIRT
21607+ cmpl $0, pv_info+PARAVIRT_enabled
21608+ jz 1f
21609+ i = 0
21610+ .rept USER_PGD_PTRS
21611+ mov i*8(%rbx),%rsi
21612+ mov $0x67,%sil
21613+ lea i*8(%rbx),%rdi
21614+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
21615+ i = i + 1
21616+ .endr
21617+ jmp 2f
21618+1:
21619+#endif
21620+
21621+ i = 0
21622+ .rept USER_PGD_PTRS
21623+ movb $0x67,i*8(%rbx)
21624+ i = i + 1
21625+ .endr
21626+2:
21627+
21628+#ifdef CONFIG_PARAVIRT
21629+ PV_RESTORE_REGS(CLBR_RDI)
21630+#endif
21631+
21632+ popq %rbx
21633+ popq %rdi
21634+ pax_force_retaddr
21635+ retq
21636+3: ud2
21637+ENDPROC(pax_exit_kernel_user)
21638+#endif
21639+
21640+ .macro pax_enter_kernel_nmi
21641+ pax_set_fptr_mask
21642+
21643+#ifdef CONFIG_PAX_KERNEXEC
21644+ GET_CR0_INTO_RDI
21645+ bts $16,%rdi
21646+ jc 110f
21647+ SET_RDI_INTO_CR0
21648+ or $2,%ebx
21649+110:
21650+#endif
21651+
21652+#ifdef CONFIG_PAX_MEMORY_UDEREF
21653+ 661: jmp 111f
21654+ .pushsection .altinstr_replacement, "a"
21655+ 662: ASM_NOP2
21656+ .popsection
21657+ .pushsection .altinstructions, "a"
21658+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
21659+ .popsection
21660+ GET_CR3_INTO_RDI
21661+ cmp $0,%dil
21662+ jz 111f
21663+ sub $4097,%rdi
21664+ or $4,%ebx
21665+ bts $63,%rdi
21666+ SET_RDI_INTO_CR3
21667+ mov $__UDEREF_KERNEL_DS,%edi
21668+ mov %edi,%ss
21669+111:
21670+#endif
21671+ .endm
21672+
21673+ .macro pax_exit_kernel_nmi
21674+#ifdef CONFIG_PAX_KERNEXEC
21675+ btr $1,%ebx
21676+ jnc 110f
21677+ GET_CR0_INTO_RDI
21678+ btr $16,%rdi
21679+ SET_RDI_INTO_CR0
21680+110:
21681+#endif
21682+
21683+#ifdef CONFIG_PAX_MEMORY_UDEREF
21684+ btr $2,%ebx
21685+ jnc 111f
21686+ GET_CR3_INTO_RDI
21687+ add $4097,%rdi
21688+ bts $63,%rdi
21689+ SET_RDI_INTO_CR3
21690+ mov $__KERNEL_DS,%edi
21691+ mov %edi,%ss
21692+111:
21693+#endif
21694+ .endm
21695+
21696+ .macro pax_erase_kstack
21697+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21698+ call pax_erase_kstack
21699+#endif
21700+ .endm
21701+
21702+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21703+ENTRY(pax_erase_kstack)
21704+ pushq %rdi
21705+ pushq %rcx
21706+ pushq %rax
21707+ pushq %r11
21708+
21709+ GET_THREAD_INFO(%r11)
21710+ mov TI_lowest_stack(%r11), %rdi
21711+ mov $-0xBEEF, %rax
21712+ std
21713+
21714+1: mov %edi, %ecx
21715+ and $THREAD_SIZE_asm - 1, %ecx
21716+ shr $3, %ecx
21717+ repne scasq
21718+ jecxz 2f
21719+
21720+ cmp $2*8, %ecx
21721+ jc 2f
21722+
21723+ mov $2*8, %ecx
21724+ repe scasq
21725+ jecxz 2f
21726+ jne 1b
21727+
21728+2: cld
21729+ mov %esp, %ecx
21730+ sub %edi, %ecx
21731+
21732+ cmp $THREAD_SIZE_asm, %rcx
21733+ jb 3f
21734+ ud2
21735+3:
21736+
21737+ shr $3, %ecx
21738+ rep stosq
21739+
21740+ mov TI_task_thread_sp0(%r11), %rdi
21741+ sub $256, %rdi
21742+ mov %rdi, TI_lowest_stack(%r11)
21743+
21744+ popq %r11
21745+ popq %rax
21746+ popq %rcx
21747+ popq %rdi
21748+ pax_force_retaddr
21749+ ret
21750+ENDPROC(pax_erase_kstack)
21751+#endif
21752
21753 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
21754 #ifdef CONFIG_TRACE_IRQFLAGS
21755@@ -375,8 +808,8 @@ ENDPROC(native_usergs_sysret64)
21756 .endm
21757
21758 .macro UNFAKE_STACK_FRAME
21759- addq $8*6, %rsp
21760- CFI_ADJUST_CFA_OFFSET -(6*8)
21761+ addq $8*6 + ARG_SKIP, %rsp
21762+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
21763 .endm
21764
21765 /*
21766@@ -463,7 +896,7 @@ ENDPROC(native_usergs_sysret64)
21767 movq %rsp, %rsi
21768
21769 leaq -RBP(%rsp),%rdi /* arg1 for handler */
21770- testl $3, CS-RBP(%rsi)
21771+ testb $3, CS-RBP(%rsi)
21772 je 1f
21773 SWAPGS
21774 /*
21775@@ -498,9 +931,10 @@ ENTRY(save_rest)
21776 movq_cfi r15, R15+16
21777 movq %r11, 8(%rsp) /* return address */
21778 FIXUP_TOP_OF_STACK %r11, 16
21779+ pax_force_retaddr
21780 ret
21781 CFI_ENDPROC
21782-END(save_rest)
21783+ENDPROC(save_rest)
21784
21785 /* save complete stack frame */
21786 .pushsection .kprobes.text, "ax"
21787@@ -529,9 +963,10 @@ ENTRY(save_paranoid)
21788 js 1f /* negative -> in kernel */
21789 SWAPGS
21790 xorl %ebx,%ebx
21791-1: ret
21792+1: pax_force_retaddr_bts
21793+ ret
21794 CFI_ENDPROC
21795-END(save_paranoid)
21796+ENDPROC(save_paranoid)
21797 .popsection
21798
21799 /*
21800@@ -553,7 +988,7 @@ ENTRY(ret_from_fork)
21801
21802 RESTORE_REST
21803
21804- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
21805+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
21806 jz 1f
21807
21808 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
21809@@ -571,7 +1006,7 @@ ENTRY(ret_from_fork)
21810 RESTORE_REST
21811 jmp int_ret_from_sys_call
21812 CFI_ENDPROC
21813-END(ret_from_fork)
21814+ENDPROC(ret_from_fork)
21815
21816 /*
21817 * System call entry. Up to 6 arguments in registers are supported.
21818@@ -608,7 +1043,7 @@ END(ret_from_fork)
21819 ENTRY(system_call)
21820 CFI_STARTPROC simple
21821 CFI_SIGNAL_FRAME
21822- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
21823+ CFI_DEF_CFA rsp,0
21824 CFI_REGISTER rip,rcx
21825 /*CFI_REGISTER rflags,r11*/
21826 SWAPGS_UNSAFE_STACK
21827@@ -621,16 +1056,23 @@ GLOBAL(system_call_after_swapgs)
21828
21829 movq %rsp,PER_CPU_VAR(old_rsp)
21830 movq PER_CPU_VAR(kernel_stack),%rsp
21831+ SAVE_ARGS 8*6,0
21832+ pax_enter_kernel_user
21833+
21834+#ifdef CONFIG_PAX_RANDKSTACK
21835+ pax_erase_kstack
21836+#endif
21837+
21838 /*
21839 * No need to follow this irqs off/on section - it's straight
21840 * and short:
21841 */
21842 ENABLE_INTERRUPTS(CLBR_NONE)
21843- SAVE_ARGS 8,0
21844 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
21845 movq %rcx,RIP-ARGOFFSET(%rsp)
21846 CFI_REL_OFFSET rip,RIP-ARGOFFSET
21847- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
21848+ GET_THREAD_INFO(%rcx)
21849+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
21850 jnz tracesys
21851 system_call_fastpath:
21852 #if __SYSCALL_MASK == ~0
21853@@ -640,7 +1082,7 @@ system_call_fastpath:
21854 cmpl $__NR_syscall_max,%eax
21855 #endif
21856 ja badsys
21857- movq %r10,%rcx
21858+ movq R10-ARGOFFSET(%rsp),%rcx
21859 call *sys_call_table(,%rax,8) # XXX: rip relative
21860 movq %rax,RAX-ARGOFFSET(%rsp)
21861 /*
21862@@ -654,10 +1096,13 @@ sysret_check:
21863 LOCKDEP_SYS_EXIT
21864 DISABLE_INTERRUPTS(CLBR_NONE)
21865 TRACE_IRQS_OFF
21866- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
21867+ GET_THREAD_INFO(%rcx)
21868+ movl TI_flags(%rcx),%edx
21869 andl %edi,%edx
21870 jnz sysret_careful
21871 CFI_REMEMBER_STATE
21872+ pax_exit_kernel_user
21873+ pax_erase_kstack
21874 /*
21875 * sysretq will re-enable interrupts:
21876 */
21877@@ -709,14 +1154,18 @@ badsys:
21878 * jump back to the normal fast path.
21879 */
21880 auditsys:
21881- movq %r10,%r9 /* 6th arg: 4th syscall arg */
21882+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
21883 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
21884 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
21885 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
21886 movq %rax,%rsi /* 2nd arg: syscall number */
21887 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
21888 call __audit_syscall_entry
21889+
21890+ pax_erase_kstack
21891+
21892 LOAD_ARGS 0 /* reload call-clobbered registers */
21893+ pax_set_fptr_mask
21894 jmp system_call_fastpath
21895
21896 /*
21897@@ -737,7 +1186,7 @@ sysret_audit:
21898 /* Do syscall tracing */
21899 tracesys:
21900 #ifdef CONFIG_AUDITSYSCALL
21901- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
21902+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
21903 jz auditsys
21904 #endif
21905 SAVE_REST
21906@@ -745,12 +1194,16 @@ tracesys:
21907 FIXUP_TOP_OF_STACK %rdi
21908 movq %rsp,%rdi
21909 call syscall_trace_enter
21910+
21911+ pax_erase_kstack
21912+
21913 /*
21914 * Reload arg registers from stack in case ptrace changed them.
21915 * We don't reload %rax because syscall_trace_enter() returned
21916 * the value it wants us to use in the table lookup.
21917 */
21918 LOAD_ARGS ARGOFFSET, 1
21919+ pax_set_fptr_mask
21920 RESTORE_REST
21921 #if __SYSCALL_MASK == ~0
21922 cmpq $__NR_syscall_max,%rax
21923@@ -759,7 +1212,7 @@ tracesys:
21924 cmpl $__NR_syscall_max,%eax
21925 #endif
21926 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
21927- movq %r10,%rcx /* fixup for C */
21928+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
21929 call *sys_call_table(,%rax,8)
21930 movq %rax,RAX-ARGOFFSET(%rsp)
21931 /* Use IRET because user could have changed frame */
21932@@ -780,7 +1233,9 @@ GLOBAL(int_with_check)
21933 andl %edi,%edx
21934 jnz int_careful
21935 andl $~TS_COMPAT,TI_status(%rcx)
21936- jmp retint_swapgs
21937+ pax_exit_kernel_user
21938+ pax_erase_kstack
21939+ jmp retint_swapgs_pax
21940
21941 /* Either reschedule or signal or syscall exit tracking needed. */
21942 /* First do a reschedule test. */
21943@@ -826,7 +1281,7 @@ int_restore_rest:
21944 TRACE_IRQS_OFF
21945 jmp int_with_check
21946 CFI_ENDPROC
21947-END(system_call)
21948+ENDPROC(system_call)
21949
21950 .macro FORK_LIKE func
21951 ENTRY(stub_\func)
21952@@ -839,9 +1294,10 @@ ENTRY(stub_\func)
21953 DEFAULT_FRAME 0 8 /* offset 8: return address */
21954 call sys_\func
21955 RESTORE_TOP_OF_STACK %r11, 8
21956+ pax_force_retaddr
21957 ret $REST_SKIP /* pop extended registers */
21958 CFI_ENDPROC
21959-END(stub_\func)
21960+ENDPROC(stub_\func)
21961 .endm
21962
21963 .macro FIXED_FRAME label,func
21964@@ -851,9 +1307,10 @@ ENTRY(\label)
21965 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
21966 call \func
21967 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
21968+ pax_force_retaddr
21969 ret
21970 CFI_ENDPROC
21971-END(\label)
21972+ENDPROC(\label)
21973 .endm
21974
21975 FORK_LIKE clone
21976@@ -870,9 +1327,10 @@ ENTRY(ptregscall_common)
21977 movq_cfi_restore R12+8, r12
21978 movq_cfi_restore RBP+8, rbp
21979 movq_cfi_restore RBX+8, rbx
21980+ pax_force_retaddr
21981 ret $REST_SKIP /* pop extended registers */
21982 CFI_ENDPROC
21983-END(ptregscall_common)
21984+ENDPROC(ptregscall_common)
21985
21986 ENTRY(stub_execve)
21987 CFI_STARTPROC
21988@@ -885,7 +1343,7 @@ ENTRY(stub_execve)
21989 RESTORE_REST
21990 jmp int_ret_from_sys_call
21991 CFI_ENDPROC
21992-END(stub_execve)
21993+ENDPROC(stub_execve)
21994
21995 /*
21996 * sigreturn is special because it needs to restore all registers on return.
21997@@ -902,7 +1360,7 @@ ENTRY(stub_rt_sigreturn)
21998 RESTORE_REST
21999 jmp int_ret_from_sys_call
22000 CFI_ENDPROC
22001-END(stub_rt_sigreturn)
22002+ENDPROC(stub_rt_sigreturn)
22003
22004 #ifdef CONFIG_X86_X32_ABI
22005 ENTRY(stub_x32_rt_sigreturn)
22006@@ -916,7 +1374,7 @@ ENTRY(stub_x32_rt_sigreturn)
22007 RESTORE_REST
22008 jmp int_ret_from_sys_call
22009 CFI_ENDPROC
22010-END(stub_x32_rt_sigreturn)
22011+ENDPROC(stub_x32_rt_sigreturn)
22012
22013 ENTRY(stub_x32_execve)
22014 CFI_STARTPROC
22015@@ -930,7 +1388,7 @@ ENTRY(stub_x32_execve)
22016 RESTORE_REST
22017 jmp int_ret_from_sys_call
22018 CFI_ENDPROC
22019-END(stub_x32_execve)
22020+ENDPROC(stub_x32_execve)
22021
22022 #endif
22023
22024@@ -967,7 +1425,7 @@ vector=vector+1
22025 2: jmp common_interrupt
22026 .endr
22027 CFI_ENDPROC
22028-END(irq_entries_start)
22029+ENDPROC(irq_entries_start)
22030
22031 .previous
22032 END(interrupt)
22033@@ -987,6 +1445,16 @@ END(interrupt)
22034 subq $ORIG_RAX-RBP, %rsp
22035 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
22036 SAVE_ARGS_IRQ
22037+#ifdef CONFIG_PAX_MEMORY_UDEREF
22038+ testb $3, CS(%rdi)
22039+ jnz 1f
22040+ pax_enter_kernel
22041+ jmp 2f
22042+1: pax_enter_kernel_user
22043+2:
22044+#else
22045+ pax_enter_kernel
22046+#endif
22047 call \func
22048 .endm
22049
22050@@ -1019,7 +1487,7 @@ ret_from_intr:
22051
22052 exit_intr:
22053 GET_THREAD_INFO(%rcx)
22054- testl $3,CS-ARGOFFSET(%rsp)
22055+ testb $3,CS-ARGOFFSET(%rsp)
22056 je retint_kernel
22057
22058 /* Interrupt came from user space */
22059@@ -1041,12 +1509,16 @@ retint_swapgs: /* return to user-space */
22060 * The iretq could re-enable interrupts:
22061 */
22062 DISABLE_INTERRUPTS(CLBR_ANY)
22063+ pax_exit_kernel_user
22064+retint_swapgs_pax:
22065 TRACE_IRQS_IRETQ
22066 SWAPGS
22067 jmp restore_args
22068
22069 retint_restore_args: /* return to kernel space */
22070 DISABLE_INTERRUPTS(CLBR_ANY)
22071+ pax_exit_kernel
22072+ pax_force_retaddr (RIP-ARGOFFSET)
22073 /*
22074 * The iretq could re-enable interrupts:
22075 */
22076@@ -1129,7 +1601,7 @@ ENTRY(retint_kernel)
22077 #endif
22078
22079 CFI_ENDPROC
22080-END(common_interrupt)
22081+ENDPROC(common_interrupt)
22082 /*
22083 * End of kprobes section
22084 */
22085@@ -1147,7 +1619,7 @@ ENTRY(\sym)
22086 interrupt \do_sym
22087 jmp ret_from_intr
22088 CFI_ENDPROC
22089-END(\sym)
22090+ENDPROC(\sym)
22091 .endm
22092
22093 #ifdef CONFIG_SMP
22094@@ -1208,12 +1680,22 @@ ENTRY(\sym)
22095 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
22096 call error_entry
22097 DEFAULT_FRAME 0
22098+#ifdef CONFIG_PAX_MEMORY_UDEREF
22099+ testb $3, CS(%rsp)
22100+ jnz 1f
22101+ pax_enter_kernel
22102+ jmp 2f
22103+1: pax_enter_kernel_user
22104+2:
22105+#else
22106+ pax_enter_kernel
22107+#endif
22108 movq %rsp,%rdi /* pt_regs pointer */
22109 xorl %esi,%esi /* no error code */
22110 call \do_sym
22111 jmp error_exit /* %ebx: no swapgs flag */
22112 CFI_ENDPROC
22113-END(\sym)
22114+ENDPROC(\sym)
22115 .endm
22116
22117 .macro paranoidzeroentry sym do_sym
22118@@ -1226,15 +1708,25 @@ ENTRY(\sym)
22119 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
22120 call save_paranoid
22121 TRACE_IRQS_OFF
22122+#ifdef CONFIG_PAX_MEMORY_UDEREF
22123+ testb $3, CS(%rsp)
22124+ jnz 1f
22125+ pax_enter_kernel
22126+ jmp 2f
22127+1: pax_enter_kernel_user
22128+2:
22129+#else
22130+ pax_enter_kernel
22131+#endif
22132 movq %rsp,%rdi /* pt_regs pointer */
22133 xorl %esi,%esi /* no error code */
22134 call \do_sym
22135 jmp paranoid_exit /* %ebx: no swapgs flag */
22136 CFI_ENDPROC
22137-END(\sym)
22138+ENDPROC(\sym)
22139 .endm
22140
22141-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
22142+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
22143 .macro paranoidzeroentry_ist sym do_sym ist
22144 ENTRY(\sym)
22145 INTR_FRAME
22146@@ -1245,14 +1737,30 @@ ENTRY(\sym)
22147 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
22148 call save_paranoid
22149 TRACE_IRQS_OFF_DEBUG
22150+#ifdef CONFIG_PAX_MEMORY_UDEREF
22151+ testb $3, CS(%rsp)
22152+ jnz 1f
22153+ pax_enter_kernel
22154+ jmp 2f
22155+1: pax_enter_kernel_user
22156+2:
22157+#else
22158+ pax_enter_kernel
22159+#endif
22160 movq %rsp,%rdi /* pt_regs pointer */
22161 xorl %esi,%esi /* no error code */
22162+#ifdef CONFIG_SMP
22163+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
22164+ lea init_tss(%r12), %r12
22165+#else
22166+ lea init_tss(%rip), %r12
22167+#endif
22168 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
22169 call \do_sym
22170 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
22171 jmp paranoid_exit /* %ebx: no swapgs flag */
22172 CFI_ENDPROC
22173-END(\sym)
22174+ENDPROC(\sym)
22175 .endm
22176
22177 .macro errorentry sym do_sym
22178@@ -1264,13 +1772,23 @@ ENTRY(\sym)
22179 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
22180 call error_entry
22181 DEFAULT_FRAME 0
22182+#ifdef CONFIG_PAX_MEMORY_UDEREF
22183+ testb $3, CS(%rsp)
22184+ jnz 1f
22185+ pax_enter_kernel
22186+ jmp 2f
22187+1: pax_enter_kernel_user
22188+2:
22189+#else
22190+ pax_enter_kernel
22191+#endif
22192 movq %rsp,%rdi /* pt_regs pointer */
22193 movq ORIG_RAX(%rsp),%rsi /* get error code */
22194 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
22195 call \do_sym
22196 jmp error_exit /* %ebx: no swapgs flag */
22197 CFI_ENDPROC
22198-END(\sym)
22199+ENDPROC(\sym)
22200 .endm
22201
22202 /* error code is on the stack already */
22203@@ -1284,13 +1802,23 @@ ENTRY(\sym)
22204 call save_paranoid
22205 DEFAULT_FRAME 0
22206 TRACE_IRQS_OFF
22207+#ifdef CONFIG_PAX_MEMORY_UDEREF
22208+ testb $3, CS(%rsp)
22209+ jnz 1f
22210+ pax_enter_kernel
22211+ jmp 2f
22212+1: pax_enter_kernel_user
22213+2:
22214+#else
22215+ pax_enter_kernel
22216+#endif
22217 movq %rsp,%rdi /* pt_regs pointer */
22218 movq ORIG_RAX(%rsp),%rsi /* get error code */
22219 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
22220 call \do_sym
22221 jmp paranoid_exit /* %ebx: no swapgs flag */
22222 CFI_ENDPROC
22223-END(\sym)
22224+ENDPROC(\sym)
22225 .endm
22226
22227 zeroentry divide_error do_divide_error
22228@@ -1320,9 +1848,10 @@ gs_change:
22229 2: mfence /* workaround */
22230 SWAPGS
22231 popfq_cfi
22232+ pax_force_retaddr
22233 ret
22234 CFI_ENDPROC
22235-END(native_load_gs_index)
22236+ENDPROC(native_load_gs_index)
22237
22238 _ASM_EXTABLE(gs_change,bad_gs)
22239 .section .fixup,"ax"
22240@@ -1350,9 +1879,10 @@ ENTRY(call_softirq)
22241 CFI_DEF_CFA_REGISTER rsp
22242 CFI_ADJUST_CFA_OFFSET -8
22243 decl PER_CPU_VAR(irq_count)
22244+ pax_force_retaddr
22245 ret
22246 CFI_ENDPROC
22247-END(call_softirq)
22248+ENDPROC(call_softirq)
22249
22250 #ifdef CONFIG_XEN
22251 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
22252@@ -1390,7 +1920,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
22253 decl PER_CPU_VAR(irq_count)
22254 jmp error_exit
22255 CFI_ENDPROC
22256-END(xen_do_hypervisor_callback)
22257+ENDPROC(xen_do_hypervisor_callback)
22258
22259 /*
22260 * Hypervisor uses this for application faults while it executes.
22261@@ -1449,7 +1979,7 @@ ENTRY(xen_failsafe_callback)
22262 SAVE_ALL
22263 jmp error_exit
22264 CFI_ENDPROC
22265-END(xen_failsafe_callback)
22266+ENDPROC(xen_failsafe_callback)
22267
22268 apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
22269 xen_hvm_callback_vector xen_evtchn_do_upcall
22270@@ -1501,18 +2031,33 @@ ENTRY(paranoid_exit)
22271 DEFAULT_FRAME
22272 DISABLE_INTERRUPTS(CLBR_NONE)
22273 TRACE_IRQS_OFF_DEBUG
22274- testl %ebx,%ebx /* swapgs needed? */
22275+ testl $1,%ebx /* swapgs needed? */
22276 jnz paranoid_restore
22277- testl $3,CS(%rsp)
22278+ testb $3,CS(%rsp)
22279 jnz paranoid_userspace
22280+#ifdef CONFIG_PAX_MEMORY_UDEREF
22281+ pax_exit_kernel
22282+ TRACE_IRQS_IRETQ 0
22283+ SWAPGS_UNSAFE_STACK
22284+ RESTORE_ALL 8
22285+ pax_force_retaddr_bts
22286+ jmp irq_return
22287+#endif
22288 paranoid_swapgs:
22289+#ifdef CONFIG_PAX_MEMORY_UDEREF
22290+ pax_exit_kernel_user
22291+#else
22292+ pax_exit_kernel
22293+#endif
22294 TRACE_IRQS_IRETQ 0
22295 SWAPGS_UNSAFE_STACK
22296 RESTORE_ALL 8
22297 jmp irq_return
22298 paranoid_restore:
22299+ pax_exit_kernel
22300 TRACE_IRQS_IRETQ_DEBUG 0
22301 RESTORE_ALL 8
22302+ pax_force_retaddr_bts
22303 jmp irq_return
22304 paranoid_userspace:
22305 GET_THREAD_INFO(%rcx)
22306@@ -1541,7 +2086,7 @@ paranoid_schedule:
22307 TRACE_IRQS_OFF
22308 jmp paranoid_userspace
22309 CFI_ENDPROC
22310-END(paranoid_exit)
22311+ENDPROC(paranoid_exit)
22312
22313 /*
22314 * Exception entry point. This expects an error code/orig_rax on the stack.
22315@@ -1568,12 +2113,13 @@ ENTRY(error_entry)
22316 movq_cfi r14, R14+8
22317 movq_cfi r15, R15+8
22318 xorl %ebx,%ebx
22319- testl $3,CS+8(%rsp)
22320+ testb $3,CS+8(%rsp)
22321 je error_kernelspace
22322 error_swapgs:
22323 SWAPGS
22324 error_sti:
22325 TRACE_IRQS_OFF
22326+ pax_force_retaddr_bts
22327 ret
22328
22329 /*
22330@@ -1600,7 +2146,7 @@ bstep_iret:
22331 movq %rcx,RIP+8(%rsp)
22332 jmp error_swapgs
22333 CFI_ENDPROC
22334-END(error_entry)
22335+ENDPROC(error_entry)
22336
22337
22338 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
22339@@ -1611,7 +2157,7 @@ ENTRY(error_exit)
22340 DISABLE_INTERRUPTS(CLBR_NONE)
22341 TRACE_IRQS_OFF
22342 GET_THREAD_INFO(%rcx)
22343- testl %eax,%eax
22344+ testl $1,%eax
22345 jne retint_kernel
22346 LOCKDEP_SYS_EXIT_IRQ
22347 movl TI_flags(%rcx),%edx
22348@@ -1620,7 +2166,7 @@ ENTRY(error_exit)
22349 jnz retint_careful
22350 jmp retint_swapgs
22351 CFI_ENDPROC
22352-END(error_exit)
22353+ENDPROC(error_exit)
22354
22355 /*
22356 * Test if a given stack is an NMI stack or not.
22357@@ -1678,9 +2224,11 @@ ENTRY(nmi)
22358 * If %cs was not the kernel segment, then the NMI triggered in user
22359 * space, which means it is definitely not nested.
22360 */
22361+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
22362+ je 1f
22363 cmpl $__KERNEL_CS, 16(%rsp)
22364 jne first_nmi
22365-
22366+1:
22367 /*
22368 * Check the special variable on the stack to see if NMIs are
22369 * executing.
22370@@ -1714,8 +2262,7 @@ nested_nmi:
22371
22372 1:
22373 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
22374- leaq -1*8(%rsp), %rdx
22375- movq %rdx, %rsp
22376+ subq $8, %rsp
22377 CFI_ADJUST_CFA_OFFSET 1*8
22378 leaq -10*8(%rsp), %rdx
22379 pushq_cfi $__KERNEL_DS
22380@@ -1733,6 +2280,7 @@ nested_nmi_out:
22381 CFI_RESTORE rdx
22382
22383 /* No need to check faults here */
22384+# pax_force_retaddr_bts
22385 INTERRUPT_RETURN
22386
22387 CFI_RESTORE_STATE
22388@@ -1849,6 +2397,8 @@ end_repeat_nmi:
22389 */
22390 movq %cr2, %r12
22391
22392+ pax_enter_kernel_nmi
22393+
22394 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
22395 movq %rsp,%rdi
22396 movq $-1,%rsi
22397@@ -1861,26 +2411,31 @@ end_repeat_nmi:
22398 movq %r12, %cr2
22399 1:
22400
22401- testl %ebx,%ebx /* swapgs needed? */
22402+ testl $1,%ebx /* swapgs needed? */
22403 jnz nmi_restore
22404 nmi_swapgs:
22405 SWAPGS_UNSAFE_STACK
22406 nmi_restore:
22407+ pax_exit_kernel_nmi
22408 /* Pop the extra iret frame at once */
22409 RESTORE_ALL 6*8
22410+ testb $3, 8(%rsp)
22411+ jnz 1f
22412+ pax_force_retaddr_bts
22413+1:
22414
22415 /* Clear the NMI executing stack variable */
22416 movq $0, 5*8(%rsp)
22417 jmp irq_return
22418 CFI_ENDPROC
22419-END(nmi)
22420+ENDPROC(nmi)
22421
22422 ENTRY(ignore_sysret)
22423 CFI_STARTPROC
22424 mov $-ENOSYS,%eax
22425 sysret
22426 CFI_ENDPROC
22427-END(ignore_sysret)
22428+ENDPROC(ignore_sysret)
22429
22430 /*
22431 * End of kprobes section
22432diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
22433index 42a392a..fbbd930 100644
22434--- a/arch/x86/kernel/ftrace.c
22435+++ b/arch/x86/kernel/ftrace.c
22436@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
22437 {
22438 unsigned char replaced[MCOUNT_INSN_SIZE];
22439
22440+ ip = ktla_ktva(ip);
22441+
22442 /*
22443 * Note: Due to modules and __init, code can
22444 * disappear and change, we need to protect against faulting
22445@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
22446 unsigned char old[MCOUNT_INSN_SIZE], *new;
22447 int ret;
22448
22449- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
22450+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
22451 new = ftrace_call_replace(ip, (unsigned long)func);
22452
22453 /* See comment above by declaration of modifying_ftrace_code */
22454@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
22455 /* Also update the regs callback function */
22456 if (!ret) {
22457 ip = (unsigned long)(&ftrace_regs_call);
22458- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
22459+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
22460 new = ftrace_call_replace(ip, (unsigned long)func);
22461 ret = ftrace_modify_code(ip, old, new);
22462 }
22463@@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
22464 * kernel identity mapping to modify code.
22465 */
22466 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
22467- ip = (unsigned long)__va(__pa_symbol(ip));
22468+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
22469
22470 return probe_kernel_write((void *)ip, val, size);
22471 }
22472@@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
22473 unsigned char replaced[MCOUNT_INSN_SIZE];
22474 unsigned char brk = BREAKPOINT_INSTRUCTION;
22475
22476- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
22477+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
22478 return -EFAULT;
22479
22480 /* Make sure it is what we expect it to be */
22481@@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
22482 return ret;
22483
22484 fail_update:
22485- probe_kernel_write((void *)ip, &old_code[0], 1);
22486+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
22487 goto out;
22488 }
22489
22490@@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
22491 {
22492 unsigned char code[MCOUNT_INSN_SIZE];
22493
22494+ ip = ktla_ktva(ip);
22495+
22496 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
22497 return -EFAULT;
22498
22499diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
22500index 55b6761..a6456fc 100644
22501--- a/arch/x86/kernel/head64.c
22502+++ b/arch/x86/kernel/head64.c
22503@@ -67,12 +67,12 @@ again:
22504 pgd = *pgd_p;
22505
22506 /*
22507- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
22508- * critical -- __PAGE_OFFSET would point us back into the dynamic
22509+ * The use of __early_va rather than __va here is critical:
22510+ * __va would point us back into the dynamic
22511 * range and we might end up looping forever...
22512 */
22513 if (pgd)
22514- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
22515+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
22516 else {
22517 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
22518 reset_early_page_tables();
22519@@ -82,13 +82,13 @@ again:
22520 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
22521 for (i = 0; i < PTRS_PER_PUD; i++)
22522 pud_p[i] = 0;
22523- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
22524+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
22525 }
22526 pud_p += pud_index(address);
22527 pud = *pud_p;
22528
22529 if (pud)
22530- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
22531+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
22532 else {
22533 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
22534 reset_early_page_tables();
22535@@ -98,7 +98,7 @@ again:
22536 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
22537 for (i = 0; i < PTRS_PER_PMD; i++)
22538 pmd_p[i] = 0;
22539- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
22540+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
22541 }
22542 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
22543 pmd_p[pmd_index(address)] = pmd;
22544@@ -175,7 +175,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
22545 if (console_loglevel == 10)
22546 early_printk("Kernel alive\n");
22547
22548- clear_page(init_level4_pgt);
22549 /* set init_level4_pgt kernel high mapping*/
22550 init_level4_pgt[511] = early_level4_pgt[511];
22551
22552diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
22553index 73afd11..0ef46f2 100644
22554--- a/arch/x86/kernel/head_32.S
22555+++ b/arch/x86/kernel/head_32.S
22556@@ -26,6 +26,12 @@
22557 /* Physical address */
22558 #define pa(X) ((X) - __PAGE_OFFSET)
22559
22560+#ifdef CONFIG_PAX_KERNEXEC
22561+#define ta(X) (X)
22562+#else
22563+#define ta(X) ((X) - __PAGE_OFFSET)
22564+#endif
22565+
22566 /*
22567 * References to members of the new_cpu_data structure.
22568 */
22569@@ -55,11 +61,7 @@
22570 * and small than max_low_pfn, otherwise will waste some page table entries
22571 */
22572
22573-#if PTRS_PER_PMD > 1
22574-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
22575-#else
22576-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
22577-#endif
22578+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
22579
22580 /* Number of possible pages in the lowmem region */
22581 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
22582@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
22583 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
22584
22585 /*
22586+ * Real beginning of normal "text" segment
22587+ */
22588+ENTRY(stext)
22589+ENTRY(_stext)
22590+
22591+/*
22592 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
22593 * %esi points to the real-mode code as a 32-bit pointer.
22594 * CS and DS must be 4 GB flat segments, but we don't depend on
22595@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
22596 * can.
22597 */
22598 __HEAD
22599+
22600+#ifdef CONFIG_PAX_KERNEXEC
22601+ jmp startup_32
22602+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
22603+.fill PAGE_SIZE-5,1,0xcc
22604+#endif
22605+
22606 ENTRY(startup_32)
22607 movl pa(stack_start),%ecx
22608
22609@@ -106,6 +121,59 @@ ENTRY(startup_32)
22610 2:
22611 leal -__PAGE_OFFSET(%ecx),%esp
22612
22613+#ifdef CONFIG_SMP
22614+ movl $pa(cpu_gdt_table),%edi
22615+ movl $__per_cpu_load,%eax
22616+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
22617+ rorl $16,%eax
22618+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
22619+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
22620+ movl $__per_cpu_end - 1,%eax
22621+ subl $__per_cpu_start,%eax
22622+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
22623+#endif
22624+
22625+#ifdef CONFIG_PAX_MEMORY_UDEREF
22626+ movl $NR_CPUS,%ecx
22627+ movl $pa(cpu_gdt_table),%edi
22628+1:
22629+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
22630+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
22631+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
22632+ addl $PAGE_SIZE_asm,%edi
22633+ loop 1b
22634+#endif
22635+
22636+#ifdef CONFIG_PAX_KERNEXEC
22637+ movl $pa(boot_gdt),%edi
22638+ movl $__LOAD_PHYSICAL_ADDR,%eax
22639+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
22640+ rorl $16,%eax
22641+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
22642+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
22643+ rorl $16,%eax
22644+
22645+ ljmp $(__BOOT_CS),$1f
22646+1:
22647+
22648+ movl $NR_CPUS,%ecx
22649+ movl $pa(cpu_gdt_table),%edi
22650+ addl $__PAGE_OFFSET,%eax
22651+1:
22652+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
22653+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
22654+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
22655+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
22656+ rorl $16,%eax
22657+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
22658+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
22659+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
22660+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
22661+ rorl $16,%eax
22662+ addl $PAGE_SIZE_asm,%edi
22663+ loop 1b
22664+#endif
22665+
22666 /*
22667 * Clear BSS first so that there are no surprises...
22668 */
22669@@ -201,8 +269,11 @@ ENTRY(startup_32)
22670 movl %eax, pa(max_pfn_mapped)
22671
22672 /* Do early initialization of the fixmap area */
22673- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
22674- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
22675+#ifdef CONFIG_COMPAT_VDSO
22676+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
22677+#else
22678+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
22679+#endif
22680 #else /* Not PAE */
22681
22682 page_pde_offset = (__PAGE_OFFSET >> 20);
22683@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
22684 movl %eax, pa(max_pfn_mapped)
22685
22686 /* Do early initialization of the fixmap area */
22687- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
22688- movl %eax,pa(initial_page_table+0xffc)
22689+#ifdef CONFIG_COMPAT_VDSO
22690+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
22691+#else
22692+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
22693+#endif
22694 #endif
22695
22696 #ifdef CONFIG_PARAVIRT
22697@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
22698 cmpl $num_subarch_entries, %eax
22699 jae bad_subarch
22700
22701- movl pa(subarch_entries)(,%eax,4), %eax
22702- subl $__PAGE_OFFSET, %eax
22703- jmp *%eax
22704+ jmp *pa(subarch_entries)(,%eax,4)
22705
22706 bad_subarch:
22707 WEAK(lguest_entry)
22708@@ -261,10 +333,10 @@ WEAK(xen_entry)
22709 __INITDATA
22710
22711 subarch_entries:
22712- .long default_entry /* normal x86/PC */
22713- .long lguest_entry /* lguest hypervisor */
22714- .long xen_entry /* Xen hypervisor */
22715- .long default_entry /* Moorestown MID */
22716+ .long ta(default_entry) /* normal x86/PC */
22717+ .long ta(lguest_entry) /* lguest hypervisor */
22718+ .long ta(xen_entry) /* Xen hypervisor */
22719+ .long ta(default_entry) /* Moorestown MID */
22720 num_subarch_entries = (. - subarch_entries) / 4
22721 .previous
22722 #else
22723@@ -355,6 +427,7 @@ default_entry:
22724 movl pa(mmu_cr4_features),%eax
22725 movl %eax,%cr4
22726
22727+#ifdef CONFIG_X86_PAE
22728 testb $X86_CR4_PAE, %al # check if PAE is enabled
22729 jz enable_paging
22730
22731@@ -383,6 +456,9 @@ default_entry:
22732 /* Make changes effective */
22733 wrmsr
22734
22735+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
22736+#endif
22737+
22738 enable_paging:
22739
22740 /*
22741@@ -451,14 +527,20 @@ is486:
22742 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
22743 movl %eax,%ss # after changing gdt.
22744
22745- movl $(__USER_DS),%eax # DS/ES contains default USER segment
22746+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
22747 movl %eax,%ds
22748 movl %eax,%es
22749
22750 movl $(__KERNEL_PERCPU), %eax
22751 movl %eax,%fs # set this cpu's percpu
22752
22753+#ifdef CONFIG_CC_STACKPROTECTOR
22754 movl $(__KERNEL_STACK_CANARY),%eax
22755+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22756+ movl $(__USER_DS),%eax
22757+#else
22758+ xorl %eax,%eax
22759+#endif
22760 movl %eax,%gs
22761
22762 xorl %eax,%eax # Clear LDT
22763@@ -534,8 +616,11 @@ setup_once:
22764 * relocation. Manually set base address in stack canary
22765 * segment descriptor.
22766 */
22767- movl $gdt_page,%eax
22768+ movl $cpu_gdt_table,%eax
22769 movl $stack_canary,%ecx
22770+#ifdef CONFIG_SMP
22771+ addl $__per_cpu_load,%ecx
22772+#endif
22773 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
22774 shrl $16, %ecx
22775 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
22776@@ -566,7 +651,7 @@ ENDPROC(early_idt_handlers)
22777 /* This is global to keep gas from relaxing the jumps */
22778 ENTRY(early_idt_handler)
22779 cld
22780- cmpl $2,%ss:early_recursion_flag
22781+ cmpl $1,%ss:early_recursion_flag
22782 je hlt_loop
22783 incl %ss:early_recursion_flag
22784
22785@@ -604,8 +689,8 @@ ENTRY(early_idt_handler)
22786 pushl (20+6*4)(%esp) /* trapno */
22787 pushl $fault_msg
22788 call printk
22789-#endif
22790 call dump_stack
22791+#endif
22792 hlt_loop:
22793 hlt
22794 jmp hlt_loop
22795@@ -624,8 +709,11 @@ ENDPROC(early_idt_handler)
22796 /* This is the default interrupt "handler" :-) */
22797 ALIGN
22798 ignore_int:
22799- cld
22800 #ifdef CONFIG_PRINTK
22801+ cmpl $2,%ss:early_recursion_flag
22802+ je hlt_loop
22803+ incl %ss:early_recursion_flag
22804+ cld
22805 pushl %eax
22806 pushl %ecx
22807 pushl %edx
22808@@ -634,9 +722,6 @@ ignore_int:
22809 movl $(__KERNEL_DS),%eax
22810 movl %eax,%ds
22811 movl %eax,%es
22812- cmpl $2,early_recursion_flag
22813- je hlt_loop
22814- incl early_recursion_flag
22815 pushl 16(%esp)
22816 pushl 24(%esp)
22817 pushl 32(%esp)
22818@@ -670,29 +755,43 @@ ENTRY(setup_once_ref)
22819 /*
22820 * BSS section
22821 */
22822-__PAGE_ALIGNED_BSS
22823- .align PAGE_SIZE
22824 #ifdef CONFIG_X86_PAE
22825+.section .initial_pg_pmd,"a",@progbits
22826 initial_pg_pmd:
22827 .fill 1024*KPMDS,4,0
22828 #else
22829+.section .initial_page_table,"a",@progbits
22830 ENTRY(initial_page_table)
22831 .fill 1024,4,0
22832 #endif
22833+.section .initial_pg_fixmap,"a",@progbits
22834 initial_pg_fixmap:
22835 .fill 1024,4,0
22836+.section .empty_zero_page,"a",@progbits
22837 ENTRY(empty_zero_page)
22838 .fill 4096,1,0
22839+.section .swapper_pg_dir,"a",@progbits
22840 ENTRY(swapper_pg_dir)
22841+#ifdef CONFIG_X86_PAE
22842+ .fill 4,8,0
22843+#else
22844 .fill 1024,4,0
22845+#endif
22846+
22847+/*
22848+ * The IDT has to be page-aligned to simplify the Pentium
22849+ * F0 0F bug workaround.. We have a special link segment
22850+ * for this.
22851+ */
22852+.section .idt,"a",@progbits
22853+ENTRY(idt_table)
22854+ .fill 256,8,0
22855
22856 /*
22857 * This starts the data section.
22858 */
22859 #ifdef CONFIG_X86_PAE
22860-__PAGE_ALIGNED_DATA
22861- /* Page-aligned for the benefit of paravirt? */
22862- .align PAGE_SIZE
22863+.section .initial_page_table,"a",@progbits
22864 ENTRY(initial_page_table)
22865 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
22866 # if KPMDS == 3
22867@@ -711,12 +810,20 @@ ENTRY(initial_page_table)
22868 # error "Kernel PMDs should be 1, 2 or 3"
22869 # endif
22870 .align PAGE_SIZE /* needs to be page-sized too */
22871+
22872+#ifdef CONFIG_PAX_PER_CPU_PGD
22873+ENTRY(cpu_pgd)
22874+ .rept 2*NR_CPUS
22875+ .fill 4,8,0
22876+ .endr
22877+#endif
22878+
22879 #endif
22880
22881 .data
22882 .balign 4
22883 ENTRY(stack_start)
22884- .long init_thread_union+THREAD_SIZE
22885+ .long init_thread_union+THREAD_SIZE-8
22886
22887 __INITRODATA
22888 int_msg:
22889@@ -744,7 +851,7 @@ fault_msg:
22890 * segment size, and 32-bit linear address value:
22891 */
22892
22893- .data
22894+.section .rodata,"a",@progbits
22895 .globl boot_gdt_descr
22896 .globl idt_descr
22897
22898@@ -753,7 +860,7 @@ fault_msg:
22899 .word 0 # 32 bit align gdt_desc.address
22900 boot_gdt_descr:
22901 .word __BOOT_DS+7
22902- .long boot_gdt - __PAGE_OFFSET
22903+ .long pa(boot_gdt)
22904
22905 .word 0 # 32-bit align idt_desc.address
22906 idt_descr:
22907@@ -764,7 +871,7 @@ idt_descr:
22908 .word 0 # 32 bit align gdt_desc.address
22909 ENTRY(early_gdt_descr)
22910 .word GDT_ENTRIES*8-1
22911- .long gdt_page /* Overwritten for secondary CPUs */
22912+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
22913
22914 /*
22915 * The boot_gdt must mirror the equivalent in setup.S and is
22916@@ -773,5 +880,65 @@ ENTRY(early_gdt_descr)
22917 .align L1_CACHE_BYTES
22918 ENTRY(boot_gdt)
22919 .fill GDT_ENTRY_BOOT_CS,8,0
22920- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
22921- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
22922+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
22923+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
22924+
22925+ .align PAGE_SIZE_asm
22926+ENTRY(cpu_gdt_table)
22927+ .rept NR_CPUS
22928+ .quad 0x0000000000000000 /* NULL descriptor */
22929+ .quad 0x0000000000000000 /* 0x0b reserved */
22930+ .quad 0x0000000000000000 /* 0x13 reserved */
22931+ .quad 0x0000000000000000 /* 0x1b reserved */
22932+
22933+#ifdef CONFIG_PAX_KERNEXEC
22934+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
22935+#else
22936+ .quad 0x0000000000000000 /* 0x20 unused */
22937+#endif
22938+
22939+ .quad 0x0000000000000000 /* 0x28 unused */
22940+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
22941+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
22942+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
22943+ .quad 0x0000000000000000 /* 0x4b reserved */
22944+ .quad 0x0000000000000000 /* 0x53 reserved */
22945+ .quad 0x0000000000000000 /* 0x5b reserved */
22946+
22947+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
22948+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
22949+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
22950+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
22951+
22952+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
22953+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
22954+
22955+ /*
22956+ * Segments used for calling PnP BIOS have byte granularity.
22957+ * The code segments and data segments have fixed 64k limits,
22958+ * the transfer segment sizes are set at run time.
22959+ */
22960+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
22961+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
22962+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
22963+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
22964+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
22965+
22966+ /*
22967+ * The APM segments have byte granularity and their bases
22968+ * are set at run time. All have 64k limits.
22969+ */
22970+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
22971+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
22972+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
22973+
22974+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
22975+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
22976+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
22977+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
22978+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
22979+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
22980+
22981+ /* Be sure this is zeroed to avoid false validations in Xen */
22982+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
22983+ .endr
22984diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
22985index a836860..1b5c665 100644
22986--- a/arch/x86/kernel/head_64.S
22987+++ b/arch/x86/kernel/head_64.S
22988@@ -20,6 +20,8 @@
22989 #include <asm/processor-flags.h>
22990 #include <asm/percpu.h>
22991 #include <asm/nops.h>
22992+#include <asm/cpufeature.h>
22993+#include <asm/alternative-asm.h>
22994
22995 #ifdef CONFIG_PARAVIRT
22996 #include <asm/asm-offsets.h>
22997@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
22998 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
22999 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
23000 L3_START_KERNEL = pud_index(__START_KERNEL_map)
23001+L4_VMALLOC_START = pgd_index(VMALLOC_START)
23002+L3_VMALLOC_START = pud_index(VMALLOC_START)
23003+L4_VMALLOC_END = pgd_index(VMALLOC_END)
23004+L3_VMALLOC_END = pud_index(VMALLOC_END)
23005+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
23006+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
23007
23008 .text
23009 __HEAD
23010@@ -89,11 +97,23 @@ startup_64:
23011 * Fixup the physical addresses in the page table
23012 */
23013 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
23014+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
23015+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
23016+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
23017+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
23018+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
23019
23020- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
23021- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
23022+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
23023+#ifndef CONFIG_XEN
23024+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
23025+#endif
23026
23027- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
23028+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
23029+
23030+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
23031+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
23032+
23033+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
23034
23035 /*
23036 * Set up the identity mapping for the switchover. These
23037@@ -177,8 +197,8 @@ ENTRY(secondary_startup_64)
23038 movq $(init_level4_pgt - __START_KERNEL_map), %rax
23039 1:
23040
23041- /* Enable PAE mode and PGE */
23042- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
23043+ /* Enable PAE mode and PSE/PGE */
23044+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
23045 movq %rcx, %cr4
23046
23047 /* Setup early boot stage 4 level pagetables. */
23048@@ -199,10 +219,18 @@ ENTRY(secondary_startup_64)
23049 movl $MSR_EFER, %ecx
23050 rdmsr
23051 btsl $_EFER_SCE, %eax /* Enable System Call */
23052- btl $20,%edi /* No Execute supported? */
23053+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
23054 jnc 1f
23055 btsl $_EFER_NX, %eax
23056 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
23057+ leaq init_level4_pgt(%rip), %rdi
23058+#ifndef CONFIG_EFI
23059+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
23060+#endif
23061+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
23062+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
23063+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
23064+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
23065 1: wrmsr /* Make changes effective */
23066
23067 /* Setup cr0 */
23068@@ -282,6 +310,7 @@ ENTRY(secondary_startup_64)
23069 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
23070 * address given in m16:64.
23071 */
23072+ pax_set_fptr_mask
23073 movq initial_code(%rip),%rax
23074 pushq $0 # fake return address to stop unwinder
23075 pushq $__KERNEL_CS # set correct cs
23076@@ -388,7 +417,7 @@ ENTRY(early_idt_handler)
23077 call dump_stack
23078 #ifdef CONFIG_KALLSYMS
23079 leaq early_idt_ripmsg(%rip),%rdi
23080- movq 40(%rsp),%rsi # %rip again
23081+ movq 88(%rsp),%rsi # %rip again
23082 call __print_symbol
23083 #endif
23084 #endif /* EARLY_PRINTK */
23085@@ -416,6 +445,7 @@ ENDPROC(early_idt_handler)
23086 early_recursion_flag:
23087 .long 0
23088
23089+ .section .rodata,"a",@progbits
23090 #ifdef CONFIG_EARLY_PRINTK
23091 early_idt_msg:
23092 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
23093@@ -443,29 +473,52 @@ NEXT_PAGE(early_level4_pgt)
23094 NEXT_PAGE(early_dynamic_pgts)
23095 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
23096
23097- .data
23098+ .section .rodata,"a",@progbits
23099
23100-#ifndef CONFIG_XEN
23101 NEXT_PAGE(init_level4_pgt)
23102- .fill 512,8,0
23103-#else
23104-NEXT_PAGE(init_level4_pgt)
23105- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
23106 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
23107 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
23108+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
23109+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
23110+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
23111+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
23112+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
23113+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
23114 .org init_level4_pgt + L4_START_KERNEL*8, 0
23115 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
23116 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
23117
23118+#ifdef CONFIG_PAX_PER_CPU_PGD
23119+NEXT_PAGE(cpu_pgd)
23120+ .rept 2*NR_CPUS
23121+ .fill 512,8,0
23122+ .endr
23123+#endif
23124+
23125 NEXT_PAGE(level3_ident_pgt)
23126 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
23127+#ifdef CONFIG_XEN
23128 .fill 511, 8, 0
23129+#else
23130+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
23131+ .fill 510,8,0
23132+#endif
23133+
23134+NEXT_PAGE(level3_vmalloc_start_pgt)
23135+ .fill 512,8,0
23136+
23137+NEXT_PAGE(level3_vmalloc_end_pgt)
23138+ .fill 512,8,0
23139+
23140+NEXT_PAGE(level3_vmemmap_pgt)
23141+ .fill L3_VMEMMAP_START,8,0
23142+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
23143+
23144 NEXT_PAGE(level2_ident_pgt)
23145- /* Since I easily can, map the first 1G.
23146+ /* Since I easily can, map the first 2G.
23147 * Don't set NX because code runs from these pages.
23148 */
23149- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
23150-#endif
23151+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
23152
23153 NEXT_PAGE(level3_kernel_pgt)
23154 .fill L3_START_KERNEL,8,0
23155@@ -473,6 +526,9 @@ NEXT_PAGE(level3_kernel_pgt)
23156 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
23157 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
23158
23159+NEXT_PAGE(level2_vmemmap_pgt)
23160+ .fill 512,8,0
23161+
23162 NEXT_PAGE(level2_kernel_pgt)
23163 /*
23164 * 512 MB kernel mapping. We spend a full page on this pagetable
23165@@ -488,39 +544,70 @@ NEXT_PAGE(level2_kernel_pgt)
23166 KERNEL_IMAGE_SIZE/PMD_SIZE)
23167
23168 NEXT_PAGE(level2_fixmap_pgt)
23169- .fill 506,8,0
23170- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
23171- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
23172- .fill 5,8,0
23173+ .fill 507,8,0
23174+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
23175+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
23176+ .fill 4,8,0
23177
23178-NEXT_PAGE(level1_fixmap_pgt)
23179+NEXT_PAGE(level1_vsyscall_pgt)
23180 .fill 512,8,0
23181
23182 #undef PMDS
23183
23184- .data
23185+ .align PAGE_SIZE
23186+ENTRY(cpu_gdt_table)
23187+ .rept NR_CPUS
23188+ .quad 0x0000000000000000 /* NULL descriptor */
23189+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
23190+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
23191+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
23192+ .quad 0x00cffb000000ffff /* __USER32_CS */
23193+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
23194+ .quad 0x00affb000000ffff /* __USER_CS */
23195+
23196+#ifdef CONFIG_PAX_KERNEXEC
23197+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
23198+#else
23199+ .quad 0x0 /* unused */
23200+#endif
23201+
23202+ .quad 0,0 /* TSS */
23203+ .quad 0,0 /* LDT */
23204+ .quad 0,0,0 /* three TLS descriptors */
23205+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
23206+ /* asm/segment.h:GDT_ENTRIES must match this */
23207+
23208+#ifdef CONFIG_PAX_MEMORY_UDEREF
23209+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
23210+#else
23211+ .quad 0x0 /* unused */
23212+#endif
23213+
23214+ /* zero the remaining page */
23215+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
23216+ .endr
23217+
23218 .align 16
23219 .globl early_gdt_descr
23220 early_gdt_descr:
23221 .word GDT_ENTRIES*8-1
23222 early_gdt_descr_base:
23223- .quad INIT_PER_CPU_VAR(gdt_page)
23224+ .quad cpu_gdt_table
23225
23226 ENTRY(phys_base)
23227 /* This must match the first entry in level2_kernel_pgt */
23228 .quad 0x0000000000000000
23229
23230 #include "../../x86/xen/xen-head.S"
23231-
23232- .section .bss, "aw", @nobits
23233+
23234+ .section .rodata,"a",@progbits
23235+NEXT_PAGE(empty_zero_page)
23236+ .skip PAGE_SIZE
23237+
23238 .align PAGE_SIZE
23239 ENTRY(idt_table)
23240- .skip IDT_ENTRIES * 16
23241+ .fill 512,8,0
23242
23243 .align L1_CACHE_BYTES
23244 ENTRY(nmi_idt_table)
23245- .skip IDT_ENTRIES * 16
23246-
23247- __PAGE_ALIGNED_BSS
23248-NEXT_PAGE(empty_zero_page)
23249- .skip PAGE_SIZE
23250+ .fill 512,8,0
23251diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
23252index 0fa6912..b37438b 100644
23253--- a/arch/x86/kernel/i386_ksyms_32.c
23254+++ b/arch/x86/kernel/i386_ksyms_32.c
23255@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
23256 EXPORT_SYMBOL(cmpxchg8b_emu);
23257 #endif
23258
23259+EXPORT_SYMBOL_GPL(cpu_gdt_table);
23260+
23261 /* Networking helper routines. */
23262 EXPORT_SYMBOL(csum_partial_copy_generic);
23263+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
23264+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
23265
23266 EXPORT_SYMBOL(__get_user_1);
23267 EXPORT_SYMBOL(__get_user_2);
23268@@ -37,3 +41,11 @@ EXPORT_SYMBOL(strstr);
23269
23270 EXPORT_SYMBOL(csum_partial);
23271 EXPORT_SYMBOL(empty_zero_page);
23272+
23273+#ifdef CONFIG_PAX_KERNEXEC
23274+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
23275+#endif
23276+
23277+#ifdef CONFIG_PAX_PER_CPU_PGD
23278+EXPORT_SYMBOL(cpu_pgd);
23279+#endif
23280diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
23281index f7ea30d..6318acc 100644
23282--- a/arch/x86/kernel/i387.c
23283+++ b/arch/x86/kernel/i387.c
23284@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
23285 static inline bool interrupted_user_mode(void)
23286 {
23287 struct pt_regs *regs = get_irq_regs();
23288- return regs && user_mode_vm(regs);
23289+ return regs && user_mode(regs);
23290 }
23291
23292 /*
23293diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
23294index 9a5c460..84868423 100644
23295--- a/arch/x86/kernel/i8259.c
23296+++ b/arch/x86/kernel/i8259.c
23297@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
23298 static void make_8259A_irq(unsigned int irq)
23299 {
23300 disable_irq_nosync(irq);
23301- io_apic_irqs &= ~(1<<irq);
23302+ io_apic_irqs &= ~(1UL<<irq);
23303 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
23304 i8259A_chip.name);
23305 enable_irq(irq);
23306@@ -209,7 +209,7 @@ spurious_8259A_irq:
23307 "spurious 8259A interrupt: IRQ%d.\n", irq);
23308 spurious_irq_mask |= irqmask;
23309 }
23310- atomic_inc(&irq_err_count);
23311+ atomic_inc_unchecked(&irq_err_count);
23312 /*
23313 * Theoretically we do not have to handle this IRQ,
23314 * but in Linux this does not cause problems and is
23315@@ -333,14 +333,16 @@ static void init_8259A(int auto_eoi)
23316 /* (slave's support for AEOI in flat mode is to be investigated) */
23317 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
23318
23319+ pax_open_kernel();
23320 if (auto_eoi)
23321 /*
23322 * In AEOI mode we just have to mask the interrupt
23323 * when acking.
23324 */
23325- i8259A_chip.irq_mask_ack = disable_8259A_irq;
23326+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
23327 else
23328- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
23329+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
23330+ pax_close_kernel();
23331
23332 udelay(100); /* wait for 8259A to initialize */
23333
23334diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
23335index a979b5b..1d6db75 100644
23336--- a/arch/x86/kernel/io_delay.c
23337+++ b/arch/x86/kernel/io_delay.c
23338@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
23339 * Quirk table for systems that misbehave (lock up, etc.) if port
23340 * 0x80 is used:
23341 */
23342-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
23343+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
23344 {
23345 .callback = dmi_io_delay_0xed_port,
23346 .ident = "Compaq Presario V6000",
23347diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
23348index 4ddaf66..6292f4e 100644
23349--- a/arch/x86/kernel/ioport.c
23350+++ b/arch/x86/kernel/ioport.c
23351@@ -6,6 +6,7 @@
23352 #include <linux/sched.h>
23353 #include <linux/kernel.h>
23354 #include <linux/capability.h>
23355+#include <linux/security.h>
23356 #include <linux/errno.h>
23357 #include <linux/types.h>
23358 #include <linux/ioport.h>
23359@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
23360
23361 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
23362 return -EINVAL;
23363+#ifdef CONFIG_GRKERNSEC_IO
23364+ if (turn_on && grsec_disable_privio) {
23365+ gr_handle_ioperm();
23366+ return -EPERM;
23367+ }
23368+#endif
23369 if (turn_on && !capable(CAP_SYS_RAWIO))
23370 return -EPERM;
23371
23372@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
23373 * because the ->io_bitmap_max value must match the bitmap
23374 * contents:
23375 */
23376- tss = &per_cpu(init_tss, get_cpu());
23377+ tss = init_tss + get_cpu();
23378
23379 if (turn_on)
23380 bitmap_clear(t->io_bitmap_ptr, from, num);
23381@@ -103,6 +110,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
23382 return -EINVAL;
23383 /* Trying to gain more privileges? */
23384 if (level > old) {
23385+#ifdef CONFIG_GRKERNSEC_IO
23386+ if (grsec_disable_privio) {
23387+ gr_handle_iopl();
23388+ return -EPERM;
23389+ }
23390+#endif
23391 if (!capable(CAP_SYS_RAWIO))
23392 return -EPERM;
23393 }
23394diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
23395index ac0631d..ff7cb62 100644
23396--- a/arch/x86/kernel/irq.c
23397+++ b/arch/x86/kernel/irq.c
23398@@ -18,7 +18,7 @@
23399 #include <asm/mce.h>
23400 #include <asm/hw_irq.h>
23401
23402-atomic_t irq_err_count;
23403+atomic_unchecked_t irq_err_count;
23404
23405 /* Function pointer for generic interrupt vector handling */
23406 void (*x86_platform_ipi_callback)(void) = NULL;
23407@@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
23408 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
23409 seq_printf(p, " Machine check polls\n");
23410 #endif
23411- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
23412+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
23413 #if defined(CONFIG_X86_IO_APIC)
23414- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
23415+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
23416 #endif
23417 return 0;
23418 }
23419@@ -164,7 +164,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
23420
23421 u64 arch_irq_stat(void)
23422 {
23423- u64 sum = atomic_read(&irq_err_count);
23424+ u64 sum = atomic_read_unchecked(&irq_err_count);
23425 return sum;
23426 }
23427
23428diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
23429index 344faf8..355f60d 100644
23430--- a/arch/x86/kernel/irq_32.c
23431+++ b/arch/x86/kernel/irq_32.c
23432@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
23433 __asm__ __volatile__("andl %%esp,%0" :
23434 "=r" (sp) : "0" (THREAD_SIZE - 1));
23435
23436- return sp < (sizeof(struct thread_info) + STACK_WARN);
23437+ return sp < STACK_WARN;
23438 }
23439
23440 static void print_stack_overflow(void)
23441@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
23442 * per-CPU IRQ handling contexts (thread information and stack)
23443 */
23444 union irq_ctx {
23445- struct thread_info tinfo;
23446- u32 stack[THREAD_SIZE/sizeof(u32)];
23447+ unsigned long previous_esp;
23448+ u32 stack[THREAD_SIZE/sizeof(u32)];
23449 } __attribute__((aligned(THREAD_SIZE)));
23450
23451 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
23452@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
23453 static inline int
23454 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
23455 {
23456- union irq_ctx *curctx, *irqctx;
23457+ union irq_ctx *irqctx;
23458 u32 *isp, arg1, arg2;
23459
23460- curctx = (union irq_ctx *) current_thread_info();
23461 irqctx = __this_cpu_read(hardirq_ctx);
23462
23463 /*
23464@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
23465 * handler) we can't do that and just have to keep using the
23466 * current stack (which is the irq stack already after all)
23467 */
23468- if (unlikely(curctx == irqctx))
23469+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
23470 return 0;
23471
23472 /* build the stack frame on the IRQ stack */
23473- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
23474- irqctx->tinfo.task = curctx->tinfo.task;
23475- irqctx->tinfo.previous_esp = current_stack_pointer;
23476+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
23477+ irqctx->previous_esp = current_stack_pointer;
23478
23479- /* Copy the preempt_count so that the [soft]irq checks work. */
23480- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
23481+#ifdef CONFIG_PAX_MEMORY_UDEREF
23482+ __set_fs(MAKE_MM_SEG(0));
23483+#endif
23484
23485 if (unlikely(overflow))
23486 call_on_stack(print_stack_overflow, isp);
23487@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
23488 : "0" (irq), "1" (desc), "2" (isp),
23489 "D" (desc->handle_irq)
23490 : "memory", "cc", "ecx");
23491+
23492+#ifdef CONFIG_PAX_MEMORY_UDEREF
23493+ __set_fs(current_thread_info()->addr_limit);
23494+#endif
23495+
23496 return 1;
23497 }
23498
23499@@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
23500 */
23501 void __cpuinit irq_ctx_init(int cpu)
23502 {
23503- union irq_ctx *irqctx;
23504-
23505 if (per_cpu(hardirq_ctx, cpu))
23506 return;
23507
23508- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
23509- THREADINFO_GFP,
23510- THREAD_SIZE_ORDER));
23511- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
23512- irqctx->tinfo.cpu = cpu;
23513- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
23514- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
23515-
23516- per_cpu(hardirq_ctx, cpu) = irqctx;
23517-
23518- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
23519- THREADINFO_GFP,
23520- THREAD_SIZE_ORDER));
23521- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
23522- irqctx->tinfo.cpu = cpu;
23523- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
23524-
23525- per_cpu(softirq_ctx, cpu) = irqctx;
23526+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
23527+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
23528+
23529+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
23530+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
23531
23532 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
23533 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
23534@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
23535 asmlinkage void do_softirq(void)
23536 {
23537 unsigned long flags;
23538- struct thread_info *curctx;
23539 union irq_ctx *irqctx;
23540 u32 *isp;
23541
23542@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
23543 local_irq_save(flags);
23544
23545 if (local_softirq_pending()) {
23546- curctx = current_thread_info();
23547 irqctx = __this_cpu_read(softirq_ctx);
23548- irqctx->tinfo.task = curctx->task;
23549- irqctx->tinfo.previous_esp = current_stack_pointer;
23550+ irqctx->previous_esp = current_stack_pointer;
23551
23552 /* build the stack frame on the softirq stack */
23553- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
23554+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
23555+
23556+#ifdef CONFIG_PAX_MEMORY_UDEREF
23557+ __set_fs(MAKE_MM_SEG(0));
23558+#endif
23559
23560 call_on_stack(__do_softirq, isp);
23561+
23562+#ifdef CONFIG_PAX_MEMORY_UDEREF
23563+ __set_fs(current_thread_info()->addr_limit);
23564+#endif
23565+
23566 /*
23567 * Shouldn't happen, we returned above if in_interrupt():
23568 */
23569@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
23570 if (unlikely(!desc))
23571 return false;
23572
23573- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
23574+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
23575 if (unlikely(overflow))
23576 print_stack_overflow();
23577 desc->handle_irq(irq, desc);
23578diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
23579index d04d3ec..ea4b374 100644
23580--- a/arch/x86/kernel/irq_64.c
23581+++ b/arch/x86/kernel/irq_64.c
23582@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
23583 u64 estack_top, estack_bottom;
23584 u64 curbase = (u64)task_stack_page(current);
23585
23586- if (user_mode_vm(regs))
23587+ if (user_mode(regs))
23588 return;
23589
23590 if (regs->sp >= curbase + sizeof(struct thread_info) +
23591diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
23592index dc1404b..bbc43e7 100644
23593--- a/arch/x86/kernel/kdebugfs.c
23594+++ b/arch/x86/kernel/kdebugfs.c
23595@@ -27,7 +27,7 @@ struct setup_data_node {
23596 u32 len;
23597 };
23598
23599-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
23600+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
23601 size_t count, loff_t *ppos)
23602 {
23603 struct setup_data_node *node = file->private_data;
23604diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
23605index 836f832..a8bda67 100644
23606--- a/arch/x86/kernel/kgdb.c
23607+++ b/arch/x86/kernel/kgdb.c
23608@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
23609 #ifdef CONFIG_X86_32
23610 switch (regno) {
23611 case GDB_SS:
23612- if (!user_mode_vm(regs))
23613+ if (!user_mode(regs))
23614 *(unsigned long *)mem = __KERNEL_DS;
23615 break;
23616 case GDB_SP:
23617- if (!user_mode_vm(regs))
23618+ if (!user_mode(regs))
23619 *(unsigned long *)mem = kernel_stack_pointer(regs);
23620 break;
23621 case GDB_GS:
23622@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
23623 bp->attr.bp_addr = breakinfo[breakno].addr;
23624 bp->attr.bp_len = breakinfo[breakno].len;
23625 bp->attr.bp_type = breakinfo[breakno].type;
23626- info->address = breakinfo[breakno].addr;
23627+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
23628+ info->address = ktla_ktva(breakinfo[breakno].addr);
23629+ else
23630+ info->address = breakinfo[breakno].addr;
23631 info->len = breakinfo[breakno].len;
23632 info->type = breakinfo[breakno].type;
23633 val = arch_install_hw_breakpoint(bp);
23634@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
23635 case 'k':
23636 /* clear the trace bit */
23637 linux_regs->flags &= ~X86_EFLAGS_TF;
23638- atomic_set(&kgdb_cpu_doing_single_step, -1);
23639+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
23640
23641 /* set the trace bit if we're stepping */
23642 if (remcomInBuffer[0] == 's') {
23643 linux_regs->flags |= X86_EFLAGS_TF;
23644- atomic_set(&kgdb_cpu_doing_single_step,
23645+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
23646 raw_smp_processor_id());
23647 }
23648
23649@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
23650
23651 switch (cmd) {
23652 case DIE_DEBUG:
23653- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
23654+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
23655 if (user_mode(regs))
23656 return single_step_cont(regs, args);
23657 break;
23658@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
23659 #endif /* CONFIG_DEBUG_RODATA */
23660
23661 bpt->type = BP_BREAKPOINT;
23662- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
23663+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
23664 BREAK_INSTR_SIZE);
23665 if (err)
23666 return err;
23667- err = probe_kernel_write((char *)bpt->bpt_addr,
23668+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
23669 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
23670 #ifdef CONFIG_DEBUG_RODATA
23671 if (!err)
23672@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
23673 return -EBUSY;
23674 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
23675 BREAK_INSTR_SIZE);
23676- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
23677+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
23678 if (err)
23679 return err;
23680 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
23681@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
23682 if (mutex_is_locked(&text_mutex))
23683 goto knl_write;
23684 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
23685- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
23686+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
23687 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
23688 goto knl_write;
23689 return err;
23690 knl_write:
23691 #endif /* CONFIG_DEBUG_RODATA */
23692- return probe_kernel_write((char *)bpt->bpt_addr,
23693+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
23694 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
23695 }
23696
23697diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
23698index 211bce4..6e2580a 100644
23699--- a/arch/x86/kernel/kprobes/core.c
23700+++ b/arch/x86/kernel/kprobes/core.c
23701@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
23702 s32 raddr;
23703 } __packed *insn;
23704
23705- insn = (struct __arch_relative_insn *)from;
23706+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
23707+
23708+ pax_open_kernel();
23709 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
23710 insn->op = op;
23711+ pax_close_kernel();
23712 }
23713
23714 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
23715@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
23716 kprobe_opcode_t opcode;
23717 kprobe_opcode_t *orig_opcodes = opcodes;
23718
23719- if (search_exception_tables((unsigned long)opcodes))
23720+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
23721 return 0; /* Page fault may occur on this address. */
23722
23723 retry:
23724@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
23725 * for the first byte, we can recover the original instruction
23726 * from it and kp->opcode.
23727 */
23728- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
23729+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
23730 buf[0] = kp->opcode;
23731- return (unsigned long)buf;
23732+ return ktva_ktla((unsigned long)buf);
23733 }
23734
23735 /*
23736@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
23737 /* Another subsystem puts a breakpoint, failed to recover */
23738 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
23739 return 0;
23740+ pax_open_kernel();
23741 memcpy(dest, insn.kaddr, insn.length);
23742+ pax_close_kernel();
23743
23744 #ifdef CONFIG_X86_64
23745 if (insn_rip_relative(&insn)) {
23746@@ -359,7 +364,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
23747 return 0;
23748 }
23749 disp = (u8 *) dest + insn_offset_displacement(&insn);
23750+ pax_open_kernel();
23751 *(s32 *) disp = (s32) newdisp;
23752+ pax_close_kernel();
23753 }
23754 #endif
23755 return insn.length;
23756@@ -498,7 +505,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
23757 * nor set current_kprobe, because it doesn't use single
23758 * stepping.
23759 */
23760- regs->ip = (unsigned long)p->ainsn.insn;
23761+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
23762 preempt_enable_no_resched();
23763 return;
23764 }
23765@@ -515,9 +522,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
23766 regs->flags &= ~X86_EFLAGS_IF;
23767 /* single step inline if the instruction is an int3 */
23768 if (p->opcode == BREAKPOINT_INSTRUCTION)
23769- regs->ip = (unsigned long)p->addr;
23770+ regs->ip = ktla_ktva((unsigned long)p->addr);
23771 else
23772- regs->ip = (unsigned long)p->ainsn.insn;
23773+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
23774 }
23775
23776 /*
23777@@ -596,7 +603,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
23778 setup_singlestep(p, regs, kcb, 0);
23779 return 1;
23780 }
23781- } else if (*addr != BREAKPOINT_INSTRUCTION) {
23782+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
23783 /*
23784 * The breakpoint instruction was removed right
23785 * after we hit it. Another cpu has removed
23786@@ -642,6 +649,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
23787 " movq %rax, 152(%rsp)\n"
23788 RESTORE_REGS_STRING
23789 " popfq\n"
23790+#ifdef KERNEXEC_PLUGIN
23791+ " btsq $63,(%rsp)\n"
23792+#endif
23793 #else
23794 " pushf\n"
23795 SAVE_REGS_STRING
23796@@ -779,7 +789,7 @@ static void __kprobes
23797 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
23798 {
23799 unsigned long *tos = stack_addr(regs);
23800- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
23801+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
23802 unsigned long orig_ip = (unsigned long)p->addr;
23803 kprobe_opcode_t *insn = p->ainsn.insn;
23804
23805@@ -961,7 +971,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
23806 struct die_args *args = data;
23807 int ret = NOTIFY_DONE;
23808
23809- if (args->regs && user_mode_vm(args->regs))
23810+ if (args->regs && user_mode(args->regs))
23811 return ret;
23812
23813 switch (val) {
23814diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
23815index 76dc6f0..66bdfc3 100644
23816--- a/arch/x86/kernel/kprobes/opt.c
23817+++ b/arch/x86/kernel/kprobes/opt.c
23818@@ -79,6 +79,7 @@ found:
23819 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
23820 static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
23821 {
23822+ pax_open_kernel();
23823 #ifdef CONFIG_X86_64
23824 *addr++ = 0x48;
23825 *addr++ = 0xbf;
23826@@ -86,6 +87,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v
23827 *addr++ = 0xb8;
23828 #endif
23829 *(unsigned long *)addr = val;
23830+ pax_close_kernel();
23831 }
23832
23833 static void __used __kprobes kprobes_optinsn_template_holder(void)
23834@@ -338,7 +340,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
23835 * Verify if the address gap is in 2GB range, because this uses
23836 * a relative jump.
23837 */
23838- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
23839+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
23840 if (abs(rel) > 0x7fffffff)
23841 return -ERANGE;
23842
23843@@ -353,16 +355,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
23844 op->optinsn.size = ret;
23845
23846 /* Copy arch-dep-instance from template */
23847- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
23848+ pax_open_kernel();
23849+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
23850+ pax_close_kernel();
23851
23852 /* Set probe information */
23853 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
23854
23855 /* Set probe function call */
23856- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
23857+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
23858
23859 /* Set returning jmp instruction at the tail of out-of-line buffer */
23860- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
23861+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
23862 (u8 *)op->kp.addr + op->optinsn.size);
23863
23864 flush_icache_range((unsigned long) buf,
23865@@ -385,7 +389,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
23866 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
23867
23868 /* Backup instructions which will be replaced by jump address */
23869- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
23870+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
23871 RELATIVE_ADDR_SIZE);
23872
23873 insn_buf[0] = RELATIVEJUMP_OPCODE;
23874@@ -483,7 +487,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
23875 /* This kprobe is really able to run optimized path. */
23876 op = container_of(p, struct optimized_kprobe, kp);
23877 /* Detour through copied instructions */
23878- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
23879+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
23880 if (!reenter)
23881 reset_current_kprobe();
23882 preempt_enable_no_resched();
23883diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
23884index cd6d9a5..16245a4 100644
23885--- a/arch/x86/kernel/kvm.c
23886+++ b/arch/x86/kernel/kvm.c
23887@@ -455,7 +455,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
23888 return NOTIFY_OK;
23889 }
23890
23891-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
23892+static struct notifier_block kvm_cpu_notifier = {
23893 .notifier_call = kvm_cpu_notify,
23894 };
23895 #endif
23896diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
23897index ebc9873..1b9724b 100644
23898--- a/arch/x86/kernel/ldt.c
23899+++ b/arch/x86/kernel/ldt.c
23900@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
23901 if (reload) {
23902 #ifdef CONFIG_SMP
23903 preempt_disable();
23904- load_LDT(pc);
23905+ load_LDT_nolock(pc);
23906 if (!cpumask_equal(mm_cpumask(current->mm),
23907 cpumask_of(smp_processor_id())))
23908 smp_call_function(flush_ldt, current->mm, 1);
23909 preempt_enable();
23910 #else
23911- load_LDT(pc);
23912+ load_LDT_nolock(pc);
23913 #endif
23914 }
23915 if (oldsize) {
23916@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
23917 return err;
23918
23919 for (i = 0; i < old->size; i++)
23920- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
23921+ write_ldt_entry(new->ldt, i, old->ldt + i);
23922 return 0;
23923 }
23924
23925@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
23926 retval = copy_ldt(&mm->context, &old_mm->context);
23927 mutex_unlock(&old_mm->context.lock);
23928 }
23929+
23930+ if (tsk == current) {
23931+ mm->context.vdso = 0;
23932+
23933+#ifdef CONFIG_X86_32
23934+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23935+ mm->context.user_cs_base = 0UL;
23936+ mm->context.user_cs_limit = ~0UL;
23937+
23938+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
23939+ cpus_clear(mm->context.cpu_user_cs_mask);
23940+#endif
23941+
23942+#endif
23943+#endif
23944+
23945+ }
23946+
23947 return retval;
23948 }
23949
23950@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
23951 }
23952 }
23953
23954+#ifdef CONFIG_PAX_SEGMEXEC
23955+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
23956+ error = -EINVAL;
23957+ goto out_unlock;
23958+ }
23959+#endif
23960+
23961 fill_ldt(&ldt, &ldt_info);
23962 if (oldmode)
23963 ldt.avl = 0;
23964diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
23965index 5b19e4d..6476a76 100644
23966--- a/arch/x86/kernel/machine_kexec_32.c
23967+++ b/arch/x86/kernel/machine_kexec_32.c
23968@@ -26,7 +26,7 @@
23969 #include <asm/cacheflush.h>
23970 #include <asm/debugreg.h>
23971
23972-static void set_idt(void *newidt, __u16 limit)
23973+static void set_idt(struct desc_struct *newidt, __u16 limit)
23974 {
23975 struct desc_ptr curidt;
23976
23977@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
23978 }
23979
23980
23981-static void set_gdt(void *newgdt, __u16 limit)
23982+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
23983 {
23984 struct desc_ptr curgdt;
23985
23986@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
23987 }
23988
23989 control_page = page_address(image->control_code_page);
23990- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
23991+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
23992
23993 relocate_kernel_ptr = control_page;
23994 page_list[PA_CONTROL_PAGE] = __pa(control_page);
23995diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
23996index 22db92b..d546bec 100644
23997--- a/arch/x86/kernel/microcode_core.c
23998+++ b/arch/x86/kernel/microcode_core.c
23999@@ -513,7 +513,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
24000 return NOTIFY_OK;
24001 }
24002
24003-static struct notifier_block __refdata mc_cpu_notifier = {
24004+static struct notifier_block mc_cpu_notifier = {
24005 .notifier_call = mc_cpu_callback,
24006 };
24007
24008diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
24009index 5fb2ceb..3ae90bb 100644
24010--- a/arch/x86/kernel/microcode_intel.c
24011+++ b/arch/x86/kernel/microcode_intel.c
24012@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
24013
24014 static int get_ucode_user(void *to, const void *from, size_t n)
24015 {
24016- return copy_from_user(to, from, n);
24017+ return copy_from_user(to, (const void __force_user *)from, n);
24018 }
24019
24020 static enum ucode_state
24021 request_microcode_user(int cpu, const void __user *buf, size_t size)
24022 {
24023- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
24024+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
24025 }
24026
24027 static void microcode_fini_cpu(int cpu)
24028diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
24029index 216a4d7..228255a 100644
24030--- a/arch/x86/kernel/module.c
24031+++ b/arch/x86/kernel/module.c
24032@@ -43,15 +43,60 @@ do { \
24033 } while (0)
24034 #endif
24035
24036-void *module_alloc(unsigned long size)
24037+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
24038 {
24039- if (PAGE_ALIGN(size) > MODULES_LEN)
24040+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
24041 return NULL;
24042 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
24043- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
24044+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
24045 -1, __builtin_return_address(0));
24046 }
24047
24048+void *module_alloc(unsigned long size)
24049+{
24050+
24051+#ifdef CONFIG_PAX_KERNEXEC
24052+ return __module_alloc(size, PAGE_KERNEL);
24053+#else
24054+ return __module_alloc(size, PAGE_KERNEL_EXEC);
24055+#endif
24056+
24057+}
24058+
24059+#ifdef CONFIG_PAX_KERNEXEC
24060+#ifdef CONFIG_X86_32
24061+void *module_alloc_exec(unsigned long size)
24062+{
24063+ struct vm_struct *area;
24064+
24065+ if (size == 0)
24066+ return NULL;
24067+
24068+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
24069+ return area ? area->addr : NULL;
24070+}
24071+EXPORT_SYMBOL(module_alloc_exec);
24072+
24073+void module_free_exec(struct module *mod, void *module_region)
24074+{
24075+ vunmap(module_region);
24076+}
24077+EXPORT_SYMBOL(module_free_exec);
24078+#else
24079+void module_free_exec(struct module *mod, void *module_region)
24080+{
24081+ module_free(mod, module_region);
24082+}
24083+EXPORT_SYMBOL(module_free_exec);
24084+
24085+void *module_alloc_exec(unsigned long size)
24086+{
24087+ return __module_alloc(size, PAGE_KERNEL_RX);
24088+}
24089+EXPORT_SYMBOL(module_alloc_exec);
24090+#endif
24091+#endif
24092+
24093 #ifdef CONFIG_X86_32
24094 int apply_relocate(Elf32_Shdr *sechdrs,
24095 const char *strtab,
24096@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
24097 unsigned int i;
24098 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
24099 Elf32_Sym *sym;
24100- uint32_t *location;
24101+ uint32_t *plocation, location;
24102
24103 DEBUGP("Applying relocate section %u to %u\n",
24104 relsec, sechdrs[relsec].sh_info);
24105 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
24106 /* This is where to make the change */
24107- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
24108- + rel[i].r_offset;
24109+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
24110+ location = (uint32_t)plocation;
24111+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
24112+ plocation = ktla_ktva((void *)plocation);
24113 /* This is the symbol it is referring to. Note that all
24114 undefined symbols have been resolved. */
24115 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
24116@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
24117 switch (ELF32_R_TYPE(rel[i].r_info)) {
24118 case R_386_32:
24119 /* We add the value into the location given */
24120- *location += sym->st_value;
24121+ pax_open_kernel();
24122+ *plocation += sym->st_value;
24123+ pax_close_kernel();
24124 break;
24125 case R_386_PC32:
24126 /* Add the value, subtract its position */
24127- *location += sym->st_value - (uint32_t)location;
24128+ pax_open_kernel();
24129+ *plocation += sym->st_value - location;
24130+ pax_close_kernel();
24131 break;
24132 default:
24133 pr_err("%s: Unknown relocation: %u\n",
24134@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
24135 case R_X86_64_NONE:
24136 break;
24137 case R_X86_64_64:
24138+ pax_open_kernel();
24139 *(u64 *)loc = val;
24140+ pax_close_kernel();
24141 break;
24142 case R_X86_64_32:
24143+ pax_open_kernel();
24144 *(u32 *)loc = val;
24145+ pax_close_kernel();
24146 if (val != *(u32 *)loc)
24147 goto overflow;
24148 break;
24149 case R_X86_64_32S:
24150+ pax_open_kernel();
24151 *(s32 *)loc = val;
24152+ pax_close_kernel();
24153 if ((s64)val != *(s32 *)loc)
24154 goto overflow;
24155 break;
24156 case R_X86_64_PC32:
24157 val -= (u64)loc;
24158+ pax_open_kernel();
24159 *(u32 *)loc = val;
24160+ pax_close_kernel();
24161+
24162 #if 0
24163 if ((s64)val != *(s32 *)loc)
24164 goto overflow;
24165diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
24166index ce13049..e2e9c3c 100644
24167--- a/arch/x86/kernel/msr.c
24168+++ b/arch/x86/kernel/msr.c
24169@@ -233,7 +233,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
24170 return notifier_from_errno(err);
24171 }
24172
24173-static struct notifier_block __refdata msr_class_cpu_notifier = {
24174+static struct notifier_block msr_class_cpu_notifier = {
24175 .notifier_call = msr_class_cpu_callback,
24176 };
24177
24178diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
24179index 6030805..2d33f21 100644
24180--- a/arch/x86/kernel/nmi.c
24181+++ b/arch/x86/kernel/nmi.c
24182@@ -105,7 +105,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
24183 return handled;
24184 }
24185
24186-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
24187+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
24188 {
24189 struct nmi_desc *desc = nmi_to_desc(type);
24190 unsigned long flags;
24191@@ -129,9 +129,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
24192 * event confuses some handlers (kdump uses this flag)
24193 */
24194 if (action->flags & NMI_FLAG_FIRST)
24195- list_add_rcu(&action->list, &desc->head);
24196+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
24197 else
24198- list_add_tail_rcu(&action->list, &desc->head);
24199+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
24200
24201 spin_unlock_irqrestore(&desc->lock, flags);
24202 return 0;
24203@@ -154,7 +154,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
24204 if (!strcmp(n->name, name)) {
24205 WARN(in_nmi(),
24206 "Trying to free NMI (%s) from NMI context!\n", n->name);
24207- list_del_rcu(&n->list);
24208+ pax_list_del_rcu((struct list_head *)&n->list);
24209 break;
24210 }
24211 }
24212@@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
24213 dotraplinkage notrace __kprobes void
24214 do_nmi(struct pt_regs *regs, long error_code)
24215 {
24216+
24217+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24218+ if (!user_mode(regs)) {
24219+ unsigned long cs = regs->cs & 0xFFFF;
24220+ unsigned long ip = ktva_ktla(regs->ip);
24221+
24222+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
24223+ regs->ip = ip;
24224+ }
24225+#endif
24226+
24227 nmi_nesting_preprocess(regs);
24228
24229 nmi_enter();
24230diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
24231index 6d9582e..f746287 100644
24232--- a/arch/x86/kernel/nmi_selftest.c
24233+++ b/arch/x86/kernel/nmi_selftest.c
24234@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
24235 {
24236 /* trap all the unknown NMIs we may generate */
24237 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
24238- __initdata);
24239+ __initconst);
24240 }
24241
24242 static void __init cleanup_nmi_testsuite(void)
24243@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
24244 unsigned long timeout;
24245
24246 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
24247- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
24248+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
24249 nmi_fail = FAILURE;
24250 return;
24251 }
24252diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
24253index 676b8c7..870ba04 100644
24254--- a/arch/x86/kernel/paravirt-spinlocks.c
24255+++ b/arch/x86/kernel/paravirt-spinlocks.c
24256@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
24257 arch_spin_lock(lock);
24258 }
24259
24260-struct pv_lock_ops pv_lock_ops = {
24261+struct pv_lock_ops pv_lock_ops __read_only = {
24262 #ifdef CONFIG_SMP
24263 .spin_is_locked = __ticket_spin_is_locked,
24264 .spin_is_contended = __ticket_spin_is_contended,
24265diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
24266index cd6de64..27c6af0 100644
24267--- a/arch/x86/kernel/paravirt.c
24268+++ b/arch/x86/kernel/paravirt.c
24269@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
24270 {
24271 return x;
24272 }
24273+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
24274+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
24275+#endif
24276
24277 void __init default_banner(void)
24278 {
24279@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
24280 if (opfunc == NULL)
24281 /* If there's no function, patch it with a ud2a (BUG) */
24282 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
24283- else if (opfunc == _paravirt_nop)
24284+ else if (opfunc == (void *)_paravirt_nop)
24285 /* If the operation is a nop, then nop the callsite */
24286 ret = paravirt_patch_nop();
24287
24288 /* identity functions just return their single argument */
24289- else if (opfunc == _paravirt_ident_32)
24290+ else if (opfunc == (void *)_paravirt_ident_32)
24291 ret = paravirt_patch_ident_32(insnbuf, len);
24292- else if (opfunc == _paravirt_ident_64)
24293+ else if (opfunc == (void *)_paravirt_ident_64)
24294 ret = paravirt_patch_ident_64(insnbuf, len);
24295+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
24296+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
24297+ ret = paravirt_patch_ident_64(insnbuf, len);
24298+#endif
24299
24300 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
24301 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
24302@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
24303 if (insn_len > len || start == NULL)
24304 insn_len = len;
24305 else
24306- memcpy(insnbuf, start, insn_len);
24307+ memcpy(insnbuf, ktla_ktva(start), insn_len);
24308
24309 return insn_len;
24310 }
24311@@ -304,7 +311,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
24312 return this_cpu_read(paravirt_lazy_mode);
24313 }
24314
24315-struct pv_info pv_info = {
24316+struct pv_info pv_info __read_only = {
24317 .name = "bare hardware",
24318 .paravirt_enabled = 0,
24319 .kernel_rpl = 0,
24320@@ -315,16 +322,16 @@ struct pv_info pv_info = {
24321 #endif
24322 };
24323
24324-struct pv_init_ops pv_init_ops = {
24325+struct pv_init_ops pv_init_ops __read_only = {
24326 .patch = native_patch,
24327 };
24328
24329-struct pv_time_ops pv_time_ops = {
24330+struct pv_time_ops pv_time_ops __read_only = {
24331 .sched_clock = native_sched_clock,
24332 .steal_clock = native_steal_clock,
24333 };
24334
24335-struct pv_irq_ops pv_irq_ops = {
24336+struct pv_irq_ops pv_irq_ops __read_only = {
24337 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
24338 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
24339 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
24340@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
24341 #endif
24342 };
24343
24344-struct pv_cpu_ops pv_cpu_ops = {
24345+struct pv_cpu_ops pv_cpu_ops __read_only = {
24346 .cpuid = native_cpuid,
24347 .get_debugreg = native_get_debugreg,
24348 .set_debugreg = native_set_debugreg,
24349@@ -394,21 +401,26 @@ struct pv_cpu_ops pv_cpu_ops = {
24350 .end_context_switch = paravirt_nop,
24351 };
24352
24353-struct pv_apic_ops pv_apic_ops = {
24354+struct pv_apic_ops pv_apic_ops __read_only= {
24355 #ifdef CONFIG_X86_LOCAL_APIC
24356 .startup_ipi_hook = paravirt_nop,
24357 #endif
24358 };
24359
24360-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
24361+#ifdef CONFIG_X86_32
24362+#ifdef CONFIG_X86_PAE
24363+/* 64-bit pagetable entries */
24364+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
24365+#else
24366 /* 32-bit pagetable entries */
24367 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
24368+#endif
24369 #else
24370 /* 64-bit pagetable entries */
24371 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
24372 #endif
24373
24374-struct pv_mmu_ops pv_mmu_ops = {
24375+struct pv_mmu_ops pv_mmu_ops __read_only = {
24376
24377 .read_cr2 = native_read_cr2,
24378 .write_cr2 = native_write_cr2,
24379@@ -458,6 +470,7 @@ struct pv_mmu_ops pv_mmu_ops = {
24380 .make_pud = PTE_IDENT,
24381
24382 .set_pgd = native_set_pgd,
24383+ .set_pgd_batched = native_set_pgd_batched,
24384 #endif
24385 #endif /* PAGETABLE_LEVELS >= 3 */
24386
24387@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
24388 },
24389
24390 .set_fixmap = native_set_fixmap,
24391+
24392+#ifdef CONFIG_PAX_KERNEXEC
24393+ .pax_open_kernel = native_pax_open_kernel,
24394+ .pax_close_kernel = native_pax_close_kernel,
24395+#endif
24396+
24397 };
24398
24399 EXPORT_SYMBOL_GPL(pv_time_ops);
24400diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
24401index 299d493..2ccb0ee 100644
24402--- a/arch/x86/kernel/pci-calgary_64.c
24403+++ b/arch/x86/kernel/pci-calgary_64.c
24404@@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_tar(void)
24405 tce_space = be64_to_cpu(readq(target));
24406 tce_space = tce_space & TAR_SW_BITS;
24407
24408- tce_space = tce_space & (~specified_table_size);
24409+ tce_space = tce_space & (~(unsigned long)specified_table_size);
24410 info->tce_space = (u64 *)__va(tce_space);
24411 }
24412 }
24413diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
24414index 35ccf75..7a15747 100644
24415--- a/arch/x86/kernel/pci-iommu_table.c
24416+++ b/arch/x86/kernel/pci-iommu_table.c
24417@@ -2,7 +2,7 @@
24418 #include <asm/iommu_table.h>
24419 #include <linux/string.h>
24420 #include <linux/kallsyms.h>
24421-
24422+#include <linux/sched.h>
24423
24424 #define DEBUG 1
24425
24426diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
24427index 6c483ba..d10ce2f 100644
24428--- a/arch/x86/kernel/pci-swiotlb.c
24429+++ b/arch/x86/kernel/pci-swiotlb.c
24430@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
24431 void *vaddr, dma_addr_t dma_addr,
24432 struct dma_attrs *attrs)
24433 {
24434- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
24435+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
24436 }
24437
24438 static struct dma_map_ops swiotlb_dma_ops = {
24439diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
24440index 81a5f5e..20f8b58 100644
24441--- a/arch/x86/kernel/process.c
24442+++ b/arch/x86/kernel/process.c
24443@@ -36,7 +36,8 @@
24444 * section. Since TSS's are completely CPU-local, we want them
24445 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
24446 */
24447-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
24448+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
24449+EXPORT_SYMBOL(init_tss);
24450
24451 #ifdef CONFIG_X86_64
24452 static DEFINE_PER_CPU(unsigned char, is_idle);
24453@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
24454 task_xstate_cachep =
24455 kmem_cache_create("task_xstate", xstate_size,
24456 __alignof__(union thread_xstate),
24457- SLAB_PANIC | SLAB_NOTRACK, NULL);
24458+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
24459 }
24460
24461 /*
24462@@ -105,7 +106,7 @@ void exit_thread(void)
24463 unsigned long *bp = t->io_bitmap_ptr;
24464
24465 if (bp) {
24466- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
24467+ struct tss_struct *tss = init_tss + get_cpu();
24468
24469 t->io_bitmap_ptr = NULL;
24470 clear_thread_flag(TIF_IO_BITMAP);
24471@@ -125,6 +126,9 @@ void flush_thread(void)
24472 {
24473 struct task_struct *tsk = current;
24474
24475+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
24476+ loadsegment(gs, 0);
24477+#endif
24478 flush_ptrace_hw_breakpoint(tsk);
24479 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
24480 drop_init_fpu(tsk);
24481@@ -271,7 +275,7 @@ static void __exit_idle(void)
24482 void exit_idle(void)
24483 {
24484 /* idle loop has pid 0 */
24485- if (current->pid)
24486+ if (task_pid_nr(current))
24487 return;
24488 __exit_idle();
24489 }
24490@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
24491 return ret;
24492 }
24493 #endif
24494-void stop_this_cpu(void *dummy)
24495+__noreturn void stop_this_cpu(void *dummy)
24496 {
24497 local_irq_disable();
24498 /*
24499@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
24500 }
24501 early_param("idle", idle_setup);
24502
24503-unsigned long arch_align_stack(unsigned long sp)
24504+#ifdef CONFIG_PAX_RANDKSTACK
24505+void pax_randomize_kstack(struct pt_regs *regs)
24506 {
24507- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
24508- sp -= get_random_int() % 8192;
24509- return sp & ~0xf;
24510-}
24511+ struct thread_struct *thread = &current->thread;
24512+ unsigned long time;
24513
24514-unsigned long arch_randomize_brk(struct mm_struct *mm)
24515-{
24516- unsigned long range_end = mm->brk + 0x02000000;
24517- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
24518-}
24519+ if (!randomize_va_space)
24520+ return;
24521+
24522+ if (v8086_mode(regs))
24523+ return;
24524
24525+ rdtscl(time);
24526+
24527+ /* P4 seems to return a 0 LSB, ignore it */
24528+#ifdef CONFIG_MPENTIUM4
24529+ time &= 0x3EUL;
24530+ time <<= 2;
24531+#elif defined(CONFIG_X86_64)
24532+ time &= 0xFUL;
24533+ time <<= 4;
24534+#else
24535+ time &= 0x1FUL;
24536+ time <<= 3;
24537+#endif
24538+
24539+ thread->sp0 ^= time;
24540+ load_sp0(init_tss + smp_processor_id(), thread);
24541+
24542+#ifdef CONFIG_X86_64
24543+ this_cpu_write(kernel_stack, thread->sp0);
24544+#endif
24545+}
24546+#endif
24547diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
24548index 7305f7d..22f73d6 100644
24549--- a/arch/x86/kernel/process_32.c
24550+++ b/arch/x86/kernel/process_32.c
24551@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
24552 unsigned long thread_saved_pc(struct task_struct *tsk)
24553 {
24554 return ((unsigned long *)tsk->thread.sp)[3];
24555+//XXX return tsk->thread.eip;
24556 }
24557
24558 void __show_regs(struct pt_regs *regs, int all)
24559@@ -74,19 +75,18 @@ void __show_regs(struct pt_regs *regs, int all)
24560 unsigned long sp;
24561 unsigned short ss, gs;
24562
24563- if (user_mode_vm(regs)) {
24564+ if (user_mode(regs)) {
24565 sp = regs->sp;
24566 ss = regs->ss & 0xffff;
24567- gs = get_user_gs(regs);
24568 } else {
24569 sp = kernel_stack_pointer(regs);
24570 savesegment(ss, ss);
24571- savesegment(gs, gs);
24572 }
24573+ gs = get_user_gs(regs);
24574
24575 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
24576 (u16)regs->cs, regs->ip, regs->flags,
24577- smp_processor_id());
24578+ raw_smp_processor_id());
24579 print_symbol("EIP is at %s\n", regs->ip);
24580
24581 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
24582@@ -128,20 +128,21 @@ void release_thread(struct task_struct *dead_task)
24583 int copy_thread(unsigned long clone_flags, unsigned long sp,
24584 unsigned long arg, struct task_struct *p)
24585 {
24586- struct pt_regs *childregs = task_pt_regs(p);
24587+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
24588 struct task_struct *tsk;
24589 int err;
24590
24591 p->thread.sp = (unsigned long) childregs;
24592 p->thread.sp0 = (unsigned long) (childregs+1);
24593+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
24594
24595 if (unlikely(p->flags & PF_KTHREAD)) {
24596 /* kernel thread */
24597 memset(childregs, 0, sizeof(struct pt_regs));
24598 p->thread.ip = (unsigned long) ret_from_kernel_thread;
24599- task_user_gs(p) = __KERNEL_STACK_CANARY;
24600- childregs->ds = __USER_DS;
24601- childregs->es = __USER_DS;
24602+ savesegment(gs, childregs->gs);
24603+ childregs->ds = __KERNEL_DS;
24604+ childregs->es = __KERNEL_DS;
24605 childregs->fs = __KERNEL_PERCPU;
24606 childregs->bx = sp; /* function */
24607 childregs->bp = arg;
24608@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24609 struct thread_struct *prev = &prev_p->thread,
24610 *next = &next_p->thread;
24611 int cpu = smp_processor_id();
24612- struct tss_struct *tss = &per_cpu(init_tss, cpu);
24613+ struct tss_struct *tss = init_tss + cpu;
24614 fpu_switch_t fpu;
24615
24616 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
24617@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24618 */
24619 lazy_save_gs(prev->gs);
24620
24621+#ifdef CONFIG_PAX_MEMORY_UDEREF
24622+ __set_fs(task_thread_info(next_p)->addr_limit);
24623+#endif
24624+
24625 /*
24626 * Load the per-thread Thread-Local Storage descriptor.
24627 */
24628@@ -302,6 +307,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24629 */
24630 arch_end_context_switch(next_p);
24631
24632+ this_cpu_write(current_task, next_p);
24633+ this_cpu_write(current_tinfo, &next_p->tinfo);
24634+
24635 /*
24636 * Restore %gs if needed (which is common)
24637 */
24638@@ -310,8 +318,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24639
24640 switch_fpu_finish(next_p, fpu);
24641
24642- this_cpu_write(current_task, next_p);
24643-
24644 return prev_p;
24645 }
24646
24647@@ -341,4 +347,3 @@ unsigned long get_wchan(struct task_struct *p)
24648 } while (count++ < 16);
24649 return 0;
24650 }
24651-
24652diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
24653index 355ae06..560fbbe 100644
24654--- a/arch/x86/kernel/process_64.c
24655+++ b/arch/x86/kernel/process_64.c
24656@@ -151,10 +151,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
24657 struct pt_regs *childregs;
24658 struct task_struct *me = current;
24659
24660- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
24661+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
24662 childregs = task_pt_regs(p);
24663 p->thread.sp = (unsigned long) childregs;
24664 p->thread.usersp = me->thread.usersp;
24665+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
24666 set_tsk_thread_flag(p, TIF_FORK);
24667 p->fpu_counter = 0;
24668 p->thread.io_bitmap_ptr = NULL;
24669@@ -165,6 +166,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
24670 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
24671 savesegment(es, p->thread.es);
24672 savesegment(ds, p->thread.ds);
24673+ savesegment(ss, p->thread.ss);
24674+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
24675 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
24676
24677 if (unlikely(p->flags & PF_KTHREAD)) {
24678@@ -273,7 +276,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24679 struct thread_struct *prev = &prev_p->thread;
24680 struct thread_struct *next = &next_p->thread;
24681 int cpu = smp_processor_id();
24682- struct tss_struct *tss = &per_cpu(init_tss, cpu);
24683+ struct tss_struct *tss = init_tss + cpu;
24684 unsigned fsindex, gsindex;
24685 fpu_switch_t fpu;
24686
24687@@ -296,6 +299,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24688 if (unlikely(next->ds | prev->ds))
24689 loadsegment(ds, next->ds);
24690
24691+ savesegment(ss, prev->ss);
24692+ if (unlikely(next->ss != prev->ss))
24693+ loadsegment(ss, next->ss);
24694
24695 /* We must save %fs and %gs before load_TLS() because
24696 * %fs and %gs may be cleared by load_TLS().
24697@@ -355,10 +361,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24698 prev->usersp = this_cpu_read(old_rsp);
24699 this_cpu_write(old_rsp, next->usersp);
24700 this_cpu_write(current_task, next_p);
24701+ this_cpu_write(current_tinfo, &next_p->tinfo);
24702
24703- this_cpu_write(kernel_stack,
24704- (unsigned long)task_stack_page(next_p) +
24705- THREAD_SIZE - KERNEL_STACK_OFFSET);
24706+ this_cpu_write(kernel_stack, next->sp0);
24707
24708 /*
24709 * Now maybe reload the debug registers and handle I/O bitmaps
24710@@ -427,12 +432,11 @@ unsigned long get_wchan(struct task_struct *p)
24711 if (!p || p == current || p->state == TASK_RUNNING)
24712 return 0;
24713 stack = (unsigned long)task_stack_page(p);
24714- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
24715+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
24716 return 0;
24717 fp = *(u64 *)(p->thread.sp);
24718 do {
24719- if (fp < (unsigned long)stack ||
24720- fp >= (unsigned long)stack+THREAD_SIZE)
24721+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
24722 return 0;
24723 ip = *(u64 *)(fp+8);
24724 if (!in_sched_functions(ip))
24725diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
24726index 29a8120..a50b5ee 100644
24727--- a/arch/x86/kernel/ptrace.c
24728+++ b/arch/x86/kernel/ptrace.c
24729@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
24730 {
24731 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
24732 unsigned long sp = (unsigned long)&regs->sp;
24733- struct thread_info *tinfo;
24734
24735- if (context == (sp & ~(THREAD_SIZE - 1)))
24736+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
24737 return sp;
24738
24739- tinfo = (struct thread_info *)context;
24740- if (tinfo->previous_esp)
24741- return tinfo->previous_esp;
24742+ sp = *(unsigned long *)context;
24743+ if (sp)
24744+ return sp;
24745
24746 return (unsigned long)regs;
24747 }
24748@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
24749 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
24750 {
24751 int i;
24752- int dr7 = 0;
24753+ unsigned long dr7 = 0;
24754 struct arch_hw_breakpoint *info;
24755
24756 for (i = 0; i < HBP_NUM; i++) {
24757@@ -856,7 +855,7 @@ long arch_ptrace(struct task_struct *child, long request,
24758 unsigned long addr, unsigned long data)
24759 {
24760 int ret;
24761- unsigned long __user *datap = (unsigned long __user *)data;
24762+ unsigned long __user *datap = (__force unsigned long __user *)data;
24763
24764 switch (request) {
24765 /* read the word at location addr in the USER area. */
24766@@ -941,14 +940,14 @@ long arch_ptrace(struct task_struct *child, long request,
24767 if ((int) addr < 0)
24768 return -EIO;
24769 ret = do_get_thread_area(child, addr,
24770- (struct user_desc __user *)data);
24771+ (__force struct user_desc __user *) data);
24772 break;
24773
24774 case PTRACE_SET_THREAD_AREA:
24775 if ((int) addr < 0)
24776 return -EIO;
24777 ret = do_set_thread_area(child, addr,
24778- (struct user_desc __user *)data, 0);
24779+ (__force struct user_desc __user *) data, 0);
24780 break;
24781 #endif
24782
24783@@ -1326,7 +1325,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
24784
24785 #ifdef CONFIG_X86_64
24786
24787-static struct user_regset x86_64_regsets[] __read_mostly = {
24788+static user_regset_no_const x86_64_regsets[] __read_only = {
24789 [REGSET_GENERAL] = {
24790 .core_note_type = NT_PRSTATUS,
24791 .n = sizeof(struct user_regs_struct) / sizeof(long),
24792@@ -1367,7 +1366,7 @@ static const struct user_regset_view user_x86_64_view = {
24793 #endif /* CONFIG_X86_64 */
24794
24795 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
24796-static struct user_regset x86_32_regsets[] __read_mostly = {
24797+static user_regset_no_const x86_32_regsets[] __read_only = {
24798 [REGSET_GENERAL] = {
24799 .core_note_type = NT_PRSTATUS,
24800 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
24801@@ -1420,7 +1419,7 @@ static const struct user_regset_view user_x86_32_view = {
24802 */
24803 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
24804
24805-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
24806+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
24807 {
24808 #ifdef CONFIG_X86_64
24809 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
24810@@ -1455,7 +1454,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
24811 memset(info, 0, sizeof(*info));
24812 info->si_signo = SIGTRAP;
24813 info->si_code = si_code;
24814- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
24815+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
24816 }
24817
24818 void user_single_step_siginfo(struct task_struct *tsk,
24819@@ -1484,6 +1483,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
24820 # define IS_IA32 0
24821 #endif
24822
24823+#ifdef CONFIG_GRKERNSEC_SETXID
24824+extern void gr_delayed_cred_worker(void);
24825+#endif
24826+
24827 /*
24828 * We must return the syscall number to actually look up in the table.
24829 * This can be -1L to skip running any syscall at all.
24830@@ -1494,6 +1497,11 @@ long syscall_trace_enter(struct pt_regs *regs)
24831
24832 user_exit();
24833
24834+#ifdef CONFIG_GRKERNSEC_SETXID
24835+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
24836+ gr_delayed_cred_worker();
24837+#endif
24838+
24839 /*
24840 * If we stepped into a sysenter/syscall insn, it trapped in
24841 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
24842@@ -1549,6 +1557,11 @@ void syscall_trace_leave(struct pt_regs *regs)
24843 */
24844 user_exit();
24845
24846+#ifdef CONFIG_GRKERNSEC_SETXID
24847+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
24848+ gr_delayed_cred_worker();
24849+#endif
24850+
24851 audit_syscall_exit(regs);
24852
24853 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
24854diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
24855index 2cb9470..ff1fd80 100644
24856--- a/arch/x86/kernel/pvclock.c
24857+++ b/arch/x86/kernel/pvclock.c
24858@@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
24859 return pv_tsc_khz;
24860 }
24861
24862-static atomic64_t last_value = ATOMIC64_INIT(0);
24863+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
24864
24865 void pvclock_resume(void)
24866 {
24867- atomic64_set(&last_value, 0);
24868+ atomic64_set_unchecked(&last_value, 0);
24869 }
24870
24871 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
24872@@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
24873 * updating at the same time, and one of them could be slightly behind,
24874 * making the assumption that last_value always go forward fail to hold.
24875 */
24876- last = atomic64_read(&last_value);
24877+ last = atomic64_read_unchecked(&last_value);
24878 do {
24879 if (ret < last)
24880 return last;
24881- last = atomic64_cmpxchg(&last_value, last, ret);
24882+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
24883 } while (unlikely(last != ret));
24884
24885 return ret;
24886diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
24887index 76fa1e9..abf09ea 100644
24888--- a/arch/x86/kernel/reboot.c
24889+++ b/arch/x86/kernel/reboot.c
24890@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
24891 EXPORT_SYMBOL(pm_power_off);
24892
24893 static const struct desc_ptr no_idt = {};
24894-static int reboot_mode;
24895+static unsigned short reboot_mode;
24896 enum reboot_type reboot_type = BOOT_ACPI;
24897 int reboot_force;
24898
24899@@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
24900
24901 void __noreturn machine_real_restart(unsigned int type)
24902 {
24903+
24904+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
24905+ struct desc_struct *gdt;
24906+#endif
24907+
24908 local_irq_disable();
24909
24910 /*
24911@@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
24912
24913 /* Jump to the identity-mapped low memory code */
24914 #ifdef CONFIG_X86_32
24915- asm volatile("jmpl *%0" : :
24916+
24917+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
24918+ gdt = get_cpu_gdt_table(smp_processor_id());
24919+ pax_open_kernel();
24920+#ifdef CONFIG_PAX_MEMORY_UDEREF
24921+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
24922+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
24923+ loadsegment(ds, __KERNEL_DS);
24924+ loadsegment(es, __KERNEL_DS);
24925+ loadsegment(ss, __KERNEL_DS);
24926+#endif
24927+#ifdef CONFIG_PAX_KERNEXEC
24928+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
24929+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
24930+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
24931+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
24932+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
24933+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
24934+#endif
24935+ pax_close_kernel();
24936+#endif
24937+
24938+ asm volatile("ljmpl *%0" : :
24939 "rm" (real_mode_header->machine_real_restart_asm),
24940 "a" (type));
24941 #else
24942@@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
24943 * try to force a triple fault and then cycle between hitting the keyboard
24944 * controller and doing that
24945 */
24946-static void native_machine_emergency_restart(void)
24947+static void __noreturn native_machine_emergency_restart(void)
24948 {
24949 int i;
24950 int attempt = 0;
24951@@ -654,13 +681,13 @@ void native_machine_shutdown(void)
24952 #endif
24953 }
24954
24955-static void __machine_emergency_restart(int emergency)
24956+static void __noreturn __machine_emergency_restart(int emergency)
24957 {
24958 reboot_emergency = emergency;
24959 machine_ops.emergency_restart();
24960 }
24961
24962-static void native_machine_restart(char *__unused)
24963+static void __noreturn native_machine_restart(char *__unused)
24964 {
24965 pr_notice("machine restart\n");
24966
24967@@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
24968 __machine_emergency_restart(0);
24969 }
24970
24971-static void native_machine_halt(void)
24972+static void __noreturn native_machine_halt(void)
24973 {
24974 /* Stop other cpus and apics */
24975 machine_shutdown();
24976@@ -679,7 +706,7 @@ static void native_machine_halt(void)
24977 stop_this_cpu(NULL);
24978 }
24979
24980-static void native_machine_power_off(void)
24981+static void __noreturn native_machine_power_off(void)
24982 {
24983 if (pm_power_off) {
24984 if (!reboot_force)
24985@@ -688,9 +715,10 @@ static void native_machine_power_off(void)
24986 }
24987 /* A fallback in case there is no PM info available */
24988 tboot_shutdown(TB_SHUTDOWN_HALT);
24989+ unreachable();
24990 }
24991
24992-struct machine_ops machine_ops = {
24993+struct machine_ops machine_ops __read_only = {
24994 .power_off = native_machine_power_off,
24995 .shutdown = native_machine_shutdown,
24996 .emergency_restart = native_machine_emergency_restart,
24997diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
24998index c8e41e9..64049ef 100644
24999--- a/arch/x86/kernel/reboot_fixups_32.c
25000+++ b/arch/x86/kernel/reboot_fixups_32.c
25001@@ -57,7 +57,7 @@ struct device_fixup {
25002 unsigned int vendor;
25003 unsigned int device;
25004 void (*reboot_fixup)(struct pci_dev *);
25005-};
25006+} __do_const;
25007
25008 /*
25009 * PCI ids solely used for fixups_table go here
25010diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
25011index f2bb9c9..bed145d7 100644
25012--- a/arch/x86/kernel/relocate_kernel_64.S
25013+++ b/arch/x86/kernel/relocate_kernel_64.S
25014@@ -11,6 +11,7 @@
25015 #include <asm/kexec.h>
25016 #include <asm/processor-flags.h>
25017 #include <asm/pgtable_types.h>
25018+#include <asm/alternative-asm.h>
25019
25020 /*
25021 * Must be relocatable PIC code callable as a C function
25022@@ -167,6 +168,7 @@ identity_mapped:
25023 xorq %r14, %r14
25024 xorq %r15, %r15
25025
25026+ pax_force_retaddr 0, 1
25027 ret
25028
25029 1:
25030diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
25031index 56f7fcf..2cfe4f1 100644
25032--- a/arch/x86/kernel/setup.c
25033+++ b/arch/x86/kernel/setup.c
25034@@ -110,6 +110,7 @@
25035 #include <asm/mce.h>
25036 #include <asm/alternative.h>
25037 #include <asm/prom.h>
25038+#include <asm/boot.h>
25039
25040 /*
25041 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
25042@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
25043 #endif
25044
25045
25046-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
25047-unsigned long mmu_cr4_features;
25048+#ifdef CONFIG_X86_64
25049+unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
25050+#elif defined(CONFIG_X86_PAE)
25051+unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
25052 #else
25053-unsigned long mmu_cr4_features = X86_CR4_PAE;
25054+unsigned long mmu_cr4_features __read_only;
25055 #endif
25056
25057+void set_in_cr4(unsigned long mask)
25058+{
25059+ unsigned long cr4 = read_cr4();
25060+
25061+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
25062+ return;
25063+
25064+ pax_open_kernel();
25065+ mmu_cr4_features |= mask;
25066+ pax_close_kernel();
25067+
25068+ if (trampoline_cr4_features)
25069+ *trampoline_cr4_features = mmu_cr4_features;
25070+ cr4 |= mask;
25071+ write_cr4(cr4);
25072+}
25073+EXPORT_SYMBOL(set_in_cr4);
25074+
25075+void clear_in_cr4(unsigned long mask)
25076+{
25077+ unsigned long cr4 = read_cr4();
25078+
25079+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
25080+ return;
25081+
25082+ pax_open_kernel();
25083+ mmu_cr4_features &= ~mask;
25084+ pax_close_kernel();
25085+
25086+ if (trampoline_cr4_features)
25087+ *trampoline_cr4_features = mmu_cr4_features;
25088+ cr4 &= ~mask;
25089+ write_cr4(cr4);
25090+}
25091+EXPORT_SYMBOL(clear_in_cr4);
25092+
25093 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
25094 int bootloader_type, bootloader_version;
25095
25096@@ -444,7 +483,7 @@ static void __init parse_setup_data(void)
25097
25098 switch (data->type) {
25099 case SETUP_E820_EXT:
25100- parse_e820_ext(data);
25101+ parse_e820_ext((struct setup_data __force_kernel *)data);
25102 break;
25103 case SETUP_DTB:
25104 add_dtb(pa_data);
25105@@ -771,7 +810,7 @@ static void __init trim_bios_range(void)
25106 * area (640->1Mb) as ram even though it is not.
25107 * take them out.
25108 */
25109- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
25110+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
25111
25112 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
25113 }
25114@@ -779,7 +818,7 @@ static void __init trim_bios_range(void)
25115 /* called before trim_bios_range() to spare extra sanitize */
25116 static void __init e820_add_kernel_range(void)
25117 {
25118- u64 start = __pa_symbol(_text);
25119+ u64 start = __pa_symbol(ktla_ktva(_text));
25120 u64 size = __pa_symbol(_end) - start;
25121
25122 /*
25123@@ -841,8 +880,12 @@ static void __init trim_low_memory_range(void)
25124
25125 void __init setup_arch(char **cmdline_p)
25126 {
25127+#ifdef CONFIG_X86_32
25128+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
25129+#else
25130 memblock_reserve(__pa_symbol(_text),
25131 (unsigned long)__bss_stop - (unsigned long)_text);
25132+#endif
25133
25134 early_reserve_initrd();
25135
25136@@ -934,14 +977,14 @@ void __init setup_arch(char **cmdline_p)
25137
25138 if (!boot_params.hdr.root_flags)
25139 root_mountflags &= ~MS_RDONLY;
25140- init_mm.start_code = (unsigned long) _text;
25141- init_mm.end_code = (unsigned long) _etext;
25142+ init_mm.start_code = ktla_ktva((unsigned long) _text);
25143+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
25144 init_mm.end_data = (unsigned long) _edata;
25145 init_mm.brk = _brk_end;
25146
25147- code_resource.start = __pa_symbol(_text);
25148- code_resource.end = __pa_symbol(_etext)-1;
25149- data_resource.start = __pa_symbol(_etext);
25150+ code_resource.start = __pa_symbol(ktla_ktva(_text));
25151+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
25152+ data_resource.start = __pa_symbol(_sdata);
25153 data_resource.end = __pa_symbol(_edata)-1;
25154 bss_resource.start = __pa_symbol(__bss_start);
25155 bss_resource.end = __pa_symbol(__bss_stop)-1;
25156diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
25157index 5cdff03..80fa283 100644
25158--- a/arch/x86/kernel/setup_percpu.c
25159+++ b/arch/x86/kernel/setup_percpu.c
25160@@ -21,19 +21,17 @@
25161 #include <asm/cpu.h>
25162 #include <asm/stackprotector.h>
25163
25164-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
25165+#ifdef CONFIG_SMP
25166+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
25167 EXPORT_PER_CPU_SYMBOL(cpu_number);
25168+#endif
25169
25170-#ifdef CONFIG_X86_64
25171 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
25172-#else
25173-#define BOOT_PERCPU_OFFSET 0
25174-#endif
25175
25176 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
25177 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
25178
25179-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
25180+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
25181 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
25182 };
25183 EXPORT_SYMBOL(__per_cpu_offset);
25184@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
25185 {
25186 #ifdef CONFIG_NEED_MULTIPLE_NODES
25187 pg_data_t *last = NULL;
25188- unsigned int cpu;
25189+ int cpu;
25190
25191 for_each_possible_cpu(cpu) {
25192 int node = early_cpu_to_node(cpu);
25193@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
25194 {
25195 #ifdef CONFIG_X86_32
25196 struct desc_struct gdt;
25197+ unsigned long base = per_cpu_offset(cpu);
25198
25199- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
25200- 0x2 | DESCTYPE_S, 0x8);
25201- gdt.s = 1;
25202+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
25203+ 0x83 | DESCTYPE_S, 0xC);
25204 write_gdt_entry(get_cpu_gdt_table(cpu),
25205 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
25206 #endif
25207@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
25208 /* alrighty, percpu areas up and running */
25209 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
25210 for_each_possible_cpu(cpu) {
25211+#ifdef CONFIG_CC_STACKPROTECTOR
25212+#ifdef CONFIG_X86_32
25213+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
25214+#endif
25215+#endif
25216 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
25217 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
25218 per_cpu(cpu_number, cpu) = cpu;
25219@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
25220 */
25221 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
25222 #endif
25223+#ifdef CONFIG_CC_STACKPROTECTOR
25224+#ifdef CONFIG_X86_32
25225+ if (!cpu)
25226+ per_cpu(stack_canary.canary, cpu) = canary;
25227+#endif
25228+#endif
25229 /*
25230 * Up to this point, the boot CPU has been using .init.data
25231 * area. Reload any changed state for the boot CPU.
25232diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
25233index 6956299..18126ec4 100644
25234--- a/arch/x86/kernel/signal.c
25235+++ b/arch/x86/kernel/signal.c
25236@@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
25237 * Align the stack pointer according to the i386 ABI,
25238 * i.e. so that on function entry ((sp + 4) & 15) == 0.
25239 */
25240- sp = ((sp + 4) & -16ul) - 4;
25241+ sp = ((sp - 12) & -16ul) - 4;
25242 #else /* !CONFIG_X86_32 */
25243 sp = round_down(sp, 16) - 8;
25244 #endif
25245@@ -304,9 +304,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
25246 }
25247
25248 if (current->mm->context.vdso)
25249- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
25250+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
25251 else
25252- restorer = &frame->retcode;
25253+ restorer = (void __user *)&frame->retcode;
25254 if (ksig->ka.sa.sa_flags & SA_RESTORER)
25255 restorer = ksig->ka.sa.sa_restorer;
25256
25257@@ -320,7 +320,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
25258 * reasons and because gdb uses it as a signature to notice
25259 * signal handler stack frames.
25260 */
25261- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
25262+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
25263
25264 if (err)
25265 return -EFAULT;
25266@@ -364,10 +364,13 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
25267 else
25268 put_user_ex(0, &frame->uc.uc_flags);
25269 put_user_ex(0, &frame->uc.uc_link);
25270- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
25271+ __save_altstack_ex(&frame->uc.uc_stack, regs->sp);
25272
25273 /* Set up to return from userspace. */
25274- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
25275+ if (current->mm->context.vdso)
25276+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
25277+ else
25278+ restorer = (void __user *)&frame->retcode;
25279 if (ksig->ka.sa.sa_flags & SA_RESTORER)
25280 restorer = ksig->ka.sa.sa_restorer;
25281 put_user_ex(restorer, &frame->pretcode);
25282@@ -379,7 +382,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
25283 * reasons and because gdb uses it as a signature to notice
25284 * signal handler stack frames.
25285 */
25286- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
25287+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
25288 } put_user_catch(err);
25289
25290 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
25291@@ -429,7 +432,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
25292 else
25293 put_user_ex(0, &frame->uc.uc_flags);
25294 put_user_ex(0, &frame->uc.uc_link);
25295- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
25296+ __save_altstack_ex(&frame->uc.uc_stack, regs->sp);
25297
25298 /* Set up to return from userspace. If provided, use a stub
25299 already in userspace. */
25300@@ -615,7 +618,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
25301 {
25302 int usig = signr_convert(ksig->sig);
25303 sigset_t *set = sigmask_to_save();
25304- compat_sigset_t *cset = (compat_sigset_t *) set;
25305+ sigset_t sigcopy;
25306+ compat_sigset_t *cset;
25307+
25308+ sigcopy = *set;
25309+
25310+ cset = (compat_sigset_t *) &sigcopy;
25311
25312 /* Set up the stack frame */
25313 if (is_ia32_frame()) {
25314@@ -626,7 +634,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
25315 } else if (is_x32_frame()) {
25316 return x32_setup_rt_frame(ksig, cset, regs);
25317 } else {
25318- return __setup_rt_frame(ksig->sig, ksig, set, regs);
25319+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
25320 }
25321 }
25322
25323diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
25324index 48d2b7d..90d328a 100644
25325--- a/arch/x86/kernel/smp.c
25326+++ b/arch/x86/kernel/smp.c
25327@@ -285,7 +285,7 @@ static int __init nonmi_ipi_setup(char *str)
25328
25329 __setup("nonmi_ipi", nonmi_ipi_setup);
25330
25331-struct smp_ops smp_ops = {
25332+struct smp_ops smp_ops __read_only = {
25333 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
25334 .smp_prepare_cpus = native_smp_prepare_cpus,
25335 .smp_cpus_done = native_smp_cpus_done,
25336diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
25337index bfd348e..914f323 100644
25338--- a/arch/x86/kernel/smpboot.c
25339+++ b/arch/x86/kernel/smpboot.c
25340@@ -251,14 +251,18 @@ notrace static void __cpuinit start_secondary(void *unused)
25341
25342 enable_start_cpu0 = 0;
25343
25344-#ifdef CONFIG_X86_32
25345- /* switch away from the initial page table */
25346- load_cr3(swapper_pg_dir);
25347- __flush_tlb_all();
25348-#endif
25349-
25350 /* otherwise gcc will move up smp_processor_id before the cpu_init */
25351 barrier();
25352+
25353+ /* switch away from the initial page table */
25354+#ifdef CONFIG_PAX_PER_CPU_PGD
25355+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
25356+ __flush_tlb_all();
25357+#elif defined(CONFIG_X86_32)
25358+ load_cr3(swapper_pg_dir);
25359+ __flush_tlb_all();
25360+#endif
25361+
25362 /*
25363 * Check TSC synchronization with the BP:
25364 */
25365@@ -748,6 +752,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
25366 idle->thread.sp = (unsigned long) (((struct pt_regs *)
25367 (THREAD_SIZE + task_stack_page(idle))) - 1);
25368 per_cpu(current_task, cpu) = idle;
25369+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
25370
25371 #ifdef CONFIG_X86_32
25372 /* Stack for startup_32 can be just as for start_secondary onwards */
25373@@ -755,11 +760,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
25374 #else
25375 clear_tsk_thread_flag(idle, TIF_FORK);
25376 initial_gs = per_cpu_offset(cpu);
25377- per_cpu(kernel_stack, cpu) =
25378- (unsigned long)task_stack_page(idle) -
25379- KERNEL_STACK_OFFSET + THREAD_SIZE;
25380+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
25381 #endif
25382+
25383+ pax_open_kernel();
25384 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
25385+ pax_close_kernel();
25386+
25387 initial_code = (unsigned long)start_secondary;
25388 stack_start = idle->thread.sp;
25389
25390@@ -908,6 +915,15 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
25391 /* the FPU context is blank, nobody can own it */
25392 __cpu_disable_lazy_restore(cpu);
25393
25394+#ifdef CONFIG_PAX_PER_CPU_PGD
25395+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
25396+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25397+ KERNEL_PGD_PTRS);
25398+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
25399+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25400+ KERNEL_PGD_PTRS);
25401+#endif
25402+
25403 err = do_boot_cpu(apicid, cpu, tidle);
25404 if (err) {
25405 pr_debug("do_boot_cpu failed %d\n", err);
25406diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
25407index 9b4d51d..5d28b58 100644
25408--- a/arch/x86/kernel/step.c
25409+++ b/arch/x86/kernel/step.c
25410@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
25411 struct desc_struct *desc;
25412 unsigned long base;
25413
25414- seg &= ~7UL;
25415+ seg >>= 3;
25416
25417 mutex_lock(&child->mm->context.lock);
25418- if (unlikely((seg >> 3) >= child->mm->context.size))
25419+ if (unlikely(seg >= child->mm->context.size))
25420 addr = -1L; /* bogus selector, access would fault */
25421 else {
25422 desc = child->mm->context.ldt + seg;
25423@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
25424 addr += base;
25425 }
25426 mutex_unlock(&child->mm->context.lock);
25427- }
25428+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
25429+ addr = ktla_ktva(addr);
25430
25431 return addr;
25432 }
25433@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
25434 unsigned char opcode[15];
25435 unsigned long addr = convert_ip_to_linear(child, regs);
25436
25437+ if (addr == -EINVAL)
25438+ return 0;
25439+
25440 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
25441 for (i = 0; i < copied; i++) {
25442 switch (opcode[i]) {
25443diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
25444new file mode 100644
25445index 0000000..5877189
25446--- /dev/null
25447+++ b/arch/x86/kernel/sys_i386_32.c
25448@@ -0,0 +1,189 @@
25449+/*
25450+ * This file contains various random system calls that
25451+ * have a non-standard calling sequence on the Linux/i386
25452+ * platform.
25453+ */
25454+
25455+#include <linux/errno.h>
25456+#include <linux/sched.h>
25457+#include <linux/mm.h>
25458+#include <linux/fs.h>
25459+#include <linux/smp.h>
25460+#include <linux/sem.h>
25461+#include <linux/msg.h>
25462+#include <linux/shm.h>
25463+#include <linux/stat.h>
25464+#include <linux/syscalls.h>
25465+#include <linux/mman.h>
25466+#include <linux/file.h>
25467+#include <linux/utsname.h>
25468+#include <linux/ipc.h>
25469+#include <linux/elf.h>
25470+
25471+#include <linux/uaccess.h>
25472+#include <linux/unistd.h>
25473+
25474+#include <asm/syscalls.h>
25475+
25476+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
25477+{
25478+ unsigned long pax_task_size = TASK_SIZE;
25479+
25480+#ifdef CONFIG_PAX_SEGMEXEC
25481+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
25482+ pax_task_size = SEGMEXEC_TASK_SIZE;
25483+#endif
25484+
25485+ if (flags & MAP_FIXED)
25486+ if (len > pax_task_size || addr > pax_task_size - len)
25487+ return -EINVAL;
25488+
25489+ return 0;
25490+}
25491+
25492+/*
25493+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
25494+ */
25495+static unsigned long get_align_mask(void)
25496+{
25497+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
25498+ return 0;
25499+
25500+ if (!(current->flags & PF_RANDOMIZE))
25501+ return 0;
25502+
25503+ return va_align.mask;
25504+}
25505+
25506+unsigned long
25507+arch_get_unmapped_area(struct file *filp, unsigned long addr,
25508+ unsigned long len, unsigned long pgoff, unsigned long flags)
25509+{
25510+ struct mm_struct *mm = current->mm;
25511+ struct vm_area_struct *vma;
25512+ unsigned long pax_task_size = TASK_SIZE;
25513+ struct vm_unmapped_area_info info;
25514+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
25515+
25516+#ifdef CONFIG_PAX_SEGMEXEC
25517+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25518+ pax_task_size = SEGMEXEC_TASK_SIZE;
25519+#endif
25520+
25521+ pax_task_size -= PAGE_SIZE;
25522+
25523+ if (len > pax_task_size)
25524+ return -ENOMEM;
25525+
25526+ if (flags & MAP_FIXED)
25527+ return addr;
25528+
25529+#ifdef CONFIG_PAX_RANDMMAP
25530+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
25531+#endif
25532+
25533+ if (addr) {
25534+ addr = PAGE_ALIGN(addr);
25535+ if (pax_task_size - len >= addr) {
25536+ vma = find_vma(mm, addr);
25537+ if (check_heap_stack_gap(vma, addr, len, offset))
25538+ return addr;
25539+ }
25540+ }
25541+
25542+ info.flags = 0;
25543+ info.length = len;
25544+ info.align_mask = filp ? get_align_mask() : 0;
25545+ info.align_offset = pgoff << PAGE_SHIFT;
25546+ info.threadstack_offset = offset;
25547+
25548+#ifdef CONFIG_PAX_PAGEEXEC
25549+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
25550+ info.low_limit = 0x00110000UL;
25551+ info.high_limit = mm->start_code;
25552+
25553+#ifdef CONFIG_PAX_RANDMMAP
25554+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25555+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
25556+#endif
25557+
25558+ if (info.low_limit < info.high_limit) {
25559+ addr = vm_unmapped_area(&info);
25560+ if (!IS_ERR_VALUE(addr))
25561+ return addr;
25562+ }
25563+ } else
25564+#endif
25565+
25566+ info.low_limit = mm->mmap_base;
25567+ info.high_limit = pax_task_size;
25568+
25569+ return vm_unmapped_area(&info);
25570+}
25571+
25572+unsigned long
25573+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
25574+ const unsigned long len, const unsigned long pgoff,
25575+ const unsigned long flags)
25576+{
25577+ struct vm_area_struct *vma;
25578+ struct mm_struct *mm = current->mm;
25579+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
25580+ struct vm_unmapped_area_info info;
25581+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
25582+
25583+#ifdef CONFIG_PAX_SEGMEXEC
25584+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25585+ pax_task_size = SEGMEXEC_TASK_SIZE;
25586+#endif
25587+
25588+ pax_task_size -= PAGE_SIZE;
25589+
25590+ /* requested length too big for entire address space */
25591+ if (len > pax_task_size)
25592+ return -ENOMEM;
25593+
25594+ if (flags & MAP_FIXED)
25595+ return addr;
25596+
25597+#ifdef CONFIG_PAX_PAGEEXEC
25598+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
25599+ goto bottomup;
25600+#endif
25601+
25602+#ifdef CONFIG_PAX_RANDMMAP
25603+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
25604+#endif
25605+
25606+ /* requesting a specific address */
25607+ if (addr) {
25608+ addr = PAGE_ALIGN(addr);
25609+ if (pax_task_size - len >= addr) {
25610+ vma = find_vma(mm, addr);
25611+ if (check_heap_stack_gap(vma, addr, len, offset))
25612+ return addr;
25613+ }
25614+ }
25615+
25616+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
25617+ info.length = len;
25618+ info.low_limit = PAGE_SIZE;
25619+ info.high_limit = mm->mmap_base;
25620+ info.align_mask = filp ? get_align_mask() : 0;
25621+ info.align_offset = pgoff << PAGE_SHIFT;
25622+ info.threadstack_offset = offset;
25623+
25624+ addr = vm_unmapped_area(&info);
25625+ if (!(addr & ~PAGE_MASK))
25626+ return addr;
25627+ VM_BUG_ON(addr != -ENOMEM);
25628+
25629+bottomup:
25630+ /*
25631+ * A failed mmap() very likely causes application failure,
25632+ * so fall back to the bottom-up function here. This scenario
25633+ * can happen with large stack limits and large mmap()
25634+ * allocations.
25635+ */
25636+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
25637+}
25638diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
25639index 30277e2..5664a29 100644
25640--- a/arch/x86/kernel/sys_x86_64.c
25641+++ b/arch/x86/kernel/sys_x86_64.c
25642@@ -81,8 +81,8 @@ out:
25643 return error;
25644 }
25645
25646-static void find_start_end(unsigned long flags, unsigned long *begin,
25647- unsigned long *end)
25648+static void find_start_end(struct mm_struct *mm, unsigned long flags,
25649+ unsigned long *begin, unsigned long *end)
25650 {
25651 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
25652 unsigned long new_begin;
25653@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
25654 *begin = new_begin;
25655 }
25656 } else {
25657- *begin = current->mm->mmap_legacy_base;
25658+ *begin = mm->mmap_legacy_base;
25659 *end = TASK_SIZE;
25660 }
25661 }
25662@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
25663 struct vm_area_struct *vma;
25664 struct vm_unmapped_area_info info;
25665 unsigned long begin, end;
25666+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
25667
25668 if (flags & MAP_FIXED)
25669 return addr;
25670
25671- find_start_end(flags, &begin, &end);
25672+ find_start_end(mm, flags, &begin, &end);
25673
25674 if (len > end)
25675 return -ENOMEM;
25676
25677+#ifdef CONFIG_PAX_RANDMMAP
25678+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
25679+#endif
25680+
25681 if (addr) {
25682 addr = PAGE_ALIGN(addr);
25683 vma = find_vma(mm, addr);
25684- if (end - len >= addr &&
25685- (!vma || addr + len <= vma->vm_start))
25686+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
25687 return addr;
25688 }
25689
25690@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
25691 info.high_limit = end;
25692 info.align_mask = filp ? get_align_mask() : 0;
25693 info.align_offset = pgoff << PAGE_SHIFT;
25694+ info.threadstack_offset = offset;
25695 return vm_unmapped_area(&info);
25696 }
25697
25698@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
25699 struct mm_struct *mm = current->mm;
25700 unsigned long addr = addr0;
25701 struct vm_unmapped_area_info info;
25702+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
25703
25704 /* requested length too big for entire address space */
25705 if (len > TASK_SIZE)
25706@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
25707 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
25708 goto bottomup;
25709
25710+#ifdef CONFIG_PAX_RANDMMAP
25711+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
25712+#endif
25713+
25714 /* requesting a specific address */
25715 if (addr) {
25716 addr = PAGE_ALIGN(addr);
25717 vma = find_vma(mm, addr);
25718- if (TASK_SIZE - len >= addr &&
25719- (!vma || addr + len <= vma->vm_start))
25720+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
25721 return addr;
25722 }
25723
25724@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
25725 info.high_limit = mm->mmap_base;
25726 info.align_mask = filp ? get_align_mask() : 0;
25727 info.align_offset = pgoff << PAGE_SHIFT;
25728+ info.threadstack_offset = offset;
25729 addr = vm_unmapped_area(&info);
25730 if (!(addr & ~PAGE_MASK))
25731 return addr;
25732diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
25733index f84fe00..f41d9f1 100644
25734--- a/arch/x86/kernel/tboot.c
25735+++ b/arch/x86/kernel/tboot.c
25736@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
25737
25738 void tboot_shutdown(u32 shutdown_type)
25739 {
25740- void (*shutdown)(void);
25741+ void (* __noreturn shutdown)(void);
25742
25743 if (!tboot_enabled())
25744 return;
25745@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
25746
25747 switch_to_tboot_pt();
25748
25749- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
25750+ shutdown = (void *)tboot->shutdown_entry;
25751 shutdown();
25752
25753 /* should not reach here */
25754@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
25755 return 0;
25756 }
25757
25758-static atomic_t ap_wfs_count;
25759+static atomic_unchecked_t ap_wfs_count;
25760
25761 static int tboot_wait_for_aps(int num_aps)
25762 {
25763@@ -324,16 +324,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
25764 {
25765 switch (action) {
25766 case CPU_DYING:
25767- atomic_inc(&ap_wfs_count);
25768+ atomic_inc_unchecked(&ap_wfs_count);
25769 if (num_online_cpus() == 1)
25770- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
25771+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
25772 return NOTIFY_BAD;
25773 break;
25774 }
25775 return NOTIFY_OK;
25776 }
25777
25778-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
25779+static struct notifier_block tboot_cpu_notifier =
25780 {
25781 .notifier_call = tboot_cpu_callback,
25782 };
25783@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
25784
25785 tboot_create_trampoline();
25786
25787- atomic_set(&ap_wfs_count, 0);
25788+ atomic_set_unchecked(&ap_wfs_count, 0);
25789 register_hotcpu_notifier(&tboot_cpu_notifier);
25790
25791 acpi_os_set_prepare_sleep(&tboot_sleep);
25792diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
25793index 24d3c91..d06b473 100644
25794--- a/arch/x86/kernel/time.c
25795+++ b/arch/x86/kernel/time.c
25796@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
25797 {
25798 unsigned long pc = instruction_pointer(regs);
25799
25800- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
25801+ if (!user_mode(regs) && in_lock_functions(pc)) {
25802 #ifdef CONFIG_FRAME_POINTER
25803- return *(unsigned long *)(regs->bp + sizeof(long));
25804+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
25805 #else
25806 unsigned long *sp =
25807 (unsigned long *)kernel_stack_pointer(regs);
25808@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
25809 * or above a saved flags. Eflags has bits 22-31 zero,
25810 * kernel addresses don't.
25811 */
25812+
25813+#ifdef CONFIG_PAX_KERNEXEC
25814+ return ktla_ktva(sp[0]);
25815+#else
25816 if (sp[0] >> 22)
25817 return sp[0];
25818 if (sp[1] >> 22)
25819 return sp[1];
25820 #endif
25821+
25822+#endif
25823 }
25824 return pc;
25825 }
25826diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
25827index f7fec09..9991981 100644
25828--- a/arch/x86/kernel/tls.c
25829+++ b/arch/x86/kernel/tls.c
25830@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
25831 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
25832 return -EINVAL;
25833
25834+#ifdef CONFIG_PAX_SEGMEXEC
25835+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
25836+ return -EINVAL;
25837+#endif
25838+
25839 set_tls_desc(p, idx, &info, 1);
25840
25841 return 0;
25842@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
25843
25844 if (kbuf)
25845 info = kbuf;
25846- else if (__copy_from_user(infobuf, ubuf, count))
25847+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
25848 return -EFAULT;
25849 else
25850 info = infobuf;
25851diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
25852index 772e2a8..bad5bf6 100644
25853--- a/arch/x86/kernel/traps.c
25854+++ b/arch/x86/kernel/traps.c
25855@@ -68,12 +68,6 @@
25856 #include <asm/setup.h>
25857
25858 asmlinkage int system_call(void);
25859-
25860-/*
25861- * The IDT has to be page-aligned to simplify the Pentium
25862- * F0 0F bug workaround.
25863- */
25864-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
25865 #endif
25866
25867 DECLARE_BITMAP(used_vectors, NR_VECTORS);
25868@@ -106,11 +100,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
25869 }
25870
25871 static int __kprobes
25872-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
25873+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
25874 struct pt_regs *regs, long error_code)
25875 {
25876 #ifdef CONFIG_X86_32
25877- if (regs->flags & X86_VM_MASK) {
25878+ if (v8086_mode(regs)) {
25879 /*
25880 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
25881 * On nmi (interrupt 2), do_trap should not be called.
25882@@ -123,12 +117,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
25883 return -1;
25884 }
25885 #endif
25886- if (!user_mode(regs)) {
25887+ if (!user_mode_novm(regs)) {
25888 if (!fixup_exception(regs)) {
25889 tsk->thread.error_code = error_code;
25890 tsk->thread.trap_nr = trapnr;
25891+
25892+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25893+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
25894+ str = "PAX: suspicious stack segment fault";
25895+#endif
25896+
25897 die(str, regs, error_code);
25898 }
25899+
25900+#ifdef CONFIG_PAX_REFCOUNT
25901+ if (trapnr == 4)
25902+ pax_report_refcount_overflow(regs);
25903+#endif
25904+
25905 return 0;
25906 }
25907
25908@@ -136,7 +142,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
25909 }
25910
25911 static void __kprobes
25912-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
25913+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
25914 long error_code, siginfo_t *info)
25915 {
25916 struct task_struct *tsk = current;
25917@@ -160,7 +166,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
25918 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
25919 printk_ratelimit()) {
25920 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
25921- tsk->comm, tsk->pid, str,
25922+ tsk->comm, task_pid_nr(tsk), str,
25923 regs->ip, regs->sp, error_code);
25924 print_vma_addr(" in ", regs->ip);
25925 pr_cont("\n");
25926@@ -273,7 +279,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
25927 conditional_sti(regs);
25928
25929 #ifdef CONFIG_X86_32
25930- if (regs->flags & X86_VM_MASK) {
25931+ if (v8086_mode(regs)) {
25932 local_irq_enable();
25933 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
25934 goto exit;
25935@@ -281,18 +287,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
25936 #endif
25937
25938 tsk = current;
25939- if (!user_mode(regs)) {
25940+ if (!user_mode_novm(regs)) {
25941 if (fixup_exception(regs))
25942 goto exit;
25943
25944 tsk->thread.error_code = error_code;
25945 tsk->thread.trap_nr = X86_TRAP_GP;
25946 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
25947- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
25948+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
25949+
25950+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25951+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
25952+ die("PAX: suspicious general protection fault", regs, error_code);
25953+ else
25954+#endif
25955+
25956 die("general protection fault", regs, error_code);
25957+ }
25958 goto exit;
25959 }
25960
25961+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25962+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
25963+ struct mm_struct *mm = tsk->mm;
25964+ unsigned long limit;
25965+
25966+ down_write(&mm->mmap_sem);
25967+ limit = mm->context.user_cs_limit;
25968+ if (limit < TASK_SIZE) {
25969+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
25970+ up_write(&mm->mmap_sem);
25971+ return;
25972+ }
25973+ up_write(&mm->mmap_sem);
25974+ }
25975+#endif
25976+
25977 tsk->thread.error_code = error_code;
25978 tsk->thread.trap_nr = X86_TRAP_GP;
25979
25980@@ -450,7 +480,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
25981 /* It's safe to allow irq's after DR6 has been saved */
25982 preempt_conditional_sti(regs);
25983
25984- if (regs->flags & X86_VM_MASK) {
25985+ if (v8086_mode(regs)) {
25986 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
25987 X86_TRAP_DB);
25988 preempt_conditional_cli(regs);
25989@@ -465,7 +495,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
25990 * We already checked v86 mode above, so we can check for kernel mode
25991 * by just checking the CPL of CS.
25992 */
25993- if ((dr6 & DR_STEP) && !user_mode(regs)) {
25994+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
25995 tsk->thread.debugreg6 &= ~DR_STEP;
25996 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
25997 regs->flags &= ~X86_EFLAGS_TF;
25998@@ -497,7 +527,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
25999 return;
26000 conditional_sti(regs);
26001
26002- if (!user_mode_vm(regs))
26003+ if (!user_mode(regs))
26004 {
26005 if (!fixup_exception(regs)) {
26006 task->thread.error_code = error_code;
26007diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
26008index 2ed8459..7cf329f 100644
26009--- a/arch/x86/kernel/uprobes.c
26010+++ b/arch/x86/kernel/uprobes.c
26011@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
26012 int ret = NOTIFY_DONE;
26013
26014 /* We are only interested in userspace traps */
26015- if (regs && !user_mode_vm(regs))
26016+ if (regs && !user_mode(regs))
26017 return NOTIFY_DONE;
26018
26019 switch (val) {
26020@@ -719,7 +719,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
26021
26022 if (ncopied != rasize) {
26023 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
26024- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
26025+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
26026
26027 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
26028 }
26029diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
26030index b9242ba..50c5edd 100644
26031--- a/arch/x86/kernel/verify_cpu.S
26032+++ b/arch/x86/kernel/verify_cpu.S
26033@@ -20,6 +20,7 @@
26034 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
26035 * arch/x86/kernel/trampoline_64.S: secondary processor verification
26036 * arch/x86/kernel/head_32.S: processor startup
26037+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
26038 *
26039 * verify_cpu, returns the status of longmode and SSE in register %eax.
26040 * 0: Success 1: Failure
26041diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
26042index e8edcf5..27f9344 100644
26043--- a/arch/x86/kernel/vm86_32.c
26044+++ b/arch/x86/kernel/vm86_32.c
26045@@ -44,6 +44,7 @@
26046 #include <linux/ptrace.h>
26047 #include <linux/audit.h>
26048 #include <linux/stddef.h>
26049+#include <linux/grsecurity.h>
26050
26051 #include <asm/uaccess.h>
26052 #include <asm/io.h>
26053@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
26054 do_exit(SIGSEGV);
26055 }
26056
26057- tss = &per_cpu(init_tss, get_cpu());
26058+ tss = init_tss + get_cpu();
26059 current->thread.sp0 = current->thread.saved_sp0;
26060 current->thread.sysenter_cs = __KERNEL_CS;
26061 load_sp0(tss, &current->thread);
26062@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
26063
26064 if (tsk->thread.saved_sp0)
26065 return -EPERM;
26066+
26067+#ifdef CONFIG_GRKERNSEC_VM86
26068+ if (!capable(CAP_SYS_RAWIO)) {
26069+ gr_handle_vm86();
26070+ return -EPERM;
26071+ }
26072+#endif
26073+
26074 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
26075 offsetof(struct kernel_vm86_struct, vm86plus) -
26076 sizeof(info.regs));
26077@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
26078 int tmp;
26079 struct vm86plus_struct __user *v86;
26080
26081+#ifdef CONFIG_GRKERNSEC_VM86
26082+ if (!capable(CAP_SYS_RAWIO)) {
26083+ gr_handle_vm86();
26084+ return -EPERM;
26085+ }
26086+#endif
26087+
26088 tsk = current;
26089 switch (cmd) {
26090 case VM86_REQUEST_IRQ:
26091@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
26092 tsk->thread.saved_fs = info->regs32->fs;
26093 tsk->thread.saved_gs = get_user_gs(info->regs32);
26094
26095- tss = &per_cpu(init_tss, get_cpu());
26096+ tss = init_tss + get_cpu();
26097 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
26098 if (cpu_has_sep)
26099 tsk->thread.sysenter_cs = 0;
26100@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
26101 goto cannot_handle;
26102 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
26103 goto cannot_handle;
26104- intr_ptr = (unsigned long __user *) (i << 2);
26105+ intr_ptr = (__force unsigned long __user *) (i << 2);
26106 if (get_user(segoffs, intr_ptr))
26107 goto cannot_handle;
26108 if ((segoffs >> 16) == BIOSSEG)
26109diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
26110index 10c4f30..57377c2 100644
26111--- a/arch/x86/kernel/vmlinux.lds.S
26112+++ b/arch/x86/kernel/vmlinux.lds.S
26113@@ -26,6 +26,13 @@
26114 #include <asm/page_types.h>
26115 #include <asm/cache.h>
26116 #include <asm/boot.h>
26117+#include <asm/segment.h>
26118+
26119+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26120+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
26121+#else
26122+#define __KERNEL_TEXT_OFFSET 0
26123+#endif
26124
26125 #undef i386 /* in case the preprocessor is a 32bit one */
26126
26127@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
26128
26129 PHDRS {
26130 text PT_LOAD FLAGS(5); /* R_E */
26131+#ifdef CONFIG_X86_32
26132+ module PT_LOAD FLAGS(5); /* R_E */
26133+#endif
26134+#ifdef CONFIG_XEN
26135+ rodata PT_LOAD FLAGS(5); /* R_E */
26136+#else
26137+ rodata PT_LOAD FLAGS(4); /* R__ */
26138+#endif
26139 data PT_LOAD FLAGS(6); /* RW_ */
26140-#ifdef CONFIG_X86_64
26141+ init.begin PT_LOAD FLAGS(6); /* RW_ */
26142 #ifdef CONFIG_SMP
26143 percpu PT_LOAD FLAGS(6); /* RW_ */
26144 #endif
26145+ text.init PT_LOAD FLAGS(5); /* R_E */
26146+ text.exit PT_LOAD FLAGS(5); /* R_E */
26147 init PT_LOAD FLAGS(7); /* RWE */
26148-#endif
26149 note PT_NOTE FLAGS(0); /* ___ */
26150 }
26151
26152 SECTIONS
26153 {
26154 #ifdef CONFIG_X86_32
26155- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
26156- phys_startup_32 = startup_32 - LOAD_OFFSET;
26157+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
26158 #else
26159- . = __START_KERNEL;
26160- phys_startup_64 = startup_64 - LOAD_OFFSET;
26161+ . = __START_KERNEL;
26162 #endif
26163
26164 /* Text and read-only data */
26165- .text : AT(ADDR(.text) - LOAD_OFFSET) {
26166- _text = .;
26167+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
26168 /* bootstrapping code */
26169+#ifdef CONFIG_X86_32
26170+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
26171+#else
26172+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
26173+#endif
26174+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
26175+ _text = .;
26176 HEAD_TEXT
26177 . = ALIGN(8);
26178 _stext = .;
26179@@ -104,13 +124,48 @@ SECTIONS
26180 IRQENTRY_TEXT
26181 *(.fixup)
26182 *(.gnu.warning)
26183- /* End of text section */
26184- _etext = .;
26185 } :text = 0x9090
26186
26187- NOTES :text :note
26188+ . += __KERNEL_TEXT_OFFSET;
26189
26190- EXCEPTION_TABLE(16) :text = 0x9090
26191+#ifdef CONFIG_X86_32
26192+ . = ALIGN(PAGE_SIZE);
26193+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
26194+
26195+#ifdef CONFIG_PAX_KERNEXEC
26196+ MODULES_EXEC_VADDR = .;
26197+ BYTE(0)
26198+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
26199+ . = ALIGN(HPAGE_SIZE) - 1;
26200+ MODULES_EXEC_END = .;
26201+#endif
26202+
26203+ } :module
26204+#endif
26205+
26206+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
26207+ /* End of text section */
26208+ BYTE(0)
26209+ _etext = . - __KERNEL_TEXT_OFFSET;
26210+ }
26211+
26212+#ifdef CONFIG_X86_32
26213+ . = ALIGN(PAGE_SIZE);
26214+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
26215+ *(.idt)
26216+ . = ALIGN(PAGE_SIZE);
26217+ *(.empty_zero_page)
26218+ *(.initial_pg_fixmap)
26219+ *(.initial_pg_pmd)
26220+ *(.initial_page_table)
26221+ *(.swapper_pg_dir)
26222+ } :rodata
26223+#endif
26224+
26225+ . = ALIGN(PAGE_SIZE);
26226+ NOTES :rodata :note
26227+
26228+ EXCEPTION_TABLE(16) :rodata
26229
26230 #if defined(CONFIG_DEBUG_RODATA)
26231 /* .text should occupy whole number of pages */
26232@@ -122,16 +177,20 @@ SECTIONS
26233
26234 /* Data */
26235 .data : AT(ADDR(.data) - LOAD_OFFSET) {
26236+
26237+#ifdef CONFIG_PAX_KERNEXEC
26238+ . = ALIGN(HPAGE_SIZE);
26239+#else
26240+ . = ALIGN(PAGE_SIZE);
26241+#endif
26242+
26243 /* Start of data section */
26244 _sdata = .;
26245
26246 /* init_task */
26247 INIT_TASK_DATA(THREAD_SIZE)
26248
26249-#ifdef CONFIG_X86_32
26250- /* 32 bit has nosave before _edata */
26251 NOSAVE_DATA
26252-#endif
26253
26254 PAGE_ALIGNED_DATA(PAGE_SIZE)
26255
26256@@ -172,12 +231,19 @@ SECTIONS
26257 #endif /* CONFIG_X86_64 */
26258
26259 /* Init code and data - will be freed after init */
26260- . = ALIGN(PAGE_SIZE);
26261 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
26262+ BYTE(0)
26263+
26264+#ifdef CONFIG_PAX_KERNEXEC
26265+ . = ALIGN(HPAGE_SIZE);
26266+#else
26267+ . = ALIGN(PAGE_SIZE);
26268+#endif
26269+
26270 __init_begin = .; /* paired with __init_end */
26271- }
26272+ } :init.begin
26273
26274-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
26275+#ifdef CONFIG_SMP
26276 /*
26277 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
26278 * output PHDR, so the next output section - .init.text - should
26279@@ -186,12 +252,27 @@ SECTIONS
26280 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
26281 #endif
26282
26283- INIT_TEXT_SECTION(PAGE_SIZE)
26284-#ifdef CONFIG_X86_64
26285- :init
26286-#endif
26287+ . = ALIGN(PAGE_SIZE);
26288+ init_begin = .;
26289+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
26290+ VMLINUX_SYMBOL(_sinittext) = .;
26291+ INIT_TEXT
26292+ VMLINUX_SYMBOL(_einittext) = .;
26293+ . = ALIGN(PAGE_SIZE);
26294+ } :text.init
26295
26296- INIT_DATA_SECTION(16)
26297+ /*
26298+ * .exit.text is discard at runtime, not link time, to deal with
26299+ * references from .altinstructions and .eh_frame
26300+ */
26301+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
26302+ EXIT_TEXT
26303+ . = ALIGN(16);
26304+ } :text.exit
26305+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
26306+
26307+ . = ALIGN(PAGE_SIZE);
26308+ INIT_DATA_SECTION(16) :init
26309
26310 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
26311 __x86_cpu_dev_start = .;
26312@@ -253,19 +334,12 @@ SECTIONS
26313 }
26314
26315 . = ALIGN(8);
26316- /*
26317- * .exit.text is discard at runtime, not link time, to deal with
26318- * references from .altinstructions and .eh_frame
26319- */
26320- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
26321- EXIT_TEXT
26322- }
26323
26324 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
26325 EXIT_DATA
26326 }
26327
26328-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
26329+#ifndef CONFIG_SMP
26330 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
26331 #endif
26332
26333@@ -284,16 +358,10 @@ SECTIONS
26334 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
26335 __smp_locks = .;
26336 *(.smp_locks)
26337- . = ALIGN(PAGE_SIZE);
26338 __smp_locks_end = .;
26339+ . = ALIGN(PAGE_SIZE);
26340 }
26341
26342-#ifdef CONFIG_X86_64
26343- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
26344- NOSAVE_DATA
26345- }
26346-#endif
26347-
26348 /* BSS */
26349 . = ALIGN(PAGE_SIZE);
26350 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
26351@@ -309,6 +377,7 @@ SECTIONS
26352 __brk_base = .;
26353 . += 64 * 1024; /* 64k alignment slop space */
26354 *(.brk_reservation) /* areas brk users have reserved */
26355+ . = ALIGN(HPAGE_SIZE);
26356 __brk_limit = .;
26357 }
26358
26359@@ -335,13 +404,12 @@ SECTIONS
26360 * for the boot processor.
26361 */
26362 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
26363-INIT_PER_CPU(gdt_page);
26364 INIT_PER_CPU(irq_stack_union);
26365
26366 /*
26367 * Build-time check on the image size:
26368 */
26369-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
26370+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
26371 "kernel image bigger than KERNEL_IMAGE_SIZE");
26372
26373 #ifdef CONFIG_SMP
26374diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
26375index 9a907a6..f83f921 100644
26376--- a/arch/x86/kernel/vsyscall_64.c
26377+++ b/arch/x86/kernel/vsyscall_64.c
26378@@ -56,15 +56,13 @@
26379 DEFINE_VVAR(int, vgetcpu_mode);
26380 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
26381
26382-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
26383+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
26384
26385 static int __init vsyscall_setup(char *str)
26386 {
26387 if (str) {
26388 if (!strcmp("emulate", str))
26389 vsyscall_mode = EMULATE;
26390- else if (!strcmp("native", str))
26391- vsyscall_mode = NATIVE;
26392 else if (!strcmp("none", str))
26393 vsyscall_mode = NONE;
26394 else
26395@@ -323,8 +321,7 @@ do_ret:
26396 return true;
26397
26398 sigsegv:
26399- force_sig(SIGSEGV, current);
26400- return true;
26401+ do_group_exit(SIGKILL);
26402 }
26403
26404 /*
26405@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
26406 extern char __vvar_page;
26407 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
26408
26409- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
26410- vsyscall_mode == NATIVE
26411- ? PAGE_KERNEL_VSYSCALL
26412- : PAGE_KERNEL_VVAR);
26413+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
26414 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
26415 (unsigned long)VSYSCALL_START);
26416
26417diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
26418index b014d94..e775258 100644
26419--- a/arch/x86/kernel/x8664_ksyms_64.c
26420+++ b/arch/x86/kernel/x8664_ksyms_64.c
26421@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
26422 EXPORT_SYMBOL(copy_user_generic_unrolled);
26423 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
26424 EXPORT_SYMBOL(__copy_user_nocache);
26425-EXPORT_SYMBOL(_copy_from_user);
26426-EXPORT_SYMBOL(_copy_to_user);
26427
26428 EXPORT_SYMBOL(copy_page);
26429 EXPORT_SYMBOL(clear_page);
26430@@ -66,3 +64,7 @@ EXPORT_SYMBOL(empty_zero_page);
26431 #ifndef CONFIG_PARAVIRT
26432 EXPORT_SYMBOL(native_load_gs_index);
26433 #endif
26434+
26435+#ifdef CONFIG_PAX_PER_CPU_PGD
26436+EXPORT_SYMBOL(cpu_pgd);
26437+#endif
26438diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
26439index 45a14db..075bb9b 100644
26440--- a/arch/x86/kernel/x86_init.c
26441+++ b/arch/x86/kernel/x86_init.c
26442@@ -85,7 +85,7 @@ struct x86_init_ops x86_init __initdata = {
26443 },
26444 };
26445
26446-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
26447+struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
26448 .early_percpu_clock_init = x86_init_noop,
26449 .setup_percpu_clockev = setup_secondary_APIC_clock,
26450 };
26451@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
26452 static void default_nmi_init(void) { };
26453 static int default_i8042_detect(void) { return 1; };
26454
26455-struct x86_platform_ops x86_platform = {
26456+struct x86_platform_ops x86_platform __read_only = {
26457 .calibrate_tsc = native_calibrate_tsc,
26458 .get_wallclock = mach_get_cmos_time,
26459 .set_wallclock = mach_set_rtc_mmss,
26460@@ -107,7 +107,7 @@ struct x86_platform_ops x86_platform = {
26461 };
26462
26463 EXPORT_SYMBOL_GPL(x86_platform);
26464-struct x86_msi_ops x86_msi = {
26465+struct x86_msi_ops x86_msi __read_only = {
26466 .setup_msi_irqs = native_setup_msi_irqs,
26467 .compose_msi_msg = native_compose_msi_msg,
26468 .teardown_msi_irq = native_teardown_msi_irq,
26469@@ -116,7 +116,7 @@ struct x86_msi_ops x86_msi = {
26470 .setup_hpet_msi = default_setup_hpet_msi,
26471 };
26472
26473-struct x86_io_apic_ops x86_io_apic_ops = {
26474+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
26475 .init = native_io_apic_init_mappings,
26476 .read = native_io_apic_read,
26477 .write = native_io_apic_write,
26478diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
26479index ada87a3..afea76d 100644
26480--- a/arch/x86/kernel/xsave.c
26481+++ b/arch/x86/kernel/xsave.c
26482@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
26483 {
26484 int err;
26485
26486+ buf = (struct xsave_struct __user *)____m(buf);
26487 if (use_xsave())
26488 err = xsave_user(buf);
26489 else if (use_fxsr())
26490@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
26491 */
26492 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
26493 {
26494+ buf = (void __user *)____m(buf);
26495 if (use_xsave()) {
26496 if ((unsigned long)buf % 64 || fx_only) {
26497 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
26498diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
26499index a20ecb5..d0e2194 100644
26500--- a/arch/x86/kvm/cpuid.c
26501+++ b/arch/x86/kvm/cpuid.c
26502@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
26503 struct kvm_cpuid2 *cpuid,
26504 struct kvm_cpuid_entry2 __user *entries)
26505 {
26506- int r;
26507+ int r, i;
26508
26509 r = -E2BIG;
26510 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
26511 goto out;
26512 r = -EFAULT;
26513- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
26514- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
26515+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
26516 goto out;
26517+ for (i = 0; i < cpuid->nent; ++i) {
26518+ struct kvm_cpuid_entry2 cpuid_entry;
26519+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
26520+ goto out;
26521+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
26522+ }
26523 vcpu->arch.cpuid_nent = cpuid->nent;
26524 kvm_apic_set_version(vcpu);
26525 kvm_x86_ops->cpuid_update(vcpu);
26526@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
26527 struct kvm_cpuid2 *cpuid,
26528 struct kvm_cpuid_entry2 __user *entries)
26529 {
26530- int r;
26531+ int r, i;
26532
26533 r = -E2BIG;
26534 if (cpuid->nent < vcpu->arch.cpuid_nent)
26535 goto out;
26536 r = -EFAULT;
26537- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
26538- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
26539+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
26540 goto out;
26541+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
26542+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
26543+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
26544+ goto out;
26545+ }
26546 return 0;
26547
26548 out:
26549diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
26550index 5953dce..f11a7d2 100644
26551--- a/arch/x86/kvm/emulate.c
26552+++ b/arch/x86/kvm/emulate.c
26553@@ -329,6 +329,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
26554
26555 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
26556 do { \
26557+ unsigned long _tmp; \
26558 __asm__ __volatile__ ( \
26559 _PRE_EFLAGS("0", "4", "2") \
26560 _op _suffix " %"_x"3,%1; " \
26561@@ -343,8 +344,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
26562 /* Raw emulation: instruction has two explicit operands. */
26563 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
26564 do { \
26565- unsigned long _tmp; \
26566- \
26567 switch ((ctxt)->dst.bytes) { \
26568 case 2: \
26569 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
26570@@ -360,7 +359,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
26571
26572 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
26573 do { \
26574- unsigned long _tmp; \
26575 switch ((ctxt)->dst.bytes) { \
26576 case 1: \
26577 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
26578diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
26579index 0eee2c8..94a32c3 100644
26580--- a/arch/x86/kvm/lapic.c
26581+++ b/arch/x86/kvm/lapic.c
26582@@ -55,7 +55,7 @@
26583 #define APIC_BUS_CYCLE_NS 1
26584
26585 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
26586-#define apic_debug(fmt, arg...)
26587+#define apic_debug(fmt, arg...) do {} while (0)
26588
26589 #define APIC_LVT_NUM 6
26590 /* 14 is the version for Xeon and Pentium 8.4.8*/
26591diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
26592index da20860..d19fdf5 100644
26593--- a/arch/x86/kvm/paging_tmpl.h
26594+++ b/arch/x86/kvm/paging_tmpl.h
26595@@ -208,7 +208,7 @@ retry_walk:
26596 if (unlikely(kvm_is_error_hva(host_addr)))
26597 goto error;
26598
26599- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
26600+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
26601 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
26602 goto error;
26603 walker->ptep_user[walker->level - 1] = ptep_user;
26604diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
26605index a14a6ea..dc86cf0 100644
26606--- a/arch/x86/kvm/svm.c
26607+++ b/arch/x86/kvm/svm.c
26608@@ -3493,7 +3493,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
26609 int cpu = raw_smp_processor_id();
26610
26611 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
26612+
26613+ pax_open_kernel();
26614 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
26615+ pax_close_kernel();
26616+
26617 load_TR_desc();
26618 }
26619
26620@@ -3894,6 +3898,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
26621 #endif
26622 #endif
26623
26624+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
26625+ __set_fs(current_thread_info()->addr_limit);
26626+#endif
26627+
26628 reload_tss(vcpu);
26629
26630 local_irq_disable();
26631diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
26632index 5402c94..c3bdeee 100644
26633--- a/arch/x86/kvm/vmx.c
26634+++ b/arch/x86/kvm/vmx.c
26635@@ -1311,12 +1311,12 @@ static void vmcs_write64(unsigned long field, u64 value)
26636 #endif
26637 }
26638
26639-static void vmcs_clear_bits(unsigned long field, u32 mask)
26640+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
26641 {
26642 vmcs_writel(field, vmcs_readl(field) & ~mask);
26643 }
26644
26645-static void vmcs_set_bits(unsigned long field, u32 mask)
26646+static void vmcs_set_bits(unsigned long field, unsigned long mask)
26647 {
26648 vmcs_writel(field, vmcs_readl(field) | mask);
26649 }
26650@@ -1517,7 +1517,11 @@ static void reload_tss(void)
26651 struct desc_struct *descs;
26652
26653 descs = (void *)gdt->address;
26654+
26655+ pax_open_kernel();
26656 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
26657+ pax_close_kernel();
26658+
26659 load_TR_desc();
26660 }
26661
26662@@ -1741,6 +1745,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
26663 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
26664 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
26665
26666+#ifdef CONFIG_PAX_PER_CPU_PGD
26667+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
26668+#endif
26669+
26670 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
26671 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
26672 vmx->loaded_vmcs->cpu = cpu;
26673@@ -2935,8 +2943,11 @@ static __init int hardware_setup(void)
26674 if (!cpu_has_vmx_flexpriority())
26675 flexpriority_enabled = 0;
26676
26677- if (!cpu_has_vmx_tpr_shadow())
26678- kvm_x86_ops->update_cr8_intercept = NULL;
26679+ if (!cpu_has_vmx_tpr_shadow()) {
26680+ pax_open_kernel();
26681+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
26682+ pax_close_kernel();
26683+ }
26684
26685 if (enable_ept && !cpu_has_vmx_ept_2m_page())
26686 kvm_disable_largepages();
26687@@ -2947,13 +2958,15 @@ static __init int hardware_setup(void)
26688 if (!cpu_has_vmx_apicv())
26689 enable_apicv = 0;
26690
26691+ pax_open_kernel();
26692 if (enable_apicv)
26693- kvm_x86_ops->update_cr8_intercept = NULL;
26694+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
26695 else {
26696- kvm_x86_ops->hwapic_irr_update = NULL;
26697- kvm_x86_ops->deliver_posted_interrupt = NULL;
26698- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
26699+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
26700+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
26701+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
26702 }
26703+ pax_close_kernel();
26704
26705 if (nested)
26706 nested_vmx_setup_ctls_msrs();
26707@@ -4076,7 +4089,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
26708
26709 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
26710 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
26711+
26712+#ifndef CONFIG_PAX_PER_CPU_PGD
26713 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
26714+#endif
26715
26716 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
26717 #ifdef CONFIG_X86_64
26718@@ -4098,7 +4114,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
26719 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
26720 vmx->host_idt_base = dt.address;
26721
26722- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
26723+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
26724
26725 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
26726 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
26727@@ -7030,6 +7046,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
26728 "jmp 2f \n\t"
26729 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
26730 "2: "
26731+
26732+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26733+ "ljmp %[cs],$3f\n\t"
26734+ "3: "
26735+#endif
26736+
26737 /* Save guest registers, load host registers, keep flags */
26738 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
26739 "pop %0 \n\t"
26740@@ -7082,6 +7104,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
26741 #endif
26742 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
26743 [wordsize]"i"(sizeof(ulong))
26744+
26745+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26746+ ,[cs]"i"(__KERNEL_CS)
26747+#endif
26748+
26749 : "cc", "memory"
26750 #ifdef CONFIG_X86_64
26751 , "rax", "rbx", "rdi", "rsi"
26752@@ -7095,7 +7122,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
26753 if (debugctlmsr)
26754 update_debugctlmsr(debugctlmsr);
26755
26756-#ifndef CONFIG_X86_64
26757+#ifdef CONFIG_X86_32
26758 /*
26759 * The sysexit path does not restore ds/es, so we must set them to
26760 * a reasonable value ourselves.
26761@@ -7104,8 +7131,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
26762 * may be executed in interrupt context, which saves and restore segments
26763 * around it, nullifying its effect.
26764 */
26765- loadsegment(ds, __USER_DS);
26766- loadsegment(es, __USER_DS);
26767+ loadsegment(ds, __KERNEL_DS);
26768+ loadsegment(es, __KERNEL_DS);
26769+ loadsegment(ss, __KERNEL_DS);
26770+
26771+#ifdef CONFIG_PAX_KERNEXEC
26772+ loadsegment(fs, __KERNEL_PERCPU);
26773+#endif
26774+
26775+#ifdef CONFIG_PAX_MEMORY_UDEREF
26776+ __set_fs(current_thread_info()->addr_limit);
26777+#endif
26778+
26779 #endif
26780
26781 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
26782diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
26783index e8ba99c..ee9d7d9 100644
26784--- a/arch/x86/kvm/x86.c
26785+++ b/arch/x86/kvm/x86.c
26786@@ -1725,8 +1725,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
26787 {
26788 struct kvm *kvm = vcpu->kvm;
26789 int lm = is_long_mode(vcpu);
26790- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
26791- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
26792+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
26793+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
26794 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
26795 : kvm->arch.xen_hvm_config.blob_size_32;
26796 u32 page_num = data & ~PAGE_MASK;
26797@@ -2609,6 +2609,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
26798 if (n < msr_list.nmsrs)
26799 goto out;
26800 r = -EFAULT;
26801+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
26802+ goto out;
26803 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
26804 num_msrs_to_save * sizeof(u32)))
26805 goto out;
26806@@ -5297,7 +5299,7 @@ static struct notifier_block pvclock_gtod_notifier = {
26807 };
26808 #endif
26809
26810-int kvm_arch_init(void *opaque)
26811+int kvm_arch_init(const void *opaque)
26812 {
26813 int r;
26814 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
26815diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
26816index 7114c63..a1018fc 100644
26817--- a/arch/x86/lguest/boot.c
26818+++ b/arch/x86/lguest/boot.c
26819@@ -1201,9 +1201,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
26820 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
26821 * Launcher to reboot us.
26822 */
26823-static void lguest_restart(char *reason)
26824+static __noreturn void lguest_restart(char *reason)
26825 {
26826 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
26827+ BUG();
26828 }
26829
26830 /*G:050
26831diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
26832index 00933d5..3a64af9 100644
26833--- a/arch/x86/lib/atomic64_386_32.S
26834+++ b/arch/x86/lib/atomic64_386_32.S
26835@@ -48,6 +48,10 @@ BEGIN(read)
26836 movl (v), %eax
26837 movl 4(v), %edx
26838 RET_ENDP
26839+BEGIN(read_unchecked)
26840+ movl (v), %eax
26841+ movl 4(v), %edx
26842+RET_ENDP
26843 #undef v
26844
26845 #define v %esi
26846@@ -55,6 +59,10 @@ BEGIN(set)
26847 movl %ebx, (v)
26848 movl %ecx, 4(v)
26849 RET_ENDP
26850+BEGIN(set_unchecked)
26851+ movl %ebx, (v)
26852+ movl %ecx, 4(v)
26853+RET_ENDP
26854 #undef v
26855
26856 #define v %esi
26857@@ -70,6 +78,20 @@ RET_ENDP
26858 BEGIN(add)
26859 addl %eax, (v)
26860 adcl %edx, 4(v)
26861+
26862+#ifdef CONFIG_PAX_REFCOUNT
26863+ jno 0f
26864+ subl %eax, (v)
26865+ sbbl %edx, 4(v)
26866+ int $4
26867+0:
26868+ _ASM_EXTABLE(0b, 0b)
26869+#endif
26870+
26871+RET_ENDP
26872+BEGIN(add_unchecked)
26873+ addl %eax, (v)
26874+ adcl %edx, 4(v)
26875 RET_ENDP
26876 #undef v
26877
26878@@ -77,6 +99,24 @@ RET_ENDP
26879 BEGIN(add_return)
26880 addl (v), %eax
26881 adcl 4(v), %edx
26882+
26883+#ifdef CONFIG_PAX_REFCOUNT
26884+ into
26885+1234:
26886+ _ASM_EXTABLE(1234b, 2f)
26887+#endif
26888+
26889+ movl %eax, (v)
26890+ movl %edx, 4(v)
26891+
26892+#ifdef CONFIG_PAX_REFCOUNT
26893+2:
26894+#endif
26895+
26896+RET_ENDP
26897+BEGIN(add_return_unchecked)
26898+ addl (v), %eax
26899+ adcl 4(v), %edx
26900 movl %eax, (v)
26901 movl %edx, 4(v)
26902 RET_ENDP
26903@@ -86,6 +126,20 @@ RET_ENDP
26904 BEGIN(sub)
26905 subl %eax, (v)
26906 sbbl %edx, 4(v)
26907+
26908+#ifdef CONFIG_PAX_REFCOUNT
26909+ jno 0f
26910+ addl %eax, (v)
26911+ adcl %edx, 4(v)
26912+ int $4
26913+0:
26914+ _ASM_EXTABLE(0b, 0b)
26915+#endif
26916+
26917+RET_ENDP
26918+BEGIN(sub_unchecked)
26919+ subl %eax, (v)
26920+ sbbl %edx, 4(v)
26921 RET_ENDP
26922 #undef v
26923
26924@@ -96,6 +150,27 @@ BEGIN(sub_return)
26925 sbbl $0, %edx
26926 addl (v), %eax
26927 adcl 4(v), %edx
26928+
26929+#ifdef CONFIG_PAX_REFCOUNT
26930+ into
26931+1234:
26932+ _ASM_EXTABLE(1234b, 2f)
26933+#endif
26934+
26935+ movl %eax, (v)
26936+ movl %edx, 4(v)
26937+
26938+#ifdef CONFIG_PAX_REFCOUNT
26939+2:
26940+#endif
26941+
26942+RET_ENDP
26943+BEGIN(sub_return_unchecked)
26944+ negl %edx
26945+ negl %eax
26946+ sbbl $0, %edx
26947+ addl (v), %eax
26948+ adcl 4(v), %edx
26949 movl %eax, (v)
26950 movl %edx, 4(v)
26951 RET_ENDP
26952@@ -105,6 +180,20 @@ RET_ENDP
26953 BEGIN(inc)
26954 addl $1, (v)
26955 adcl $0, 4(v)
26956+
26957+#ifdef CONFIG_PAX_REFCOUNT
26958+ jno 0f
26959+ subl $1, (v)
26960+ sbbl $0, 4(v)
26961+ int $4
26962+0:
26963+ _ASM_EXTABLE(0b, 0b)
26964+#endif
26965+
26966+RET_ENDP
26967+BEGIN(inc_unchecked)
26968+ addl $1, (v)
26969+ adcl $0, 4(v)
26970 RET_ENDP
26971 #undef v
26972
26973@@ -114,6 +203,26 @@ BEGIN(inc_return)
26974 movl 4(v), %edx
26975 addl $1, %eax
26976 adcl $0, %edx
26977+
26978+#ifdef CONFIG_PAX_REFCOUNT
26979+ into
26980+1234:
26981+ _ASM_EXTABLE(1234b, 2f)
26982+#endif
26983+
26984+ movl %eax, (v)
26985+ movl %edx, 4(v)
26986+
26987+#ifdef CONFIG_PAX_REFCOUNT
26988+2:
26989+#endif
26990+
26991+RET_ENDP
26992+BEGIN(inc_return_unchecked)
26993+ movl (v), %eax
26994+ movl 4(v), %edx
26995+ addl $1, %eax
26996+ adcl $0, %edx
26997 movl %eax, (v)
26998 movl %edx, 4(v)
26999 RET_ENDP
27000@@ -123,6 +232,20 @@ RET_ENDP
27001 BEGIN(dec)
27002 subl $1, (v)
27003 sbbl $0, 4(v)
27004+
27005+#ifdef CONFIG_PAX_REFCOUNT
27006+ jno 0f
27007+ addl $1, (v)
27008+ adcl $0, 4(v)
27009+ int $4
27010+0:
27011+ _ASM_EXTABLE(0b, 0b)
27012+#endif
27013+
27014+RET_ENDP
27015+BEGIN(dec_unchecked)
27016+ subl $1, (v)
27017+ sbbl $0, 4(v)
27018 RET_ENDP
27019 #undef v
27020
27021@@ -132,6 +255,26 @@ BEGIN(dec_return)
27022 movl 4(v), %edx
27023 subl $1, %eax
27024 sbbl $0, %edx
27025+
27026+#ifdef CONFIG_PAX_REFCOUNT
27027+ into
27028+1234:
27029+ _ASM_EXTABLE(1234b, 2f)
27030+#endif
27031+
27032+ movl %eax, (v)
27033+ movl %edx, 4(v)
27034+
27035+#ifdef CONFIG_PAX_REFCOUNT
27036+2:
27037+#endif
27038+
27039+RET_ENDP
27040+BEGIN(dec_return_unchecked)
27041+ movl (v), %eax
27042+ movl 4(v), %edx
27043+ subl $1, %eax
27044+ sbbl $0, %edx
27045 movl %eax, (v)
27046 movl %edx, 4(v)
27047 RET_ENDP
27048@@ -143,6 +286,13 @@ BEGIN(add_unless)
27049 adcl %edx, %edi
27050 addl (v), %eax
27051 adcl 4(v), %edx
27052+
27053+#ifdef CONFIG_PAX_REFCOUNT
27054+ into
27055+1234:
27056+ _ASM_EXTABLE(1234b, 2f)
27057+#endif
27058+
27059 cmpl %eax, %ecx
27060 je 3f
27061 1:
27062@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
27063 1:
27064 addl $1, %eax
27065 adcl $0, %edx
27066+
27067+#ifdef CONFIG_PAX_REFCOUNT
27068+ into
27069+1234:
27070+ _ASM_EXTABLE(1234b, 2f)
27071+#endif
27072+
27073 movl %eax, (v)
27074 movl %edx, 4(v)
27075 movl $1, %eax
27076@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
27077 movl 4(v), %edx
27078 subl $1, %eax
27079 sbbl $0, %edx
27080+
27081+#ifdef CONFIG_PAX_REFCOUNT
27082+ into
27083+1234:
27084+ _ASM_EXTABLE(1234b, 1f)
27085+#endif
27086+
27087 js 1f
27088 movl %eax, (v)
27089 movl %edx, 4(v)
27090diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
27091index f5cc9eb..51fa319 100644
27092--- a/arch/x86/lib/atomic64_cx8_32.S
27093+++ b/arch/x86/lib/atomic64_cx8_32.S
27094@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
27095 CFI_STARTPROC
27096
27097 read64 %ecx
27098+ pax_force_retaddr
27099 ret
27100 CFI_ENDPROC
27101 ENDPROC(atomic64_read_cx8)
27102
27103+ENTRY(atomic64_read_unchecked_cx8)
27104+ CFI_STARTPROC
27105+
27106+ read64 %ecx
27107+ pax_force_retaddr
27108+ ret
27109+ CFI_ENDPROC
27110+ENDPROC(atomic64_read_unchecked_cx8)
27111+
27112 ENTRY(atomic64_set_cx8)
27113 CFI_STARTPROC
27114
27115@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
27116 cmpxchg8b (%esi)
27117 jne 1b
27118
27119+ pax_force_retaddr
27120 ret
27121 CFI_ENDPROC
27122 ENDPROC(atomic64_set_cx8)
27123
27124+ENTRY(atomic64_set_unchecked_cx8)
27125+ CFI_STARTPROC
27126+
27127+1:
27128+/* we don't need LOCK_PREFIX since aligned 64-bit writes
27129+ * are atomic on 586 and newer */
27130+ cmpxchg8b (%esi)
27131+ jne 1b
27132+
27133+ pax_force_retaddr
27134+ ret
27135+ CFI_ENDPROC
27136+ENDPROC(atomic64_set_unchecked_cx8)
27137+
27138 ENTRY(atomic64_xchg_cx8)
27139 CFI_STARTPROC
27140
27141@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
27142 cmpxchg8b (%esi)
27143 jne 1b
27144
27145+ pax_force_retaddr
27146 ret
27147 CFI_ENDPROC
27148 ENDPROC(atomic64_xchg_cx8)
27149
27150-.macro addsub_return func ins insc
27151-ENTRY(atomic64_\func\()_return_cx8)
27152+.macro addsub_return func ins insc unchecked=""
27153+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
27154 CFI_STARTPROC
27155 SAVE ebp
27156 SAVE ebx
27157@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
27158 movl %edx, %ecx
27159 \ins\()l %esi, %ebx
27160 \insc\()l %edi, %ecx
27161+
27162+.ifb \unchecked
27163+#ifdef CONFIG_PAX_REFCOUNT
27164+ into
27165+2:
27166+ _ASM_EXTABLE(2b, 3f)
27167+#endif
27168+.endif
27169+
27170 LOCK_PREFIX
27171 cmpxchg8b (%ebp)
27172 jne 1b
27173-
27174-10:
27175 movl %ebx, %eax
27176 movl %ecx, %edx
27177+
27178+.ifb \unchecked
27179+#ifdef CONFIG_PAX_REFCOUNT
27180+3:
27181+#endif
27182+.endif
27183+
27184 RESTORE edi
27185 RESTORE esi
27186 RESTORE ebx
27187 RESTORE ebp
27188+ pax_force_retaddr
27189 ret
27190 CFI_ENDPROC
27191-ENDPROC(atomic64_\func\()_return_cx8)
27192+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
27193 .endm
27194
27195 addsub_return add add adc
27196 addsub_return sub sub sbb
27197+addsub_return add add adc _unchecked
27198+addsub_return sub sub sbb _unchecked
27199
27200-.macro incdec_return func ins insc
27201-ENTRY(atomic64_\func\()_return_cx8)
27202+.macro incdec_return func ins insc unchecked=""
27203+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
27204 CFI_STARTPROC
27205 SAVE ebx
27206
27207@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
27208 movl %edx, %ecx
27209 \ins\()l $1, %ebx
27210 \insc\()l $0, %ecx
27211+
27212+.ifb \unchecked
27213+#ifdef CONFIG_PAX_REFCOUNT
27214+ into
27215+2:
27216+ _ASM_EXTABLE(2b, 3f)
27217+#endif
27218+.endif
27219+
27220 LOCK_PREFIX
27221 cmpxchg8b (%esi)
27222 jne 1b
27223
27224-10:
27225 movl %ebx, %eax
27226 movl %ecx, %edx
27227+
27228+.ifb \unchecked
27229+#ifdef CONFIG_PAX_REFCOUNT
27230+3:
27231+#endif
27232+.endif
27233+
27234 RESTORE ebx
27235+ pax_force_retaddr
27236 ret
27237 CFI_ENDPROC
27238-ENDPROC(atomic64_\func\()_return_cx8)
27239+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
27240 .endm
27241
27242 incdec_return inc add adc
27243 incdec_return dec sub sbb
27244+incdec_return inc add adc _unchecked
27245+incdec_return dec sub sbb _unchecked
27246
27247 ENTRY(atomic64_dec_if_positive_cx8)
27248 CFI_STARTPROC
27249@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
27250 movl %edx, %ecx
27251 subl $1, %ebx
27252 sbb $0, %ecx
27253+
27254+#ifdef CONFIG_PAX_REFCOUNT
27255+ into
27256+1234:
27257+ _ASM_EXTABLE(1234b, 2f)
27258+#endif
27259+
27260 js 2f
27261 LOCK_PREFIX
27262 cmpxchg8b (%esi)
27263@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
27264 movl %ebx, %eax
27265 movl %ecx, %edx
27266 RESTORE ebx
27267+ pax_force_retaddr
27268 ret
27269 CFI_ENDPROC
27270 ENDPROC(atomic64_dec_if_positive_cx8)
27271@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
27272 movl %edx, %ecx
27273 addl %ebp, %ebx
27274 adcl %edi, %ecx
27275+
27276+#ifdef CONFIG_PAX_REFCOUNT
27277+ into
27278+1234:
27279+ _ASM_EXTABLE(1234b, 3f)
27280+#endif
27281+
27282 LOCK_PREFIX
27283 cmpxchg8b (%esi)
27284 jne 1b
27285@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
27286 CFI_ADJUST_CFA_OFFSET -8
27287 RESTORE ebx
27288 RESTORE ebp
27289+ pax_force_retaddr
27290 ret
27291 4:
27292 cmpl %edx, 4(%esp)
27293@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
27294 xorl %ecx, %ecx
27295 addl $1, %ebx
27296 adcl %edx, %ecx
27297+
27298+#ifdef CONFIG_PAX_REFCOUNT
27299+ into
27300+1234:
27301+ _ASM_EXTABLE(1234b, 3f)
27302+#endif
27303+
27304 LOCK_PREFIX
27305 cmpxchg8b (%esi)
27306 jne 1b
27307@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
27308 movl $1, %eax
27309 3:
27310 RESTORE ebx
27311+ pax_force_retaddr
27312 ret
27313 CFI_ENDPROC
27314 ENDPROC(atomic64_inc_not_zero_cx8)
27315diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
27316index e78b8ee..7e173a8 100644
27317--- a/arch/x86/lib/checksum_32.S
27318+++ b/arch/x86/lib/checksum_32.S
27319@@ -29,7 +29,8 @@
27320 #include <asm/dwarf2.h>
27321 #include <asm/errno.h>
27322 #include <asm/asm.h>
27323-
27324+#include <asm/segment.h>
27325+
27326 /*
27327 * computes a partial checksum, e.g. for TCP/UDP fragments
27328 */
27329@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
27330
27331 #define ARGBASE 16
27332 #define FP 12
27333-
27334-ENTRY(csum_partial_copy_generic)
27335+
27336+ENTRY(csum_partial_copy_generic_to_user)
27337 CFI_STARTPROC
27338+
27339+#ifdef CONFIG_PAX_MEMORY_UDEREF
27340+ pushl_cfi %gs
27341+ popl_cfi %es
27342+ jmp csum_partial_copy_generic
27343+#endif
27344+
27345+ENTRY(csum_partial_copy_generic_from_user)
27346+
27347+#ifdef CONFIG_PAX_MEMORY_UDEREF
27348+ pushl_cfi %gs
27349+ popl_cfi %ds
27350+#endif
27351+
27352+ENTRY(csum_partial_copy_generic)
27353 subl $4,%esp
27354 CFI_ADJUST_CFA_OFFSET 4
27355 pushl_cfi %edi
27356@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
27357 jmp 4f
27358 SRC(1: movw (%esi), %bx )
27359 addl $2, %esi
27360-DST( movw %bx, (%edi) )
27361+DST( movw %bx, %es:(%edi) )
27362 addl $2, %edi
27363 addw %bx, %ax
27364 adcl $0, %eax
27365@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
27366 SRC(1: movl (%esi), %ebx )
27367 SRC( movl 4(%esi), %edx )
27368 adcl %ebx, %eax
27369-DST( movl %ebx, (%edi) )
27370+DST( movl %ebx, %es:(%edi) )
27371 adcl %edx, %eax
27372-DST( movl %edx, 4(%edi) )
27373+DST( movl %edx, %es:4(%edi) )
27374
27375 SRC( movl 8(%esi), %ebx )
27376 SRC( movl 12(%esi), %edx )
27377 adcl %ebx, %eax
27378-DST( movl %ebx, 8(%edi) )
27379+DST( movl %ebx, %es:8(%edi) )
27380 adcl %edx, %eax
27381-DST( movl %edx, 12(%edi) )
27382+DST( movl %edx, %es:12(%edi) )
27383
27384 SRC( movl 16(%esi), %ebx )
27385 SRC( movl 20(%esi), %edx )
27386 adcl %ebx, %eax
27387-DST( movl %ebx, 16(%edi) )
27388+DST( movl %ebx, %es:16(%edi) )
27389 adcl %edx, %eax
27390-DST( movl %edx, 20(%edi) )
27391+DST( movl %edx, %es:20(%edi) )
27392
27393 SRC( movl 24(%esi), %ebx )
27394 SRC( movl 28(%esi), %edx )
27395 adcl %ebx, %eax
27396-DST( movl %ebx, 24(%edi) )
27397+DST( movl %ebx, %es:24(%edi) )
27398 adcl %edx, %eax
27399-DST( movl %edx, 28(%edi) )
27400+DST( movl %edx, %es:28(%edi) )
27401
27402 lea 32(%esi), %esi
27403 lea 32(%edi), %edi
27404@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
27405 shrl $2, %edx # This clears CF
27406 SRC(3: movl (%esi), %ebx )
27407 adcl %ebx, %eax
27408-DST( movl %ebx, (%edi) )
27409+DST( movl %ebx, %es:(%edi) )
27410 lea 4(%esi), %esi
27411 lea 4(%edi), %edi
27412 dec %edx
27413@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
27414 jb 5f
27415 SRC( movw (%esi), %cx )
27416 leal 2(%esi), %esi
27417-DST( movw %cx, (%edi) )
27418+DST( movw %cx, %es:(%edi) )
27419 leal 2(%edi), %edi
27420 je 6f
27421 shll $16,%ecx
27422 SRC(5: movb (%esi), %cl )
27423-DST( movb %cl, (%edi) )
27424+DST( movb %cl, %es:(%edi) )
27425 6: addl %ecx, %eax
27426 adcl $0, %eax
27427 7:
27428@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
27429
27430 6001:
27431 movl ARGBASE+20(%esp), %ebx # src_err_ptr
27432- movl $-EFAULT, (%ebx)
27433+ movl $-EFAULT, %ss:(%ebx)
27434
27435 # zero the complete destination - computing the rest
27436 # is too much work
27437@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
27438
27439 6002:
27440 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
27441- movl $-EFAULT,(%ebx)
27442+ movl $-EFAULT,%ss:(%ebx)
27443 jmp 5000b
27444
27445 .previous
27446
27447+ pushl_cfi %ss
27448+ popl_cfi %ds
27449+ pushl_cfi %ss
27450+ popl_cfi %es
27451 popl_cfi %ebx
27452 CFI_RESTORE ebx
27453 popl_cfi %esi
27454@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
27455 popl_cfi %ecx # equivalent to addl $4,%esp
27456 ret
27457 CFI_ENDPROC
27458-ENDPROC(csum_partial_copy_generic)
27459+ENDPROC(csum_partial_copy_generic_to_user)
27460
27461 #else
27462
27463 /* Version for PentiumII/PPro */
27464
27465 #define ROUND1(x) \
27466+ nop; nop; nop; \
27467 SRC(movl x(%esi), %ebx ) ; \
27468 addl %ebx, %eax ; \
27469- DST(movl %ebx, x(%edi) ) ;
27470+ DST(movl %ebx, %es:x(%edi)) ;
27471
27472 #define ROUND(x) \
27473+ nop; nop; nop; \
27474 SRC(movl x(%esi), %ebx ) ; \
27475 adcl %ebx, %eax ; \
27476- DST(movl %ebx, x(%edi) ) ;
27477+ DST(movl %ebx, %es:x(%edi)) ;
27478
27479 #define ARGBASE 12
27480-
27481-ENTRY(csum_partial_copy_generic)
27482+
27483+ENTRY(csum_partial_copy_generic_to_user)
27484 CFI_STARTPROC
27485+
27486+#ifdef CONFIG_PAX_MEMORY_UDEREF
27487+ pushl_cfi %gs
27488+ popl_cfi %es
27489+ jmp csum_partial_copy_generic
27490+#endif
27491+
27492+ENTRY(csum_partial_copy_generic_from_user)
27493+
27494+#ifdef CONFIG_PAX_MEMORY_UDEREF
27495+ pushl_cfi %gs
27496+ popl_cfi %ds
27497+#endif
27498+
27499+ENTRY(csum_partial_copy_generic)
27500 pushl_cfi %ebx
27501 CFI_REL_OFFSET ebx, 0
27502 pushl_cfi %edi
27503@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
27504 subl %ebx, %edi
27505 lea -1(%esi),%edx
27506 andl $-32,%edx
27507- lea 3f(%ebx,%ebx), %ebx
27508+ lea 3f(%ebx,%ebx,2), %ebx
27509 testl %esi, %esi
27510 jmp *%ebx
27511 1: addl $64,%esi
27512@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
27513 jb 5f
27514 SRC( movw (%esi), %dx )
27515 leal 2(%esi), %esi
27516-DST( movw %dx, (%edi) )
27517+DST( movw %dx, %es:(%edi) )
27518 leal 2(%edi), %edi
27519 je 6f
27520 shll $16,%edx
27521 5:
27522 SRC( movb (%esi), %dl )
27523-DST( movb %dl, (%edi) )
27524+DST( movb %dl, %es:(%edi) )
27525 6: addl %edx, %eax
27526 adcl $0, %eax
27527 7:
27528 .section .fixup, "ax"
27529 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
27530- movl $-EFAULT, (%ebx)
27531+ movl $-EFAULT, %ss:(%ebx)
27532 # zero the complete destination (computing the rest is too much work)
27533 movl ARGBASE+8(%esp),%edi # dst
27534 movl ARGBASE+12(%esp),%ecx # len
27535@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
27536 rep; stosb
27537 jmp 7b
27538 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
27539- movl $-EFAULT, (%ebx)
27540+ movl $-EFAULT, %ss:(%ebx)
27541 jmp 7b
27542 .previous
27543
27544+#ifdef CONFIG_PAX_MEMORY_UDEREF
27545+ pushl_cfi %ss
27546+ popl_cfi %ds
27547+ pushl_cfi %ss
27548+ popl_cfi %es
27549+#endif
27550+
27551 popl_cfi %esi
27552 CFI_RESTORE esi
27553 popl_cfi %edi
27554@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
27555 CFI_RESTORE ebx
27556 ret
27557 CFI_ENDPROC
27558-ENDPROC(csum_partial_copy_generic)
27559+ENDPROC(csum_partial_copy_generic_to_user)
27560
27561 #undef ROUND
27562 #undef ROUND1
27563diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
27564index f2145cf..cea889d 100644
27565--- a/arch/x86/lib/clear_page_64.S
27566+++ b/arch/x86/lib/clear_page_64.S
27567@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
27568 movl $4096/8,%ecx
27569 xorl %eax,%eax
27570 rep stosq
27571+ pax_force_retaddr
27572 ret
27573 CFI_ENDPROC
27574 ENDPROC(clear_page_c)
27575@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
27576 movl $4096,%ecx
27577 xorl %eax,%eax
27578 rep stosb
27579+ pax_force_retaddr
27580 ret
27581 CFI_ENDPROC
27582 ENDPROC(clear_page_c_e)
27583@@ -43,6 +45,7 @@ ENTRY(clear_page)
27584 leaq 64(%rdi),%rdi
27585 jnz .Lloop
27586 nop
27587+ pax_force_retaddr
27588 ret
27589 CFI_ENDPROC
27590 .Lclear_page_end:
27591@@ -58,7 +61,7 @@ ENDPROC(clear_page)
27592
27593 #include <asm/cpufeature.h>
27594
27595- .section .altinstr_replacement,"ax"
27596+ .section .altinstr_replacement,"a"
27597 1: .byte 0xeb /* jmp <disp8> */
27598 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
27599 2: .byte 0xeb /* jmp <disp8> */
27600diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
27601index 1e572c5..2a162cd 100644
27602--- a/arch/x86/lib/cmpxchg16b_emu.S
27603+++ b/arch/x86/lib/cmpxchg16b_emu.S
27604@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
27605
27606 popf
27607 mov $1, %al
27608+ pax_force_retaddr
27609 ret
27610
27611 not_same:
27612 popf
27613 xor %al,%al
27614+ pax_force_retaddr
27615 ret
27616
27617 CFI_ENDPROC
27618diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
27619index 176cca6..1166c50 100644
27620--- a/arch/x86/lib/copy_page_64.S
27621+++ b/arch/x86/lib/copy_page_64.S
27622@@ -9,6 +9,7 @@ copy_page_rep:
27623 CFI_STARTPROC
27624 movl $4096/8, %ecx
27625 rep movsq
27626+ pax_force_retaddr
27627 ret
27628 CFI_ENDPROC
27629 ENDPROC(copy_page_rep)
27630@@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
27631
27632 ENTRY(copy_page)
27633 CFI_STARTPROC
27634- subq $2*8, %rsp
27635- CFI_ADJUST_CFA_OFFSET 2*8
27636+ subq $3*8, %rsp
27637+ CFI_ADJUST_CFA_OFFSET 3*8
27638 movq %rbx, (%rsp)
27639 CFI_REL_OFFSET rbx, 0
27640 movq %r12, 1*8(%rsp)
27641 CFI_REL_OFFSET r12, 1*8
27642+ movq %r13, 2*8(%rsp)
27643+ CFI_REL_OFFSET r13, 2*8
27644
27645 movl $(4096/64)-5, %ecx
27646 .p2align 4
27647@@ -36,7 +39,7 @@ ENTRY(copy_page)
27648 movq 0x8*2(%rsi), %rdx
27649 movq 0x8*3(%rsi), %r8
27650 movq 0x8*4(%rsi), %r9
27651- movq 0x8*5(%rsi), %r10
27652+ movq 0x8*5(%rsi), %r13
27653 movq 0x8*6(%rsi), %r11
27654 movq 0x8*7(%rsi), %r12
27655
27656@@ -47,7 +50,7 @@ ENTRY(copy_page)
27657 movq %rdx, 0x8*2(%rdi)
27658 movq %r8, 0x8*3(%rdi)
27659 movq %r9, 0x8*4(%rdi)
27660- movq %r10, 0x8*5(%rdi)
27661+ movq %r13, 0x8*5(%rdi)
27662 movq %r11, 0x8*6(%rdi)
27663 movq %r12, 0x8*7(%rdi)
27664
27665@@ -66,7 +69,7 @@ ENTRY(copy_page)
27666 movq 0x8*2(%rsi), %rdx
27667 movq 0x8*3(%rsi), %r8
27668 movq 0x8*4(%rsi), %r9
27669- movq 0x8*5(%rsi), %r10
27670+ movq 0x8*5(%rsi), %r13
27671 movq 0x8*6(%rsi), %r11
27672 movq 0x8*7(%rsi), %r12
27673
27674@@ -75,7 +78,7 @@ ENTRY(copy_page)
27675 movq %rdx, 0x8*2(%rdi)
27676 movq %r8, 0x8*3(%rdi)
27677 movq %r9, 0x8*4(%rdi)
27678- movq %r10, 0x8*5(%rdi)
27679+ movq %r13, 0x8*5(%rdi)
27680 movq %r11, 0x8*6(%rdi)
27681 movq %r12, 0x8*7(%rdi)
27682
27683@@ -87,8 +90,11 @@ ENTRY(copy_page)
27684 CFI_RESTORE rbx
27685 movq 1*8(%rsp), %r12
27686 CFI_RESTORE r12
27687- addq $2*8, %rsp
27688- CFI_ADJUST_CFA_OFFSET -2*8
27689+ movq 2*8(%rsp), %r13
27690+ CFI_RESTORE r13
27691+ addq $3*8, %rsp
27692+ CFI_ADJUST_CFA_OFFSET -3*8
27693+ pax_force_retaddr
27694 ret
27695 .Lcopy_page_end:
27696 CFI_ENDPROC
27697@@ -99,7 +105,7 @@ ENDPROC(copy_page)
27698
27699 #include <asm/cpufeature.h>
27700
27701- .section .altinstr_replacement,"ax"
27702+ .section .altinstr_replacement,"a"
27703 1: .byte 0xeb /* jmp <disp8> */
27704 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
27705 2:
27706diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
27707index a30ca15..6b3f4e1 100644
27708--- a/arch/x86/lib/copy_user_64.S
27709+++ b/arch/x86/lib/copy_user_64.S
27710@@ -18,31 +18,7 @@
27711 #include <asm/alternative-asm.h>
27712 #include <asm/asm.h>
27713 #include <asm/smap.h>
27714-
27715-/*
27716- * By placing feature2 after feature1 in altinstructions section, we logically
27717- * implement:
27718- * If CPU has feature2, jmp to alt2 is used
27719- * else if CPU has feature1, jmp to alt1 is used
27720- * else jmp to orig is used.
27721- */
27722- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
27723-0:
27724- .byte 0xe9 /* 32bit jump */
27725- .long \orig-1f /* by default jump to orig */
27726-1:
27727- .section .altinstr_replacement,"ax"
27728-2: .byte 0xe9 /* near jump with 32bit immediate */
27729- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
27730-3: .byte 0xe9 /* near jump with 32bit immediate */
27731- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
27732- .previous
27733-
27734- .section .altinstructions,"a"
27735- altinstruction_entry 0b,2b,\feature1,5,5
27736- altinstruction_entry 0b,3b,\feature2,5,5
27737- .previous
27738- .endm
27739+#include <asm/pgtable.h>
27740
27741 .macro ALIGN_DESTINATION
27742 #ifdef FIX_ALIGNMENT
27743@@ -70,52 +46,6 @@
27744 #endif
27745 .endm
27746
27747-/* Standard copy_to_user with segment limit checking */
27748-ENTRY(_copy_to_user)
27749- CFI_STARTPROC
27750- GET_THREAD_INFO(%rax)
27751- movq %rdi,%rcx
27752- addq %rdx,%rcx
27753- jc bad_to_user
27754- cmpq TI_addr_limit(%rax),%rcx
27755- ja bad_to_user
27756- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
27757- copy_user_generic_unrolled,copy_user_generic_string, \
27758- copy_user_enhanced_fast_string
27759- CFI_ENDPROC
27760-ENDPROC(_copy_to_user)
27761-
27762-/* Standard copy_from_user with segment limit checking */
27763-ENTRY(_copy_from_user)
27764- CFI_STARTPROC
27765- GET_THREAD_INFO(%rax)
27766- movq %rsi,%rcx
27767- addq %rdx,%rcx
27768- jc bad_from_user
27769- cmpq TI_addr_limit(%rax),%rcx
27770- ja bad_from_user
27771- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
27772- copy_user_generic_unrolled,copy_user_generic_string, \
27773- copy_user_enhanced_fast_string
27774- CFI_ENDPROC
27775-ENDPROC(_copy_from_user)
27776-
27777- .section .fixup,"ax"
27778- /* must zero dest */
27779-ENTRY(bad_from_user)
27780-bad_from_user:
27781- CFI_STARTPROC
27782- movl %edx,%ecx
27783- xorl %eax,%eax
27784- rep
27785- stosb
27786-bad_to_user:
27787- movl %edx,%eax
27788- ret
27789- CFI_ENDPROC
27790-ENDPROC(bad_from_user)
27791- .previous
27792-
27793 /*
27794 * copy_user_generic_unrolled - memory copy with exception handling.
27795 * This version is for CPUs like P4 that don't have efficient micro
27796@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
27797 */
27798 ENTRY(copy_user_generic_unrolled)
27799 CFI_STARTPROC
27800+ ASM_PAX_OPEN_USERLAND
27801 ASM_STAC
27802 cmpl $8,%edx
27803 jb 20f /* less then 8 bytes, go to byte copy loop */
27804@@ -141,19 +72,19 @@ ENTRY(copy_user_generic_unrolled)
27805 jz 17f
27806 1: movq (%rsi),%r8
27807 2: movq 1*8(%rsi),%r9
27808-3: movq 2*8(%rsi),%r10
27809+3: movq 2*8(%rsi),%rax
27810 4: movq 3*8(%rsi),%r11
27811 5: movq %r8,(%rdi)
27812 6: movq %r9,1*8(%rdi)
27813-7: movq %r10,2*8(%rdi)
27814+7: movq %rax,2*8(%rdi)
27815 8: movq %r11,3*8(%rdi)
27816 9: movq 4*8(%rsi),%r8
27817 10: movq 5*8(%rsi),%r9
27818-11: movq 6*8(%rsi),%r10
27819+11: movq 6*8(%rsi),%rax
27820 12: movq 7*8(%rsi),%r11
27821 13: movq %r8,4*8(%rdi)
27822 14: movq %r9,5*8(%rdi)
27823-15: movq %r10,6*8(%rdi)
27824+15: movq %rax,6*8(%rdi)
27825 16: movq %r11,7*8(%rdi)
27826 leaq 64(%rsi),%rsi
27827 leaq 64(%rdi),%rdi
27828@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
27829 jnz 21b
27830 23: xor %eax,%eax
27831 ASM_CLAC
27832+ ASM_PAX_CLOSE_USERLAND
27833+ pax_force_retaddr
27834 ret
27835
27836 .section .fixup,"ax"
27837@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
27838 */
27839 ENTRY(copy_user_generic_string)
27840 CFI_STARTPROC
27841+ ASM_PAX_OPEN_USERLAND
27842 ASM_STAC
27843 andl %edx,%edx
27844 jz 4f
27845@@ -251,6 +185,8 @@ ENTRY(copy_user_generic_string)
27846 movsb
27847 4: xorl %eax,%eax
27848 ASM_CLAC
27849+ ASM_PAX_CLOSE_USERLAND
27850+ pax_force_retaddr
27851 ret
27852
27853 .section .fixup,"ax"
27854@@ -278,6 +214,7 @@ ENDPROC(copy_user_generic_string)
27855 */
27856 ENTRY(copy_user_enhanced_fast_string)
27857 CFI_STARTPROC
27858+ ASM_PAX_OPEN_USERLAND
27859 ASM_STAC
27860 andl %edx,%edx
27861 jz 2f
27862@@ -286,6 +223,8 @@ ENTRY(copy_user_enhanced_fast_string)
27863 movsb
27864 2: xorl %eax,%eax
27865 ASM_CLAC
27866+ ASM_PAX_CLOSE_USERLAND
27867+ pax_force_retaddr
27868 ret
27869
27870 .section .fixup,"ax"
27871diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
27872index 6a4f43c..55d26f2 100644
27873--- a/arch/x86/lib/copy_user_nocache_64.S
27874+++ b/arch/x86/lib/copy_user_nocache_64.S
27875@@ -8,6 +8,7 @@
27876
27877 #include <linux/linkage.h>
27878 #include <asm/dwarf2.h>
27879+#include <asm/alternative-asm.h>
27880
27881 #define FIX_ALIGNMENT 1
27882
27883@@ -16,6 +17,7 @@
27884 #include <asm/thread_info.h>
27885 #include <asm/asm.h>
27886 #include <asm/smap.h>
27887+#include <asm/pgtable.h>
27888
27889 .macro ALIGN_DESTINATION
27890 #ifdef FIX_ALIGNMENT
27891@@ -49,6 +51,16 @@
27892 */
27893 ENTRY(__copy_user_nocache)
27894 CFI_STARTPROC
27895+
27896+#ifdef CONFIG_PAX_MEMORY_UDEREF
27897+ mov pax_user_shadow_base,%rcx
27898+ cmp %rcx,%rsi
27899+ jae 1f
27900+ add %rcx,%rsi
27901+1:
27902+#endif
27903+
27904+ ASM_PAX_OPEN_USERLAND
27905 ASM_STAC
27906 cmpl $8,%edx
27907 jb 20f /* less then 8 bytes, go to byte copy loop */
27908@@ -59,19 +71,19 @@ ENTRY(__copy_user_nocache)
27909 jz 17f
27910 1: movq (%rsi),%r8
27911 2: movq 1*8(%rsi),%r9
27912-3: movq 2*8(%rsi),%r10
27913+3: movq 2*8(%rsi),%rax
27914 4: movq 3*8(%rsi),%r11
27915 5: movnti %r8,(%rdi)
27916 6: movnti %r9,1*8(%rdi)
27917-7: movnti %r10,2*8(%rdi)
27918+7: movnti %rax,2*8(%rdi)
27919 8: movnti %r11,3*8(%rdi)
27920 9: movq 4*8(%rsi),%r8
27921 10: movq 5*8(%rsi),%r9
27922-11: movq 6*8(%rsi),%r10
27923+11: movq 6*8(%rsi),%rax
27924 12: movq 7*8(%rsi),%r11
27925 13: movnti %r8,4*8(%rdi)
27926 14: movnti %r9,5*8(%rdi)
27927-15: movnti %r10,6*8(%rdi)
27928+15: movnti %rax,6*8(%rdi)
27929 16: movnti %r11,7*8(%rdi)
27930 leaq 64(%rsi),%rsi
27931 leaq 64(%rdi),%rdi
27932@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
27933 jnz 21b
27934 23: xorl %eax,%eax
27935 ASM_CLAC
27936+ ASM_PAX_CLOSE_USERLAND
27937 sfence
27938+ pax_force_retaddr
27939 ret
27940
27941 .section .fixup,"ax"
27942diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
27943index 2419d5f..953ee51 100644
27944--- a/arch/x86/lib/csum-copy_64.S
27945+++ b/arch/x86/lib/csum-copy_64.S
27946@@ -9,6 +9,7 @@
27947 #include <asm/dwarf2.h>
27948 #include <asm/errno.h>
27949 #include <asm/asm.h>
27950+#include <asm/alternative-asm.h>
27951
27952 /*
27953 * Checksum copy with exception handling.
27954@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
27955 CFI_RESTORE rbp
27956 addq $7*8, %rsp
27957 CFI_ADJUST_CFA_OFFSET -7*8
27958+ pax_force_retaddr 0, 1
27959 ret
27960 CFI_RESTORE_STATE
27961
27962diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
27963index 25b7ae8..c40113e 100644
27964--- a/arch/x86/lib/csum-wrappers_64.c
27965+++ b/arch/x86/lib/csum-wrappers_64.c
27966@@ -52,8 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
27967 len -= 2;
27968 }
27969 }
27970- isum = csum_partial_copy_generic((__force const void *)src,
27971+ pax_open_userland();
27972+ stac();
27973+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
27974 dst, len, isum, errp, NULL);
27975+ clac();
27976+ pax_close_userland();
27977 if (unlikely(*errp))
27978 goto out_err;
27979
27980@@ -105,8 +109,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
27981 }
27982
27983 *errp = 0;
27984- return csum_partial_copy_generic(src, (void __force *)dst,
27985+ pax_open_userland();
27986+ stac();
27987+ isum = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
27988 len, isum, NULL, errp);
27989+ clac();
27990+ pax_close_userland();
27991+ return isum;
27992 }
27993 EXPORT_SYMBOL(csum_partial_copy_to_user);
27994
27995diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
27996index a451235..1daa956 100644
27997--- a/arch/x86/lib/getuser.S
27998+++ b/arch/x86/lib/getuser.S
27999@@ -33,17 +33,40 @@
28000 #include <asm/thread_info.h>
28001 #include <asm/asm.h>
28002 #include <asm/smap.h>
28003+#include <asm/segment.h>
28004+#include <asm/pgtable.h>
28005+#include <asm/alternative-asm.h>
28006+
28007+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28008+#define __copyuser_seg gs;
28009+#else
28010+#define __copyuser_seg
28011+#endif
28012
28013 .text
28014 ENTRY(__get_user_1)
28015 CFI_STARTPROC
28016+
28017+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28018 GET_THREAD_INFO(%_ASM_DX)
28019 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28020 jae bad_get_user
28021 ASM_STAC
28022-1: movzbl (%_ASM_AX),%edx
28023+
28024+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28025+ mov pax_user_shadow_base,%_ASM_DX
28026+ cmp %_ASM_DX,%_ASM_AX
28027+ jae 1234f
28028+ add %_ASM_DX,%_ASM_AX
28029+1234:
28030+#endif
28031+
28032+#endif
28033+
28034+1: __copyuser_seg movzbl (%_ASM_AX),%edx
28035 xor %eax,%eax
28036 ASM_CLAC
28037+ pax_force_retaddr
28038 ret
28039 CFI_ENDPROC
28040 ENDPROC(__get_user_1)
28041@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
28042 ENTRY(__get_user_2)
28043 CFI_STARTPROC
28044 add $1,%_ASM_AX
28045+
28046+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28047 jc bad_get_user
28048 GET_THREAD_INFO(%_ASM_DX)
28049 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28050 jae bad_get_user
28051 ASM_STAC
28052-2: movzwl -1(%_ASM_AX),%edx
28053+
28054+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28055+ mov pax_user_shadow_base,%_ASM_DX
28056+ cmp %_ASM_DX,%_ASM_AX
28057+ jae 1234f
28058+ add %_ASM_DX,%_ASM_AX
28059+1234:
28060+#endif
28061+
28062+#endif
28063+
28064+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
28065 xor %eax,%eax
28066 ASM_CLAC
28067+ pax_force_retaddr
28068 ret
28069 CFI_ENDPROC
28070 ENDPROC(__get_user_2)
28071@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
28072 ENTRY(__get_user_4)
28073 CFI_STARTPROC
28074 add $3,%_ASM_AX
28075+
28076+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28077 jc bad_get_user
28078 GET_THREAD_INFO(%_ASM_DX)
28079 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28080 jae bad_get_user
28081 ASM_STAC
28082-3: movl -3(%_ASM_AX),%edx
28083+
28084+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28085+ mov pax_user_shadow_base,%_ASM_DX
28086+ cmp %_ASM_DX,%_ASM_AX
28087+ jae 1234f
28088+ add %_ASM_DX,%_ASM_AX
28089+1234:
28090+#endif
28091+
28092+#endif
28093+
28094+3: __copyuser_seg movl -3(%_ASM_AX),%edx
28095 xor %eax,%eax
28096 ASM_CLAC
28097+ pax_force_retaddr
28098 ret
28099 CFI_ENDPROC
28100 ENDPROC(__get_user_4)
28101@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
28102 GET_THREAD_INFO(%_ASM_DX)
28103 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28104 jae bad_get_user
28105+
28106+#ifdef CONFIG_PAX_MEMORY_UDEREF
28107+ mov pax_user_shadow_base,%_ASM_DX
28108+ cmp %_ASM_DX,%_ASM_AX
28109+ jae 1234f
28110+ add %_ASM_DX,%_ASM_AX
28111+1234:
28112+#endif
28113+
28114 ASM_STAC
28115 4: movq -7(%_ASM_AX),%rdx
28116 xor %eax,%eax
28117 ASM_CLAC
28118+ pax_force_retaddr
28119 ret
28120 #else
28121 add $7,%_ASM_AX
28122@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
28123 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28124 jae bad_get_user_8
28125 ASM_STAC
28126-4: movl -7(%_ASM_AX),%edx
28127-5: movl -3(%_ASM_AX),%ecx
28128+4: __copyuser_seg movl -7(%_ASM_AX),%edx
28129+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
28130 xor %eax,%eax
28131 ASM_CLAC
28132+ pax_force_retaddr
28133 ret
28134 #endif
28135 CFI_ENDPROC
28136@@ -113,6 +175,7 @@ bad_get_user:
28137 xor %edx,%edx
28138 mov $(-EFAULT),%_ASM_AX
28139 ASM_CLAC
28140+ pax_force_retaddr
28141 ret
28142 CFI_ENDPROC
28143 END(bad_get_user)
28144@@ -124,6 +187,7 @@ bad_get_user_8:
28145 xor %ecx,%ecx
28146 mov $(-EFAULT),%_ASM_AX
28147 ASM_CLAC
28148+ pax_force_retaddr
28149 ret
28150 CFI_ENDPROC
28151 END(bad_get_user_8)
28152diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
28153index 54fcffe..7be149e 100644
28154--- a/arch/x86/lib/insn.c
28155+++ b/arch/x86/lib/insn.c
28156@@ -20,8 +20,10 @@
28157
28158 #ifdef __KERNEL__
28159 #include <linux/string.h>
28160+#include <asm/pgtable_types.h>
28161 #else
28162 #include <string.h>
28163+#define ktla_ktva(addr) addr
28164 #endif
28165 #include <asm/inat.h>
28166 #include <asm/insn.h>
28167@@ -53,8 +55,8 @@
28168 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
28169 {
28170 memset(insn, 0, sizeof(*insn));
28171- insn->kaddr = kaddr;
28172- insn->next_byte = kaddr;
28173+ insn->kaddr = ktla_ktva(kaddr);
28174+ insn->next_byte = ktla_ktva(kaddr);
28175 insn->x86_64 = x86_64 ? 1 : 0;
28176 insn->opnd_bytes = 4;
28177 if (x86_64)
28178diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
28179index 05a95e7..326f2fa 100644
28180--- a/arch/x86/lib/iomap_copy_64.S
28181+++ b/arch/x86/lib/iomap_copy_64.S
28182@@ -17,6 +17,7 @@
28183
28184 #include <linux/linkage.h>
28185 #include <asm/dwarf2.h>
28186+#include <asm/alternative-asm.h>
28187
28188 /*
28189 * override generic version in lib/iomap_copy.c
28190@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
28191 CFI_STARTPROC
28192 movl %edx,%ecx
28193 rep movsd
28194+ pax_force_retaddr
28195 ret
28196 CFI_ENDPROC
28197 ENDPROC(__iowrite32_copy)
28198diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
28199index 56313a3..9b59269 100644
28200--- a/arch/x86/lib/memcpy_64.S
28201+++ b/arch/x86/lib/memcpy_64.S
28202@@ -24,7 +24,7 @@
28203 * This gets patched over the unrolled variant (below) via the
28204 * alternative instructions framework:
28205 */
28206- .section .altinstr_replacement, "ax", @progbits
28207+ .section .altinstr_replacement, "a", @progbits
28208 .Lmemcpy_c:
28209 movq %rdi, %rax
28210 movq %rdx, %rcx
28211@@ -33,6 +33,7 @@
28212 rep movsq
28213 movl %edx, %ecx
28214 rep movsb
28215+ pax_force_retaddr
28216 ret
28217 .Lmemcpy_e:
28218 .previous
28219@@ -44,11 +45,12 @@
28220 * This gets patched over the unrolled variant (below) via the
28221 * alternative instructions framework:
28222 */
28223- .section .altinstr_replacement, "ax", @progbits
28224+ .section .altinstr_replacement, "a", @progbits
28225 .Lmemcpy_c_e:
28226 movq %rdi, %rax
28227 movq %rdx, %rcx
28228 rep movsb
28229+ pax_force_retaddr
28230 ret
28231 .Lmemcpy_e_e:
28232 .previous
28233@@ -76,13 +78,13 @@ ENTRY(memcpy)
28234 */
28235 movq 0*8(%rsi), %r8
28236 movq 1*8(%rsi), %r9
28237- movq 2*8(%rsi), %r10
28238+ movq 2*8(%rsi), %rcx
28239 movq 3*8(%rsi), %r11
28240 leaq 4*8(%rsi), %rsi
28241
28242 movq %r8, 0*8(%rdi)
28243 movq %r9, 1*8(%rdi)
28244- movq %r10, 2*8(%rdi)
28245+ movq %rcx, 2*8(%rdi)
28246 movq %r11, 3*8(%rdi)
28247 leaq 4*8(%rdi), %rdi
28248 jae .Lcopy_forward_loop
28249@@ -105,12 +107,12 @@ ENTRY(memcpy)
28250 subq $0x20, %rdx
28251 movq -1*8(%rsi), %r8
28252 movq -2*8(%rsi), %r9
28253- movq -3*8(%rsi), %r10
28254+ movq -3*8(%rsi), %rcx
28255 movq -4*8(%rsi), %r11
28256 leaq -4*8(%rsi), %rsi
28257 movq %r8, -1*8(%rdi)
28258 movq %r9, -2*8(%rdi)
28259- movq %r10, -3*8(%rdi)
28260+ movq %rcx, -3*8(%rdi)
28261 movq %r11, -4*8(%rdi)
28262 leaq -4*8(%rdi), %rdi
28263 jae .Lcopy_backward_loop
28264@@ -130,12 +132,13 @@ ENTRY(memcpy)
28265 */
28266 movq 0*8(%rsi), %r8
28267 movq 1*8(%rsi), %r9
28268- movq -2*8(%rsi, %rdx), %r10
28269+ movq -2*8(%rsi, %rdx), %rcx
28270 movq -1*8(%rsi, %rdx), %r11
28271 movq %r8, 0*8(%rdi)
28272 movq %r9, 1*8(%rdi)
28273- movq %r10, -2*8(%rdi, %rdx)
28274+ movq %rcx, -2*8(%rdi, %rdx)
28275 movq %r11, -1*8(%rdi, %rdx)
28276+ pax_force_retaddr
28277 retq
28278 .p2align 4
28279 .Lless_16bytes:
28280@@ -148,6 +151,7 @@ ENTRY(memcpy)
28281 movq -1*8(%rsi, %rdx), %r9
28282 movq %r8, 0*8(%rdi)
28283 movq %r9, -1*8(%rdi, %rdx)
28284+ pax_force_retaddr
28285 retq
28286 .p2align 4
28287 .Lless_8bytes:
28288@@ -161,6 +165,7 @@ ENTRY(memcpy)
28289 movl -4(%rsi, %rdx), %r8d
28290 movl %ecx, (%rdi)
28291 movl %r8d, -4(%rdi, %rdx)
28292+ pax_force_retaddr
28293 retq
28294 .p2align 4
28295 .Lless_3bytes:
28296@@ -179,6 +184,7 @@ ENTRY(memcpy)
28297 movb %cl, (%rdi)
28298
28299 .Lend:
28300+ pax_force_retaddr
28301 retq
28302 CFI_ENDPROC
28303 ENDPROC(memcpy)
28304diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
28305index 65268a6..5aa7815 100644
28306--- a/arch/x86/lib/memmove_64.S
28307+++ b/arch/x86/lib/memmove_64.S
28308@@ -61,13 +61,13 @@ ENTRY(memmove)
28309 5:
28310 sub $0x20, %rdx
28311 movq 0*8(%rsi), %r11
28312- movq 1*8(%rsi), %r10
28313+ movq 1*8(%rsi), %rcx
28314 movq 2*8(%rsi), %r9
28315 movq 3*8(%rsi), %r8
28316 leaq 4*8(%rsi), %rsi
28317
28318 movq %r11, 0*8(%rdi)
28319- movq %r10, 1*8(%rdi)
28320+ movq %rcx, 1*8(%rdi)
28321 movq %r9, 2*8(%rdi)
28322 movq %r8, 3*8(%rdi)
28323 leaq 4*8(%rdi), %rdi
28324@@ -81,10 +81,10 @@ ENTRY(memmove)
28325 4:
28326 movq %rdx, %rcx
28327 movq -8(%rsi, %rdx), %r11
28328- lea -8(%rdi, %rdx), %r10
28329+ lea -8(%rdi, %rdx), %r9
28330 shrq $3, %rcx
28331 rep movsq
28332- movq %r11, (%r10)
28333+ movq %r11, (%r9)
28334 jmp 13f
28335 .Lmemmove_end_forward:
28336
28337@@ -95,14 +95,14 @@ ENTRY(memmove)
28338 7:
28339 movq %rdx, %rcx
28340 movq (%rsi), %r11
28341- movq %rdi, %r10
28342+ movq %rdi, %r9
28343 leaq -8(%rsi, %rdx), %rsi
28344 leaq -8(%rdi, %rdx), %rdi
28345 shrq $3, %rcx
28346 std
28347 rep movsq
28348 cld
28349- movq %r11, (%r10)
28350+ movq %r11, (%r9)
28351 jmp 13f
28352
28353 /*
28354@@ -127,13 +127,13 @@ ENTRY(memmove)
28355 8:
28356 subq $0x20, %rdx
28357 movq -1*8(%rsi), %r11
28358- movq -2*8(%rsi), %r10
28359+ movq -2*8(%rsi), %rcx
28360 movq -3*8(%rsi), %r9
28361 movq -4*8(%rsi), %r8
28362 leaq -4*8(%rsi), %rsi
28363
28364 movq %r11, -1*8(%rdi)
28365- movq %r10, -2*8(%rdi)
28366+ movq %rcx, -2*8(%rdi)
28367 movq %r9, -3*8(%rdi)
28368 movq %r8, -4*8(%rdi)
28369 leaq -4*8(%rdi), %rdi
28370@@ -151,11 +151,11 @@ ENTRY(memmove)
28371 * Move data from 16 bytes to 31 bytes.
28372 */
28373 movq 0*8(%rsi), %r11
28374- movq 1*8(%rsi), %r10
28375+ movq 1*8(%rsi), %rcx
28376 movq -2*8(%rsi, %rdx), %r9
28377 movq -1*8(%rsi, %rdx), %r8
28378 movq %r11, 0*8(%rdi)
28379- movq %r10, 1*8(%rdi)
28380+ movq %rcx, 1*8(%rdi)
28381 movq %r9, -2*8(%rdi, %rdx)
28382 movq %r8, -1*8(%rdi, %rdx)
28383 jmp 13f
28384@@ -167,9 +167,9 @@ ENTRY(memmove)
28385 * Move data from 8 bytes to 15 bytes.
28386 */
28387 movq 0*8(%rsi), %r11
28388- movq -1*8(%rsi, %rdx), %r10
28389+ movq -1*8(%rsi, %rdx), %r9
28390 movq %r11, 0*8(%rdi)
28391- movq %r10, -1*8(%rdi, %rdx)
28392+ movq %r9, -1*8(%rdi, %rdx)
28393 jmp 13f
28394 10:
28395 cmpq $4, %rdx
28396@@ -178,9 +178,9 @@ ENTRY(memmove)
28397 * Move data from 4 bytes to 7 bytes.
28398 */
28399 movl (%rsi), %r11d
28400- movl -4(%rsi, %rdx), %r10d
28401+ movl -4(%rsi, %rdx), %r9d
28402 movl %r11d, (%rdi)
28403- movl %r10d, -4(%rdi, %rdx)
28404+ movl %r9d, -4(%rdi, %rdx)
28405 jmp 13f
28406 11:
28407 cmp $2, %rdx
28408@@ -189,9 +189,9 @@ ENTRY(memmove)
28409 * Move data from 2 bytes to 3 bytes.
28410 */
28411 movw (%rsi), %r11w
28412- movw -2(%rsi, %rdx), %r10w
28413+ movw -2(%rsi, %rdx), %r9w
28414 movw %r11w, (%rdi)
28415- movw %r10w, -2(%rdi, %rdx)
28416+ movw %r9w, -2(%rdi, %rdx)
28417 jmp 13f
28418 12:
28419 cmp $1, %rdx
28420@@ -202,14 +202,16 @@ ENTRY(memmove)
28421 movb (%rsi), %r11b
28422 movb %r11b, (%rdi)
28423 13:
28424+ pax_force_retaddr
28425 retq
28426 CFI_ENDPROC
28427
28428- .section .altinstr_replacement,"ax"
28429+ .section .altinstr_replacement,"a"
28430 .Lmemmove_begin_forward_efs:
28431 /* Forward moving data. */
28432 movq %rdx, %rcx
28433 rep movsb
28434+ pax_force_retaddr
28435 retq
28436 .Lmemmove_end_forward_efs:
28437 .previous
28438diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
28439index 2dcb380..50a78bc 100644
28440--- a/arch/x86/lib/memset_64.S
28441+++ b/arch/x86/lib/memset_64.S
28442@@ -16,7 +16,7 @@
28443 *
28444 * rax original destination
28445 */
28446- .section .altinstr_replacement, "ax", @progbits
28447+ .section .altinstr_replacement, "a", @progbits
28448 .Lmemset_c:
28449 movq %rdi,%r9
28450 movq %rdx,%rcx
28451@@ -30,6 +30,7 @@
28452 movl %edx,%ecx
28453 rep stosb
28454 movq %r9,%rax
28455+ pax_force_retaddr
28456 ret
28457 .Lmemset_e:
28458 .previous
28459@@ -45,13 +46,14 @@
28460 *
28461 * rax original destination
28462 */
28463- .section .altinstr_replacement, "ax", @progbits
28464+ .section .altinstr_replacement, "a", @progbits
28465 .Lmemset_c_e:
28466 movq %rdi,%r9
28467 movb %sil,%al
28468 movq %rdx,%rcx
28469 rep stosb
28470 movq %r9,%rax
28471+ pax_force_retaddr
28472 ret
28473 .Lmemset_e_e:
28474 .previous
28475@@ -59,7 +61,7 @@
28476 ENTRY(memset)
28477 ENTRY(__memset)
28478 CFI_STARTPROC
28479- movq %rdi,%r10
28480+ movq %rdi,%r11
28481
28482 /* expand byte value */
28483 movzbl %sil,%ecx
28484@@ -117,7 +119,8 @@ ENTRY(__memset)
28485 jnz .Lloop_1
28486
28487 .Lende:
28488- movq %r10,%rax
28489+ movq %r11,%rax
28490+ pax_force_retaddr
28491 ret
28492
28493 CFI_RESTORE_STATE
28494diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
28495index c9f2d9b..e7fd2c0 100644
28496--- a/arch/x86/lib/mmx_32.c
28497+++ b/arch/x86/lib/mmx_32.c
28498@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
28499 {
28500 void *p;
28501 int i;
28502+ unsigned long cr0;
28503
28504 if (unlikely(in_interrupt()))
28505 return __memcpy(to, from, len);
28506@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
28507 kernel_fpu_begin();
28508
28509 __asm__ __volatile__ (
28510- "1: prefetch (%0)\n" /* This set is 28 bytes */
28511- " prefetch 64(%0)\n"
28512- " prefetch 128(%0)\n"
28513- " prefetch 192(%0)\n"
28514- " prefetch 256(%0)\n"
28515+ "1: prefetch (%1)\n" /* This set is 28 bytes */
28516+ " prefetch 64(%1)\n"
28517+ " prefetch 128(%1)\n"
28518+ " prefetch 192(%1)\n"
28519+ " prefetch 256(%1)\n"
28520 "2: \n"
28521 ".section .fixup, \"ax\"\n"
28522- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
28523+ "3: \n"
28524+
28525+#ifdef CONFIG_PAX_KERNEXEC
28526+ " movl %%cr0, %0\n"
28527+ " movl %0, %%eax\n"
28528+ " andl $0xFFFEFFFF, %%eax\n"
28529+ " movl %%eax, %%cr0\n"
28530+#endif
28531+
28532+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
28533+
28534+#ifdef CONFIG_PAX_KERNEXEC
28535+ " movl %0, %%cr0\n"
28536+#endif
28537+
28538 " jmp 2b\n"
28539 ".previous\n"
28540 _ASM_EXTABLE(1b, 3b)
28541- : : "r" (from));
28542+ : "=&r" (cr0) : "r" (from) : "ax");
28543
28544 for ( ; i > 5; i--) {
28545 __asm__ __volatile__ (
28546- "1: prefetch 320(%0)\n"
28547- "2: movq (%0), %%mm0\n"
28548- " movq 8(%0), %%mm1\n"
28549- " movq 16(%0), %%mm2\n"
28550- " movq 24(%0), %%mm3\n"
28551- " movq %%mm0, (%1)\n"
28552- " movq %%mm1, 8(%1)\n"
28553- " movq %%mm2, 16(%1)\n"
28554- " movq %%mm3, 24(%1)\n"
28555- " movq 32(%0), %%mm0\n"
28556- " movq 40(%0), %%mm1\n"
28557- " movq 48(%0), %%mm2\n"
28558- " movq 56(%0), %%mm3\n"
28559- " movq %%mm0, 32(%1)\n"
28560- " movq %%mm1, 40(%1)\n"
28561- " movq %%mm2, 48(%1)\n"
28562- " movq %%mm3, 56(%1)\n"
28563+ "1: prefetch 320(%1)\n"
28564+ "2: movq (%1), %%mm0\n"
28565+ " movq 8(%1), %%mm1\n"
28566+ " movq 16(%1), %%mm2\n"
28567+ " movq 24(%1), %%mm3\n"
28568+ " movq %%mm0, (%2)\n"
28569+ " movq %%mm1, 8(%2)\n"
28570+ " movq %%mm2, 16(%2)\n"
28571+ " movq %%mm3, 24(%2)\n"
28572+ " movq 32(%1), %%mm0\n"
28573+ " movq 40(%1), %%mm1\n"
28574+ " movq 48(%1), %%mm2\n"
28575+ " movq 56(%1), %%mm3\n"
28576+ " movq %%mm0, 32(%2)\n"
28577+ " movq %%mm1, 40(%2)\n"
28578+ " movq %%mm2, 48(%2)\n"
28579+ " movq %%mm3, 56(%2)\n"
28580 ".section .fixup, \"ax\"\n"
28581- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
28582+ "3:\n"
28583+
28584+#ifdef CONFIG_PAX_KERNEXEC
28585+ " movl %%cr0, %0\n"
28586+ " movl %0, %%eax\n"
28587+ " andl $0xFFFEFFFF, %%eax\n"
28588+ " movl %%eax, %%cr0\n"
28589+#endif
28590+
28591+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
28592+
28593+#ifdef CONFIG_PAX_KERNEXEC
28594+ " movl %0, %%cr0\n"
28595+#endif
28596+
28597 " jmp 2b\n"
28598 ".previous\n"
28599 _ASM_EXTABLE(1b, 3b)
28600- : : "r" (from), "r" (to) : "memory");
28601+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
28602
28603 from += 64;
28604 to += 64;
28605@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
28606 static void fast_copy_page(void *to, void *from)
28607 {
28608 int i;
28609+ unsigned long cr0;
28610
28611 kernel_fpu_begin();
28612
28613@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
28614 * but that is for later. -AV
28615 */
28616 __asm__ __volatile__(
28617- "1: prefetch (%0)\n"
28618- " prefetch 64(%0)\n"
28619- " prefetch 128(%0)\n"
28620- " prefetch 192(%0)\n"
28621- " prefetch 256(%0)\n"
28622+ "1: prefetch (%1)\n"
28623+ " prefetch 64(%1)\n"
28624+ " prefetch 128(%1)\n"
28625+ " prefetch 192(%1)\n"
28626+ " prefetch 256(%1)\n"
28627 "2: \n"
28628 ".section .fixup, \"ax\"\n"
28629- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
28630+ "3: \n"
28631+
28632+#ifdef CONFIG_PAX_KERNEXEC
28633+ " movl %%cr0, %0\n"
28634+ " movl %0, %%eax\n"
28635+ " andl $0xFFFEFFFF, %%eax\n"
28636+ " movl %%eax, %%cr0\n"
28637+#endif
28638+
28639+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
28640+
28641+#ifdef CONFIG_PAX_KERNEXEC
28642+ " movl %0, %%cr0\n"
28643+#endif
28644+
28645 " jmp 2b\n"
28646 ".previous\n"
28647- _ASM_EXTABLE(1b, 3b) : : "r" (from));
28648+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
28649
28650 for (i = 0; i < (4096-320)/64; i++) {
28651 __asm__ __volatile__ (
28652- "1: prefetch 320(%0)\n"
28653- "2: movq (%0), %%mm0\n"
28654- " movntq %%mm0, (%1)\n"
28655- " movq 8(%0), %%mm1\n"
28656- " movntq %%mm1, 8(%1)\n"
28657- " movq 16(%0), %%mm2\n"
28658- " movntq %%mm2, 16(%1)\n"
28659- " movq 24(%0), %%mm3\n"
28660- " movntq %%mm3, 24(%1)\n"
28661- " movq 32(%0), %%mm4\n"
28662- " movntq %%mm4, 32(%1)\n"
28663- " movq 40(%0), %%mm5\n"
28664- " movntq %%mm5, 40(%1)\n"
28665- " movq 48(%0), %%mm6\n"
28666- " movntq %%mm6, 48(%1)\n"
28667- " movq 56(%0), %%mm7\n"
28668- " movntq %%mm7, 56(%1)\n"
28669+ "1: prefetch 320(%1)\n"
28670+ "2: movq (%1), %%mm0\n"
28671+ " movntq %%mm0, (%2)\n"
28672+ " movq 8(%1), %%mm1\n"
28673+ " movntq %%mm1, 8(%2)\n"
28674+ " movq 16(%1), %%mm2\n"
28675+ " movntq %%mm2, 16(%2)\n"
28676+ " movq 24(%1), %%mm3\n"
28677+ " movntq %%mm3, 24(%2)\n"
28678+ " movq 32(%1), %%mm4\n"
28679+ " movntq %%mm4, 32(%2)\n"
28680+ " movq 40(%1), %%mm5\n"
28681+ " movntq %%mm5, 40(%2)\n"
28682+ " movq 48(%1), %%mm6\n"
28683+ " movntq %%mm6, 48(%2)\n"
28684+ " movq 56(%1), %%mm7\n"
28685+ " movntq %%mm7, 56(%2)\n"
28686 ".section .fixup, \"ax\"\n"
28687- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
28688+ "3:\n"
28689+
28690+#ifdef CONFIG_PAX_KERNEXEC
28691+ " movl %%cr0, %0\n"
28692+ " movl %0, %%eax\n"
28693+ " andl $0xFFFEFFFF, %%eax\n"
28694+ " movl %%eax, %%cr0\n"
28695+#endif
28696+
28697+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
28698+
28699+#ifdef CONFIG_PAX_KERNEXEC
28700+ " movl %0, %%cr0\n"
28701+#endif
28702+
28703 " jmp 2b\n"
28704 ".previous\n"
28705- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
28706+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
28707
28708 from += 64;
28709 to += 64;
28710@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
28711 static void fast_copy_page(void *to, void *from)
28712 {
28713 int i;
28714+ unsigned long cr0;
28715
28716 kernel_fpu_begin();
28717
28718 __asm__ __volatile__ (
28719- "1: prefetch (%0)\n"
28720- " prefetch 64(%0)\n"
28721- " prefetch 128(%0)\n"
28722- " prefetch 192(%0)\n"
28723- " prefetch 256(%0)\n"
28724+ "1: prefetch (%1)\n"
28725+ " prefetch 64(%1)\n"
28726+ " prefetch 128(%1)\n"
28727+ " prefetch 192(%1)\n"
28728+ " prefetch 256(%1)\n"
28729 "2: \n"
28730 ".section .fixup, \"ax\"\n"
28731- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
28732+ "3: \n"
28733+
28734+#ifdef CONFIG_PAX_KERNEXEC
28735+ " movl %%cr0, %0\n"
28736+ " movl %0, %%eax\n"
28737+ " andl $0xFFFEFFFF, %%eax\n"
28738+ " movl %%eax, %%cr0\n"
28739+#endif
28740+
28741+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
28742+
28743+#ifdef CONFIG_PAX_KERNEXEC
28744+ " movl %0, %%cr0\n"
28745+#endif
28746+
28747 " jmp 2b\n"
28748 ".previous\n"
28749- _ASM_EXTABLE(1b, 3b) : : "r" (from));
28750+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
28751
28752 for (i = 0; i < 4096/64; i++) {
28753 __asm__ __volatile__ (
28754- "1: prefetch 320(%0)\n"
28755- "2: movq (%0), %%mm0\n"
28756- " movq 8(%0), %%mm1\n"
28757- " movq 16(%0), %%mm2\n"
28758- " movq 24(%0), %%mm3\n"
28759- " movq %%mm0, (%1)\n"
28760- " movq %%mm1, 8(%1)\n"
28761- " movq %%mm2, 16(%1)\n"
28762- " movq %%mm3, 24(%1)\n"
28763- " movq 32(%0), %%mm0\n"
28764- " movq 40(%0), %%mm1\n"
28765- " movq 48(%0), %%mm2\n"
28766- " movq 56(%0), %%mm3\n"
28767- " movq %%mm0, 32(%1)\n"
28768- " movq %%mm1, 40(%1)\n"
28769- " movq %%mm2, 48(%1)\n"
28770- " movq %%mm3, 56(%1)\n"
28771+ "1: prefetch 320(%1)\n"
28772+ "2: movq (%1), %%mm0\n"
28773+ " movq 8(%1), %%mm1\n"
28774+ " movq 16(%1), %%mm2\n"
28775+ " movq 24(%1), %%mm3\n"
28776+ " movq %%mm0, (%2)\n"
28777+ " movq %%mm1, 8(%2)\n"
28778+ " movq %%mm2, 16(%2)\n"
28779+ " movq %%mm3, 24(%2)\n"
28780+ " movq 32(%1), %%mm0\n"
28781+ " movq 40(%1), %%mm1\n"
28782+ " movq 48(%1), %%mm2\n"
28783+ " movq 56(%1), %%mm3\n"
28784+ " movq %%mm0, 32(%2)\n"
28785+ " movq %%mm1, 40(%2)\n"
28786+ " movq %%mm2, 48(%2)\n"
28787+ " movq %%mm3, 56(%2)\n"
28788 ".section .fixup, \"ax\"\n"
28789- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
28790+ "3:\n"
28791+
28792+#ifdef CONFIG_PAX_KERNEXEC
28793+ " movl %%cr0, %0\n"
28794+ " movl %0, %%eax\n"
28795+ " andl $0xFFFEFFFF, %%eax\n"
28796+ " movl %%eax, %%cr0\n"
28797+#endif
28798+
28799+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
28800+
28801+#ifdef CONFIG_PAX_KERNEXEC
28802+ " movl %0, %%cr0\n"
28803+#endif
28804+
28805 " jmp 2b\n"
28806 ".previous\n"
28807 _ASM_EXTABLE(1b, 3b)
28808- : : "r" (from), "r" (to) : "memory");
28809+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
28810
28811 from += 64;
28812 to += 64;
28813diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
28814index f6d13ee..aca5f0b 100644
28815--- a/arch/x86/lib/msr-reg.S
28816+++ b/arch/x86/lib/msr-reg.S
28817@@ -3,6 +3,7 @@
28818 #include <asm/dwarf2.h>
28819 #include <asm/asm.h>
28820 #include <asm/msr.h>
28821+#include <asm/alternative-asm.h>
28822
28823 #ifdef CONFIG_X86_64
28824 /*
28825@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
28826 CFI_STARTPROC
28827 pushq_cfi %rbx
28828 pushq_cfi %rbp
28829- movq %rdi, %r10 /* Save pointer */
28830+ movq %rdi, %r9 /* Save pointer */
28831 xorl %r11d, %r11d /* Return value */
28832 movl (%rdi), %eax
28833 movl 4(%rdi), %ecx
28834@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
28835 movl 28(%rdi), %edi
28836 CFI_REMEMBER_STATE
28837 1: \op
28838-2: movl %eax, (%r10)
28839+2: movl %eax, (%r9)
28840 movl %r11d, %eax /* Return value */
28841- movl %ecx, 4(%r10)
28842- movl %edx, 8(%r10)
28843- movl %ebx, 12(%r10)
28844- movl %ebp, 20(%r10)
28845- movl %esi, 24(%r10)
28846- movl %edi, 28(%r10)
28847+ movl %ecx, 4(%r9)
28848+ movl %edx, 8(%r9)
28849+ movl %ebx, 12(%r9)
28850+ movl %ebp, 20(%r9)
28851+ movl %esi, 24(%r9)
28852+ movl %edi, 28(%r9)
28853 popq_cfi %rbp
28854 popq_cfi %rbx
28855+ pax_force_retaddr
28856 ret
28857 3:
28858 CFI_RESTORE_STATE
28859diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
28860index fc6ba17..d4d989d 100644
28861--- a/arch/x86/lib/putuser.S
28862+++ b/arch/x86/lib/putuser.S
28863@@ -16,7 +16,9 @@
28864 #include <asm/errno.h>
28865 #include <asm/asm.h>
28866 #include <asm/smap.h>
28867-
28868+#include <asm/segment.h>
28869+#include <asm/pgtable.h>
28870+#include <asm/alternative-asm.h>
28871
28872 /*
28873 * __put_user_X
28874@@ -30,57 +32,125 @@
28875 * as they get called from within inline assembly.
28876 */
28877
28878-#define ENTER CFI_STARTPROC ; \
28879- GET_THREAD_INFO(%_ASM_BX)
28880-#define EXIT ASM_CLAC ; \
28881- ret ; \
28882+#define ENTER CFI_STARTPROC
28883+#define EXIT ASM_CLAC ; \
28884+ pax_force_retaddr ; \
28885+ ret ; \
28886 CFI_ENDPROC
28887
28888+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28889+#define _DEST %_ASM_CX,%_ASM_BX
28890+#else
28891+#define _DEST %_ASM_CX
28892+#endif
28893+
28894+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28895+#define __copyuser_seg gs;
28896+#else
28897+#define __copyuser_seg
28898+#endif
28899+
28900 .text
28901 ENTRY(__put_user_1)
28902 ENTER
28903+
28904+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28905+ GET_THREAD_INFO(%_ASM_BX)
28906 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
28907 jae bad_put_user
28908 ASM_STAC
28909-1: movb %al,(%_ASM_CX)
28910+
28911+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28912+ mov pax_user_shadow_base,%_ASM_BX
28913+ cmp %_ASM_BX,%_ASM_CX
28914+ jb 1234f
28915+ xor %ebx,%ebx
28916+1234:
28917+#endif
28918+
28919+#endif
28920+
28921+1: __copyuser_seg movb %al,(_DEST)
28922 xor %eax,%eax
28923 EXIT
28924 ENDPROC(__put_user_1)
28925
28926 ENTRY(__put_user_2)
28927 ENTER
28928+
28929+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28930+ GET_THREAD_INFO(%_ASM_BX)
28931 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
28932 sub $1,%_ASM_BX
28933 cmp %_ASM_BX,%_ASM_CX
28934 jae bad_put_user
28935 ASM_STAC
28936-2: movw %ax,(%_ASM_CX)
28937+
28938+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28939+ mov pax_user_shadow_base,%_ASM_BX
28940+ cmp %_ASM_BX,%_ASM_CX
28941+ jb 1234f
28942+ xor %ebx,%ebx
28943+1234:
28944+#endif
28945+
28946+#endif
28947+
28948+2: __copyuser_seg movw %ax,(_DEST)
28949 xor %eax,%eax
28950 EXIT
28951 ENDPROC(__put_user_2)
28952
28953 ENTRY(__put_user_4)
28954 ENTER
28955+
28956+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28957+ GET_THREAD_INFO(%_ASM_BX)
28958 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
28959 sub $3,%_ASM_BX
28960 cmp %_ASM_BX,%_ASM_CX
28961 jae bad_put_user
28962 ASM_STAC
28963-3: movl %eax,(%_ASM_CX)
28964+
28965+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28966+ mov pax_user_shadow_base,%_ASM_BX
28967+ cmp %_ASM_BX,%_ASM_CX
28968+ jb 1234f
28969+ xor %ebx,%ebx
28970+1234:
28971+#endif
28972+
28973+#endif
28974+
28975+3: __copyuser_seg movl %eax,(_DEST)
28976 xor %eax,%eax
28977 EXIT
28978 ENDPROC(__put_user_4)
28979
28980 ENTRY(__put_user_8)
28981 ENTER
28982+
28983+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28984+ GET_THREAD_INFO(%_ASM_BX)
28985 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
28986 sub $7,%_ASM_BX
28987 cmp %_ASM_BX,%_ASM_CX
28988 jae bad_put_user
28989 ASM_STAC
28990-4: mov %_ASM_AX,(%_ASM_CX)
28991+
28992+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28993+ mov pax_user_shadow_base,%_ASM_BX
28994+ cmp %_ASM_BX,%_ASM_CX
28995+ jb 1234f
28996+ xor %ebx,%ebx
28997+1234:
28998+#endif
28999+
29000+#endif
29001+
29002+4: __copyuser_seg mov %_ASM_AX,(_DEST)
29003 #ifdef CONFIG_X86_32
29004-5: movl %edx,4(%_ASM_CX)
29005+5: __copyuser_seg movl %edx,4(_DEST)
29006 #endif
29007 xor %eax,%eax
29008 EXIT
29009diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
29010index 1cad221..de671ee 100644
29011--- a/arch/x86/lib/rwlock.S
29012+++ b/arch/x86/lib/rwlock.S
29013@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
29014 FRAME
29015 0: LOCK_PREFIX
29016 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
29017+
29018+#ifdef CONFIG_PAX_REFCOUNT
29019+ jno 1234f
29020+ LOCK_PREFIX
29021+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
29022+ int $4
29023+1234:
29024+ _ASM_EXTABLE(1234b, 1234b)
29025+#endif
29026+
29027 1: rep; nop
29028 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
29029 jne 1b
29030 LOCK_PREFIX
29031 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
29032+
29033+#ifdef CONFIG_PAX_REFCOUNT
29034+ jno 1234f
29035+ LOCK_PREFIX
29036+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
29037+ int $4
29038+1234:
29039+ _ASM_EXTABLE(1234b, 1234b)
29040+#endif
29041+
29042 jnz 0b
29043 ENDFRAME
29044+ pax_force_retaddr
29045 ret
29046 CFI_ENDPROC
29047 END(__write_lock_failed)
29048@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
29049 FRAME
29050 0: LOCK_PREFIX
29051 READ_LOCK_SIZE(inc) (%__lock_ptr)
29052+
29053+#ifdef CONFIG_PAX_REFCOUNT
29054+ jno 1234f
29055+ LOCK_PREFIX
29056+ READ_LOCK_SIZE(dec) (%__lock_ptr)
29057+ int $4
29058+1234:
29059+ _ASM_EXTABLE(1234b, 1234b)
29060+#endif
29061+
29062 1: rep; nop
29063 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
29064 js 1b
29065 LOCK_PREFIX
29066 READ_LOCK_SIZE(dec) (%__lock_ptr)
29067+
29068+#ifdef CONFIG_PAX_REFCOUNT
29069+ jno 1234f
29070+ LOCK_PREFIX
29071+ READ_LOCK_SIZE(inc) (%__lock_ptr)
29072+ int $4
29073+1234:
29074+ _ASM_EXTABLE(1234b, 1234b)
29075+#endif
29076+
29077 js 0b
29078 ENDFRAME
29079+ pax_force_retaddr
29080 ret
29081 CFI_ENDPROC
29082 END(__read_lock_failed)
29083diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
29084index 5dff5f0..cadebf4 100644
29085--- a/arch/x86/lib/rwsem.S
29086+++ b/arch/x86/lib/rwsem.S
29087@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
29088 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
29089 CFI_RESTORE __ASM_REG(dx)
29090 restore_common_regs
29091+ pax_force_retaddr
29092 ret
29093 CFI_ENDPROC
29094 ENDPROC(call_rwsem_down_read_failed)
29095@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
29096 movq %rax,%rdi
29097 call rwsem_down_write_failed
29098 restore_common_regs
29099+ pax_force_retaddr
29100 ret
29101 CFI_ENDPROC
29102 ENDPROC(call_rwsem_down_write_failed)
29103@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
29104 movq %rax,%rdi
29105 call rwsem_wake
29106 restore_common_regs
29107-1: ret
29108+1: pax_force_retaddr
29109+ ret
29110 CFI_ENDPROC
29111 ENDPROC(call_rwsem_wake)
29112
29113@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
29114 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
29115 CFI_RESTORE __ASM_REG(dx)
29116 restore_common_regs
29117+ pax_force_retaddr
29118 ret
29119 CFI_ENDPROC
29120 ENDPROC(call_rwsem_downgrade_wake)
29121diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
29122index a63efd6..ccecad8 100644
29123--- a/arch/x86/lib/thunk_64.S
29124+++ b/arch/x86/lib/thunk_64.S
29125@@ -8,6 +8,7 @@
29126 #include <linux/linkage.h>
29127 #include <asm/dwarf2.h>
29128 #include <asm/calling.h>
29129+#include <asm/alternative-asm.h>
29130
29131 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
29132 .macro THUNK name, func, put_ret_addr_in_rdi=0
29133@@ -41,5 +42,6 @@
29134 SAVE_ARGS
29135 restore:
29136 RESTORE_ARGS
29137+ pax_force_retaddr
29138 ret
29139 CFI_ENDPROC
29140diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
29141index 3eb18ac..6890bc3 100644
29142--- a/arch/x86/lib/usercopy_32.c
29143+++ b/arch/x86/lib/usercopy_32.c
29144@@ -42,11 +42,13 @@ do { \
29145 int __d0; \
29146 might_fault(); \
29147 __asm__ __volatile__( \
29148+ __COPYUSER_SET_ES \
29149 ASM_STAC "\n" \
29150 "0: rep; stosl\n" \
29151 " movl %2,%0\n" \
29152 "1: rep; stosb\n" \
29153 "2: " ASM_CLAC "\n" \
29154+ __COPYUSER_RESTORE_ES \
29155 ".section .fixup,\"ax\"\n" \
29156 "3: lea 0(%2,%0,4),%0\n" \
29157 " jmp 2b\n" \
29158@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
29159
29160 #ifdef CONFIG_X86_INTEL_USERCOPY
29161 static unsigned long
29162-__copy_user_intel(void __user *to, const void *from, unsigned long size)
29163+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
29164 {
29165 int d0, d1;
29166 __asm__ __volatile__(
29167@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
29168 " .align 2,0x90\n"
29169 "3: movl 0(%4), %%eax\n"
29170 "4: movl 4(%4), %%edx\n"
29171- "5: movl %%eax, 0(%3)\n"
29172- "6: movl %%edx, 4(%3)\n"
29173+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
29174+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
29175 "7: movl 8(%4), %%eax\n"
29176 "8: movl 12(%4),%%edx\n"
29177- "9: movl %%eax, 8(%3)\n"
29178- "10: movl %%edx, 12(%3)\n"
29179+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
29180+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
29181 "11: movl 16(%4), %%eax\n"
29182 "12: movl 20(%4), %%edx\n"
29183- "13: movl %%eax, 16(%3)\n"
29184- "14: movl %%edx, 20(%3)\n"
29185+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
29186+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
29187 "15: movl 24(%4), %%eax\n"
29188 "16: movl 28(%4), %%edx\n"
29189- "17: movl %%eax, 24(%3)\n"
29190- "18: movl %%edx, 28(%3)\n"
29191+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
29192+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
29193 "19: movl 32(%4), %%eax\n"
29194 "20: movl 36(%4), %%edx\n"
29195- "21: movl %%eax, 32(%3)\n"
29196- "22: movl %%edx, 36(%3)\n"
29197+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
29198+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
29199 "23: movl 40(%4), %%eax\n"
29200 "24: movl 44(%4), %%edx\n"
29201- "25: movl %%eax, 40(%3)\n"
29202- "26: movl %%edx, 44(%3)\n"
29203+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
29204+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
29205 "27: movl 48(%4), %%eax\n"
29206 "28: movl 52(%4), %%edx\n"
29207- "29: movl %%eax, 48(%3)\n"
29208- "30: movl %%edx, 52(%3)\n"
29209+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
29210+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
29211 "31: movl 56(%4), %%eax\n"
29212 "32: movl 60(%4), %%edx\n"
29213- "33: movl %%eax, 56(%3)\n"
29214- "34: movl %%edx, 60(%3)\n"
29215+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
29216+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
29217 " addl $-64, %0\n"
29218 " addl $64, %4\n"
29219 " addl $64, %3\n"
29220@@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
29221 " shrl $2, %0\n"
29222 " andl $3, %%eax\n"
29223 " cld\n"
29224+ __COPYUSER_SET_ES
29225 "99: rep; movsl\n"
29226 "36: movl %%eax, %0\n"
29227 "37: rep; movsb\n"
29228 "100:\n"
29229+ __COPYUSER_RESTORE_ES
29230 ".section .fixup,\"ax\"\n"
29231 "101: lea 0(%%eax,%0,4),%0\n"
29232 " jmp 100b\n"
29233@@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
29234 }
29235
29236 static unsigned long
29237+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
29238+{
29239+ int d0, d1;
29240+ __asm__ __volatile__(
29241+ " .align 2,0x90\n"
29242+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
29243+ " cmpl $67, %0\n"
29244+ " jbe 3f\n"
29245+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
29246+ " .align 2,0x90\n"
29247+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
29248+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
29249+ "5: movl %%eax, 0(%3)\n"
29250+ "6: movl %%edx, 4(%3)\n"
29251+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
29252+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
29253+ "9: movl %%eax, 8(%3)\n"
29254+ "10: movl %%edx, 12(%3)\n"
29255+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
29256+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
29257+ "13: movl %%eax, 16(%3)\n"
29258+ "14: movl %%edx, 20(%3)\n"
29259+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
29260+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
29261+ "17: movl %%eax, 24(%3)\n"
29262+ "18: movl %%edx, 28(%3)\n"
29263+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
29264+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
29265+ "21: movl %%eax, 32(%3)\n"
29266+ "22: movl %%edx, 36(%3)\n"
29267+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
29268+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
29269+ "25: movl %%eax, 40(%3)\n"
29270+ "26: movl %%edx, 44(%3)\n"
29271+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
29272+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
29273+ "29: movl %%eax, 48(%3)\n"
29274+ "30: movl %%edx, 52(%3)\n"
29275+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
29276+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
29277+ "33: movl %%eax, 56(%3)\n"
29278+ "34: movl %%edx, 60(%3)\n"
29279+ " addl $-64, %0\n"
29280+ " addl $64, %4\n"
29281+ " addl $64, %3\n"
29282+ " cmpl $63, %0\n"
29283+ " ja 1b\n"
29284+ "35: movl %0, %%eax\n"
29285+ " shrl $2, %0\n"
29286+ " andl $3, %%eax\n"
29287+ " cld\n"
29288+ "99: rep; "__copyuser_seg" movsl\n"
29289+ "36: movl %%eax, %0\n"
29290+ "37: rep; "__copyuser_seg" movsb\n"
29291+ "100:\n"
29292+ ".section .fixup,\"ax\"\n"
29293+ "101: lea 0(%%eax,%0,4),%0\n"
29294+ " jmp 100b\n"
29295+ ".previous\n"
29296+ _ASM_EXTABLE(1b,100b)
29297+ _ASM_EXTABLE(2b,100b)
29298+ _ASM_EXTABLE(3b,100b)
29299+ _ASM_EXTABLE(4b,100b)
29300+ _ASM_EXTABLE(5b,100b)
29301+ _ASM_EXTABLE(6b,100b)
29302+ _ASM_EXTABLE(7b,100b)
29303+ _ASM_EXTABLE(8b,100b)
29304+ _ASM_EXTABLE(9b,100b)
29305+ _ASM_EXTABLE(10b,100b)
29306+ _ASM_EXTABLE(11b,100b)
29307+ _ASM_EXTABLE(12b,100b)
29308+ _ASM_EXTABLE(13b,100b)
29309+ _ASM_EXTABLE(14b,100b)
29310+ _ASM_EXTABLE(15b,100b)
29311+ _ASM_EXTABLE(16b,100b)
29312+ _ASM_EXTABLE(17b,100b)
29313+ _ASM_EXTABLE(18b,100b)
29314+ _ASM_EXTABLE(19b,100b)
29315+ _ASM_EXTABLE(20b,100b)
29316+ _ASM_EXTABLE(21b,100b)
29317+ _ASM_EXTABLE(22b,100b)
29318+ _ASM_EXTABLE(23b,100b)
29319+ _ASM_EXTABLE(24b,100b)
29320+ _ASM_EXTABLE(25b,100b)
29321+ _ASM_EXTABLE(26b,100b)
29322+ _ASM_EXTABLE(27b,100b)
29323+ _ASM_EXTABLE(28b,100b)
29324+ _ASM_EXTABLE(29b,100b)
29325+ _ASM_EXTABLE(30b,100b)
29326+ _ASM_EXTABLE(31b,100b)
29327+ _ASM_EXTABLE(32b,100b)
29328+ _ASM_EXTABLE(33b,100b)
29329+ _ASM_EXTABLE(34b,100b)
29330+ _ASM_EXTABLE(35b,100b)
29331+ _ASM_EXTABLE(36b,100b)
29332+ _ASM_EXTABLE(37b,100b)
29333+ _ASM_EXTABLE(99b,101b)
29334+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
29335+ : "1"(to), "2"(from), "0"(size)
29336+ : "eax", "edx", "memory");
29337+ return size;
29338+}
29339+
29340+static unsigned long __size_overflow(3)
29341 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
29342 {
29343 int d0, d1;
29344 __asm__ __volatile__(
29345 " .align 2,0x90\n"
29346- "0: movl 32(%4), %%eax\n"
29347+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
29348 " cmpl $67, %0\n"
29349 " jbe 2f\n"
29350- "1: movl 64(%4), %%eax\n"
29351+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
29352 " .align 2,0x90\n"
29353- "2: movl 0(%4), %%eax\n"
29354- "21: movl 4(%4), %%edx\n"
29355+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
29356+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
29357 " movl %%eax, 0(%3)\n"
29358 " movl %%edx, 4(%3)\n"
29359- "3: movl 8(%4), %%eax\n"
29360- "31: movl 12(%4),%%edx\n"
29361+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
29362+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
29363 " movl %%eax, 8(%3)\n"
29364 " movl %%edx, 12(%3)\n"
29365- "4: movl 16(%4), %%eax\n"
29366- "41: movl 20(%4), %%edx\n"
29367+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
29368+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
29369 " movl %%eax, 16(%3)\n"
29370 " movl %%edx, 20(%3)\n"
29371- "10: movl 24(%4), %%eax\n"
29372- "51: movl 28(%4), %%edx\n"
29373+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
29374+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
29375 " movl %%eax, 24(%3)\n"
29376 " movl %%edx, 28(%3)\n"
29377- "11: movl 32(%4), %%eax\n"
29378- "61: movl 36(%4), %%edx\n"
29379+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
29380+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
29381 " movl %%eax, 32(%3)\n"
29382 " movl %%edx, 36(%3)\n"
29383- "12: movl 40(%4), %%eax\n"
29384- "71: movl 44(%4), %%edx\n"
29385+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
29386+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
29387 " movl %%eax, 40(%3)\n"
29388 " movl %%edx, 44(%3)\n"
29389- "13: movl 48(%4), %%eax\n"
29390- "81: movl 52(%4), %%edx\n"
29391+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
29392+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
29393 " movl %%eax, 48(%3)\n"
29394 " movl %%edx, 52(%3)\n"
29395- "14: movl 56(%4), %%eax\n"
29396- "91: movl 60(%4), %%edx\n"
29397+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
29398+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
29399 " movl %%eax, 56(%3)\n"
29400 " movl %%edx, 60(%3)\n"
29401 " addl $-64, %0\n"
29402@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
29403 " shrl $2, %0\n"
29404 " andl $3, %%eax\n"
29405 " cld\n"
29406- "6: rep; movsl\n"
29407+ "6: rep; "__copyuser_seg" movsl\n"
29408 " movl %%eax,%0\n"
29409- "7: rep; movsb\n"
29410+ "7: rep; "__copyuser_seg" movsb\n"
29411 "8:\n"
29412 ".section .fixup,\"ax\"\n"
29413 "9: lea 0(%%eax,%0,4),%0\n"
29414@@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
29415 * hyoshiok@miraclelinux.com
29416 */
29417
29418-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
29419+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
29420 const void __user *from, unsigned long size)
29421 {
29422 int d0, d1;
29423
29424 __asm__ __volatile__(
29425 " .align 2,0x90\n"
29426- "0: movl 32(%4), %%eax\n"
29427+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
29428 " cmpl $67, %0\n"
29429 " jbe 2f\n"
29430- "1: movl 64(%4), %%eax\n"
29431+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
29432 " .align 2,0x90\n"
29433- "2: movl 0(%4), %%eax\n"
29434- "21: movl 4(%4), %%edx\n"
29435+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
29436+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
29437 " movnti %%eax, 0(%3)\n"
29438 " movnti %%edx, 4(%3)\n"
29439- "3: movl 8(%4), %%eax\n"
29440- "31: movl 12(%4),%%edx\n"
29441+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
29442+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
29443 " movnti %%eax, 8(%3)\n"
29444 " movnti %%edx, 12(%3)\n"
29445- "4: movl 16(%4), %%eax\n"
29446- "41: movl 20(%4), %%edx\n"
29447+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
29448+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
29449 " movnti %%eax, 16(%3)\n"
29450 " movnti %%edx, 20(%3)\n"
29451- "10: movl 24(%4), %%eax\n"
29452- "51: movl 28(%4), %%edx\n"
29453+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
29454+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
29455 " movnti %%eax, 24(%3)\n"
29456 " movnti %%edx, 28(%3)\n"
29457- "11: movl 32(%4), %%eax\n"
29458- "61: movl 36(%4), %%edx\n"
29459+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
29460+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
29461 " movnti %%eax, 32(%3)\n"
29462 " movnti %%edx, 36(%3)\n"
29463- "12: movl 40(%4), %%eax\n"
29464- "71: movl 44(%4), %%edx\n"
29465+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
29466+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
29467 " movnti %%eax, 40(%3)\n"
29468 " movnti %%edx, 44(%3)\n"
29469- "13: movl 48(%4), %%eax\n"
29470- "81: movl 52(%4), %%edx\n"
29471+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
29472+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
29473 " movnti %%eax, 48(%3)\n"
29474 " movnti %%edx, 52(%3)\n"
29475- "14: movl 56(%4), %%eax\n"
29476- "91: movl 60(%4), %%edx\n"
29477+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
29478+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
29479 " movnti %%eax, 56(%3)\n"
29480 " movnti %%edx, 60(%3)\n"
29481 " addl $-64, %0\n"
29482@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
29483 " shrl $2, %0\n"
29484 " andl $3, %%eax\n"
29485 " cld\n"
29486- "6: rep; movsl\n"
29487+ "6: rep; "__copyuser_seg" movsl\n"
29488 " movl %%eax,%0\n"
29489- "7: rep; movsb\n"
29490+ "7: rep; "__copyuser_seg" movsb\n"
29491 "8:\n"
29492 ".section .fixup,\"ax\"\n"
29493 "9: lea 0(%%eax,%0,4),%0\n"
29494@@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
29495 return size;
29496 }
29497
29498-static unsigned long __copy_user_intel_nocache(void *to,
29499+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
29500 const void __user *from, unsigned long size)
29501 {
29502 int d0, d1;
29503
29504 __asm__ __volatile__(
29505 " .align 2,0x90\n"
29506- "0: movl 32(%4), %%eax\n"
29507+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
29508 " cmpl $67, %0\n"
29509 " jbe 2f\n"
29510- "1: movl 64(%4), %%eax\n"
29511+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
29512 " .align 2,0x90\n"
29513- "2: movl 0(%4), %%eax\n"
29514- "21: movl 4(%4), %%edx\n"
29515+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
29516+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
29517 " movnti %%eax, 0(%3)\n"
29518 " movnti %%edx, 4(%3)\n"
29519- "3: movl 8(%4), %%eax\n"
29520- "31: movl 12(%4),%%edx\n"
29521+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
29522+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
29523 " movnti %%eax, 8(%3)\n"
29524 " movnti %%edx, 12(%3)\n"
29525- "4: movl 16(%4), %%eax\n"
29526- "41: movl 20(%4), %%edx\n"
29527+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
29528+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
29529 " movnti %%eax, 16(%3)\n"
29530 " movnti %%edx, 20(%3)\n"
29531- "10: movl 24(%4), %%eax\n"
29532- "51: movl 28(%4), %%edx\n"
29533+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
29534+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
29535 " movnti %%eax, 24(%3)\n"
29536 " movnti %%edx, 28(%3)\n"
29537- "11: movl 32(%4), %%eax\n"
29538- "61: movl 36(%4), %%edx\n"
29539+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
29540+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
29541 " movnti %%eax, 32(%3)\n"
29542 " movnti %%edx, 36(%3)\n"
29543- "12: movl 40(%4), %%eax\n"
29544- "71: movl 44(%4), %%edx\n"
29545+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
29546+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
29547 " movnti %%eax, 40(%3)\n"
29548 " movnti %%edx, 44(%3)\n"
29549- "13: movl 48(%4), %%eax\n"
29550- "81: movl 52(%4), %%edx\n"
29551+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
29552+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
29553 " movnti %%eax, 48(%3)\n"
29554 " movnti %%edx, 52(%3)\n"
29555- "14: movl 56(%4), %%eax\n"
29556- "91: movl 60(%4), %%edx\n"
29557+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
29558+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
29559 " movnti %%eax, 56(%3)\n"
29560 " movnti %%edx, 60(%3)\n"
29561 " addl $-64, %0\n"
29562@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
29563 " shrl $2, %0\n"
29564 " andl $3, %%eax\n"
29565 " cld\n"
29566- "6: rep; movsl\n"
29567+ "6: rep; "__copyuser_seg" movsl\n"
29568 " movl %%eax,%0\n"
29569- "7: rep; movsb\n"
29570+ "7: rep; "__copyuser_seg" movsb\n"
29571 "8:\n"
29572 ".section .fixup,\"ax\"\n"
29573 "9: lea 0(%%eax,%0,4),%0\n"
29574@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
29575 */
29576 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
29577 unsigned long size);
29578-unsigned long __copy_user_intel(void __user *to, const void *from,
29579+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
29580+ unsigned long size);
29581+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
29582 unsigned long size);
29583 unsigned long __copy_user_zeroing_intel_nocache(void *to,
29584 const void __user *from, unsigned long size);
29585 #endif /* CONFIG_X86_INTEL_USERCOPY */
29586
29587 /* Generic arbitrary sized copy. */
29588-#define __copy_user(to, from, size) \
29589+#define __copy_user(to, from, size, prefix, set, restore) \
29590 do { \
29591 int __d0, __d1, __d2; \
29592 __asm__ __volatile__( \
29593+ set \
29594 " cmp $7,%0\n" \
29595 " jbe 1f\n" \
29596 " movl %1,%0\n" \
29597 " negl %0\n" \
29598 " andl $7,%0\n" \
29599 " subl %0,%3\n" \
29600- "4: rep; movsb\n" \
29601+ "4: rep; "prefix"movsb\n" \
29602 " movl %3,%0\n" \
29603 " shrl $2,%0\n" \
29604 " andl $3,%3\n" \
29605 " .align 2,0x90\n" \
29606- "0: rep; movsl\n" \
29607+ "0: rep; "prefix"movsl\n" \
29608 " movl %3,%0\n" \
29609- "1: rep; movsb\n" \
29610+ "1: rep; "prefix"movsb\n" \
29611 "2:\n" \
29612+ restore \
29613 ".section .fixup,\"ax\"\n" \
29614 "5: addl %3,%0\n" \
29615 " jmp 2b\n" \
29616@@ -538,14 +650,14 @@ do { \
29617 " negl %0\n" \
29618 " andl $7,%0\n" \
29619 " subl %0,%3\n" \
29620- "4: rep; movsb\n" \
29621+ "4: rep; "__copyuser_seg"movsb\n" \
29622 " movl %3,%0\n" \
29623 " shrl $2,%0\n" \
29624 " andl $3,%3\n" \
29625 " .align 2,0x90\n" \
29626- "0: rep; movsl\n" \
29627+ "0: rep; "__copyuser_seg"movsl\n" \
29628 " movl %3,%0\n" \
29629- "1: rep; movsb\n" \
29630+ "1: rep; "__copyuser_seg"movsb\n" \
29631 "2:\n" \
29632 ".section .fixup,\"ax\"\n" \
29633 "5: addl %3,%0\n" \
29634@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
29635 {
29636 stac();
29637 if (movsl_is_ok(to, from, n))
29638- __copy_user(to, from, n);
29639+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
29640 else
29641- n = __copy_user_intel(to, from, n);
29642+ n = __generic_copy_to_user_intel(to, from, n);
29643 clac();
29644 return n;
29645 }
29646@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
29647 {
29648 stac();
29649 if (movsl_is_ok(to, from, n))
29650- __copy_user(to, from, n);
29651+ __copy_user(to, from, n, __copyuser_seg, "", "");
29652 else
29653- n = __copy_user_intel((void __user *)to,
29654- (const void *)from, n);
29655+ n = __generic_copy_from_user_intel(to, from, n);
29656 clac();
29657 return n;
29658 }
29659@@ -632,60 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
29660 if (n > 64 && cpu_has_xmm2)
29661 n = __copy_user_intel_nocache(to, from, n);
29662 else
29663- __copy_user(to, from, n);
29664+ __copy_user(to, from, n, __copyuser_seg, "", "");
29665 #else
29666- __copy_user(to, from, n);
29667+ __copy_user(to, from, n, __copyuser_seg, "", "");
29668 #endif
29669 clac();
29670 return n;
29671 }
29672 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
29673
29674-/**
29675- * copy_to_user: - Copy a block of data into user space.
29676- * @to: Destination address, in user space.
29677- * @from: Source address, in kernel space.
29678- * @n: Number of bytes to copy.
29679- *
29680- * Context: User context only. This function may sleep.
29681- *
29682- * Copy data from kernel space to user space.
29683- *
29684- * Returns number of bytes that could not be copied.
29685- * On success, this will be zero.
29686- */
29687-unsigned long
29688-copy_to_user(void __user *to, const void *from, unsigned long n)
29689+#ifdef CONFIG_PAX_MEMORY_UDEREF
29690+void __set_fs(mm_segment_t x)
29691 {
29692- if (access_ok(VERIFY_WRITE, to, n))
29693- n = __copy_to_user(to, from, n);
29694- return n;
29695+ switch (x.seg) {
29696+ case 0:
29697+ loadsegment(gs, 0);
29698+ break;
29699+ case TASK_SIZE_MAX:
29700+ loadsegment(gs, __USER_DS);
29701+ break;
29702+ case -1UL:
29703+ loadsegment(gs, __KERNEL_DS);
29704+ break;
29705+ default:
29706+ BUG();
29707+ }
29708 }
29709-EXPORT_SYMBOL(copy_to_user);
29710+EXPORT_SYMBOL(__set_fs);
29711
29712-/**
29713- * copy_from_user: - Copy a block of data from user space.
29714- * @to: Destination address, in kernel space.
29715- * @from: Source address, in user space.
29716- * @n: Number of bytes to copy.
29717- *
29718- * Context: User context only. This function may sleep.
29719- *
29720- * Copy data from user space to kernel space.
29721- *
29722- * Returns number of bytes that could not be copied.
29723- * On success, this will be zero.
29724- *
29725- * If some data could not be copied, this function will pad the copied
29726- * data to the requested size using zero bytes.
29727- */
29728-unsigned long
29729-_copy_from_user(void *to, const void __user *from, unsigned long n)
29730+void set_fs(mm_segment_t x)
29731 {
29732- if (access_ok(VERIFY_READ, from, n))
29733- n = __copy_from_user(to, from, n);
29734- else
29735- memset(to, 0, n);
29736- return n;
29737+ current_thread_info()->addr_limit = x;
29738+ __set_fs(x);
29739 }
29740-EXPORT_SYMBOL(_copy_from_user);
29741+EXPORT_SYMBOL(set_fs);
29742+#endif
29743diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
29744index 906fea3..0194a18 100644
29745--- a/arch/x86/lib/usercopy_64.c
29746+++ b/arch/x86/lib/usercopy_64.c
29747@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
29748 might_fault();
29749 /* no memory constraint because it doesn't change any memory gcc knows
29750 about */
29751+ pax_open_userland();
29752 stac();
29753 asm volatile(
29754 " testq %[size8],%[size8]\n"
29755@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
29756 _ASM_EXTABLE(0b,3b)
29757 _ASM_EXTABLE(1b,2b)
29758 : [size8] "=&c"(size), [dst] "=&D" (__d0)
29759- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
29760+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
29761 [zero] "r" (0UL), [eight] "r" (8UL));
29762 clac();
29763+ pax_close_userland();
29764 return size;
29765 }
29766 EXPORT_SYMBOL(__clear_user);
29767@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
29768 }
29769 EXPORT_SYMBOL(clear_user);
29770
29771-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
29772+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
29773 {
29774- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
29775- return copy_user_generic((__force void *)to, (__force void *)from, len);
29776- }
29777- return len;
29778+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
29779+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
29780+ return len;
29781 }
29782 EXPORT_SYMBOL(copy_in_user);
29783
29784@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
29785 * it is not necessary to optimize tail handling.
29786 */
29787 unsigned long
29788-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
29789+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
29790 {
29791 char c;
29792 unsigned zero_len;
29793
29794+ clac();
29795+ pax_close_userland();
29796 for (; len; --len, to++) {
29797 if (__get_user_nocheck(c, from++, sizeof(char)))
29798 break;
29799@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
29800 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
29801 if (__put_user_nocheck(c, to++, sizeof(char)))
29802 break;
29803- clac();
29804 return len;
29805 }
29806diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
29807index 23d8e5f..9ccc13a 100644
29808--- a/arch/x86/mm/Makefile
29809+++ b/arch/x86/mm/Makefile
29810@@ -28,3 +28,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
29811 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
29812
29813 obj-$(CONFIG_MEMTEST) += memtest.o
29814+
29815+quote:="
29816+obj-$(CONFIG_X86_64) += uderef_64.o
29817+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
29818diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
29819index 903ec1e..c4166b2 100644
29820--- a/arch/x86/mm/extable.c
29821+++ b/arch/x86/mm/extable.c
29822@@ -6,12 +6,24 @@
29823 static inline unsigned long
29824 ex_insn_addr(const struct exception_table_entry *x)
29825 {
29826- return (unsigned long)&x->insn + x->insn;
29827+ unsigned long reloc = 0;
29828+
29829+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29830+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29831+#endif
29832+
29833+ return (unsigned long)&x->insn + x->insn + reloc;
29834 }
29835 static inline unsigned long
29836 ex_fixup_addr(const struct exception_table_entry *x)
29837 {
29838- return (unsigned long)&x->fixup + x->fixup;
29839+ unsigned long reloc = 0;
29840+
29841+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29842+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29843+#endif
29844+
29845+ return (unsigned long)&x->fixup + x->fixup + reloc;
29846 }
29847
29848 int fixup_exception(struct pt_regs *regs)
29849@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
29850 unsigned long new_ip;
29851
29852 #ifdef CONFIG_PNPBIOS
29853- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
29854+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
29855 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
29856 extern u32 pnp_bios_is_utter_crap;
29857 pnp_bios_is_utter_crap = 1;
29858@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
29859 i += 4;
29860 p->fixup -= i;
29861 i += 4;
29862+
29863+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29864+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
29865+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29866+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29867+#endif
29868+
29869 }
29870 }
29871
29872diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
29873index 654be4a..a4a3da1 100644
29874--- a/arch/x86/mm/fault.c
29875+++ b/arch/x86/mm/fault.c
29876@@ -14,11 +14,18 @@
29877 #include <linux/hugetlb.h> /* hstate_index_to_shift */
29878 #include <linux/prefetch.h> /* prefetchw */
29879 #include <linux/context_tracking.h> /* exception_enter(), ... */
29880+#include <linux/unistd.h>
29881+#include <linux/compiler.h>
29882
29883 #include <asm/traps.h> /* dotraplinkage, ... */
29884 #include <asm/pgalloc.h> /* pgd_*(), ... */
29885 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
29886 #include <asm/fixmap.h> /* VSYSCALL_START */
29887+#include <asm/tlbflush.h>
29888+
29889+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29890+#include <asm/stacktrace.h>
29891+#endif
29892
29893 /*
29894 * Page fault error code bits:
29895@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
29896 int ret = 0;
29897
29898 /* kprobe_running() needs smp_processor_id() */
29899- if (kprobes_built_in() && !user_mode_vm(regs)) {
29900+ if (kprobes_built_in() && !user_mode(regs)) {
29901 preempt_disable();
29902 if (kprobe_running() && kprobe_fault_handler(regs, 14))
29903 ret = 1;
29904@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
29905 return !instr_lo || (instr_lo>>1) == 1;
29906 case 0x00:
29907 /* Prefetch instruction is 0x0F0D or 0x0F18 */
29908- if (probe_kernel_address(instr, opcode))
29909+ if (user_mode(regs)) {
29910+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
29911+ return 0;
29912+ } else if (probe_kernel_address(instr, opcode))
29913 return 0;
29914
29915 *prefetch = (instr_lo == 0xF) &&
29916@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
29917 while (instr < max_instr) {
29918 unsigned char opcode;
29919
29920- if (probe_kernel_address(instr, opcode))
29921+ if (user_mode(regs)) {
29922+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
29923+ break;
29924+ } else if (probe_kernel_address(instr, opcode))
29925 break;
29926
29927 instr++;
29928@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
29929 force_sig_info(si_signo, &info, tsk);
29930 }
29931
29932+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
29933+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
29934+#endif
29935+
29936+#ifdef CONFIG_PAX_EMUTRAMP
29937+static int pax_handle_fetch_fault(struct pt_regs *regs);
29938+#endif
29939+
29940+#ifdef CONFIG_PAX_PAGEEXEC
29941+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
29942+{
29943+ pgd_t *pgd;
29944+ pud_t *pud;
29945+ pmd_t *pmd;
29946+
29947+ pgd = pgd_offset(mm, address);
29948+ if (!pgd_present(*pgd))
29949+ return NULL;
29950+ pud = pud_offset(pgd, address);
29951+ if (!pud_present(*pud))
29952+ return NULL;
29953+ pmd = pmd_offset(pud, address);
29954+ if (!pmd_present(*pmd))
29955+ return NULL;
29956+ return pmd;
29957+}
29958+#endif
29959+
29960 DEFINE_SPINLOCK(pgd_lock);
29961 LIST_HEAD(pgd_list);
29962
29963@@ -232,10 +273,27 @@ void vmalloc_sync_all(void)
29964 for (address = VMALLOC_START & PMD_MASK;
29965 address >= TASK_SIZE && address < FIXADDR_TOP;
29966 address += PMD_SIZE) {
29967+
29968+#ifdef CONFIG_PAX_PER_CPU_PGD
29969+ unsigned long cpu;
29970+#else
29971 struct page *page;
29972+#endif
29973
29974 spin_lock(&pgd_lock);
29975+
29976+#ifdef CONFIG_PAX_PER_CPU_PGD
29977+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
29978+ pgd_t *pgd = get_cpu_pgd(cpu, user);
29979+ pmd_t *ret;
29980+
29981+ ret = vmalloc_sync_one(pgd, address);
29982+ if (!ret)
29983+ break;
29984+ pgd = get_cpu_pgd(cpu, kernel);
29985+#else
29986 list_for_each_entry(page, &pgd_list, lru) {
29987+ pgd_t *pgd;
29988 spinlock_t *pgt_lock;
29989 pmd_t *ret;
29990
29991@@ -243,8 +301,14 @@ void vmalloc_sync_all(void)
29992 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
29993
29994 spin_lock(pgt_lock);
29995- ret = vmalloc_sync_one(page_address(page), address);
29996+ pgd = page_address(page);
29997+#endif
29998+
29999+ ret = vmalloc_sync_one(pgd, address);
30000+
30001+#ifndef CONFIG_PAX_PER_CPU_PGD
30002 spin_unlock(pgt_lock);
30003+#endif
30004
30005 if (!ret)
30006 break;
30007@@ -278,6 +342,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
30008 * an interrupt in the middle of a task switch..
30009 */
30010 pgd_paddr = read_cr3();
30011+
30012+#ifdef CONFIG_PAX_PER_CPU_PGD
30013+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
30014+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
30015+#endif
30016+
30017 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
30018 if (!pmd_k)
30019 return -1;
30020@@ -373,11 +443,25 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
30021 * happen within a race in page table update. In the later
30022 * case just flush:
30023 */
30024- pgd = pgd_offset(current->active_mm, address);
30025+
30026 pgd_ref = pgd_offset_k(address);
30027 if (pgd_none(*pgd_ref))
30028 return -1;
30029
30030+#ifdef CONFIG_PAX_PER_CPU_PGD
30031+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
30032+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
30033+ if (pgd_none(*pgd)) {
30034+ set_pgd(pgd, *pgd_ref);
30035+ arch_flush_lazy_mmu_mode();
30036+ } else {
30037+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
30038+ }
30039+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
30040+#else
30041+ pgd = pgd_offset(current->active_mm, address);
30042+#endif
30043+
30044 if (pgd_none(*pgd)) {
30045 set_pgd(pgd, *pgd_ref);
30046 arch_flush_lazy_mmu_mode();
30047@@ -543,7 +627,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
30048 static int is_errata100(struct pt_regs *regs, unsigned long address)
30049 {
30050 #ifdef CONFIG_X86_64
30051- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
30052+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
30053 return 1;
30054 #endif
30055 return 0;
30056@@ -570,7 +654,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
30057 }
30058
30059 static const char nx_warning[] = KERN_CRIT
30060-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
30061+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
30062
30063 static void
30064 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
30065@@ -579,15 +663,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
30066 if (!oops_may_print())
30067 return;
30068
30069- if (error_code & PF_INSTR) {
30070+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
30071 unsigned int level;
30072
30073 pte_t *pte = lookup_address(address, &level);
30074
30075 if (pte && pte_present(*pte) && !pte_exec(*pte))
30076- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
30077+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
30078 }
30079
30080+#ifdef CONFIG_PAX_KERNEXEC
30081+ if (init_mm.start_code <= address && address < init_mm.end_code) {
30082+ if (current->signal->curr_ip)
30083+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
30084+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
30085+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
30086+ else
30087+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
30088+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
30089+ }
30090+#endif
30091+
30092 printk(KERN_ALERT "BUG: unable to handle kernel ");
30093 if (address < PAGE_SIZE)
30094 printk(KERN_CONT "NULL pointer dereference");
30095@@ -750,6 +846,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
30096 return;
30097 }
30098 #endif
30099+
30100+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30101+ if (pax_is_fetch_fault(regs, error_code, address)) {
30102+
30103+#ifdef CONFIG_PAX_EMUTRAMP
30104+ switch (pax_handle_fetch_fault(regs)) {
30105+ case 2:
30106+ return;
30107+ }
30108+#endif
30109+
30110+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
30111+ do_group_exit(SIGKILL);
30112+ }
30113+#endif
30114+
30115 /* Kernel addresses are always protection faults: */
30116 if (address >= TASK_SIZE)
30117 error_code |= PF_PROT;
30118@@ -835,7 +947,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
30119 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
30120 printk(KERN_ERR
30121 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
30122- tsk->comm, tsk->pid, address);
30123+ tsk->comm, task_pid_nr(tsk), address);
30124 code = BUS_MCEERR_AR;
30125 }
30126 #endif
30127@@ -898,6 +1010,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
30128 return 1;
30129 }
30130
30131+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
30132+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
30133+{
30134+ pte_t *pte;
30135+ pmd_t *pmd;
30136+ spinlock_t *ptl;
30137+ unsigned char pte_mask;
30138+
30139+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
30140+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
30141+ return 0;
30142+
30143+ /* PaX: it's our fault, let's handle it if we can */
30144+
30145+ /* PaX: take a look at read faults before acquiring any locks */
30146+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
30147+ /* instruction fetch attempt from a protected page in user mode */
30148+ up_read(&mm->mmap_sem);
30149+
30150+#ifdef CONFIG_PAX_EMUTRAMP
30151+ switch (pax_handle_fetch_fault(regs)) {
30152+ case 2:
30153+ return 1;
30154+ }
30155+#endif
30156+
30157+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
30158+ do_group_exit(SIGKILL);
30159+ }
30160+
30161+ pmd = pax_get_pmd(mm, address);
30162+ if (unlikely(!pmd))
30163+ return 0;
30164+
30165+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
30166+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
30167+ pte_unmap_unlock(pte, ptl);
30168+ return 0;
30169+ }
30170+
30171+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
30172+ /* write attempt to a protected page in user mode */
30173+ pte_unmap_unlock(pte, ptl);
30174+ return 0;
30175+ }
30176+
30177+#ifdef CONFIG_SMP
30178+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
30179+#else
30180+ if (likely(address > get_limit(regs->cs)))
30181+#endif
30182+ {
30183+ set_pte(pte, pte_mkread(*pte));
30184+ __flush_tlb_one(address);
30185+ pte_unmap_unlock(pte, ptl);
30186+ up_read(&mm->mmap_sem);
30187+ return 1;
30188+ }
30189+
30190+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
30191+
30192+ /*
30193+ * PaX: fill DTLB with user rights and retry
30194+ */
30195+ __asm__ __volatile__ (
30196+ "orb %2,(%1)\n"
30197+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
30198+/*
30199+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
30200+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
30201+ * page fault when examined during a TLB load attempt. this is true not only
30202+ * for PTEs holding a non-present entry but also present entries that will
30203+ * raise a page fault (such as those set up by PaX, or the copy-on-write
30204+ * mechanism). in effect it means that we do *not* need to flush the TLBs
30205+ * for our target pages since their PTEs are simply not in the TLBs at all.
30206+
30207+ * the best thing in omitting it is that we gain around 15-20% speed in the
30208+ * fast path of the page fault handler and can get rid of tracing since we
30209+ * can no longer flush unintended entries.
30210+ */
30211+ "invlpg (%0)\n"
30212+#endif
30213+ __copyuser_seg"testb $0,(%0)\n"
30214+ "xorb %3,(%1)\n"
30215+ :
30216+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
30217+ : "memory", "cc");
30218+ pte_unmap_unlock(pte, ptl);
30219+ up_read(&mm->mmap_sem);
30220+ return 1;
30221+}
30222+#endif
30223+
30224 /*
30225 * Handle a spurious fault caused by a stale TLB entry.
30226 *
30227@@ -964,6 +1169,9 @@ int show_unhandled_signals = 1;
30228 static inline int
30229 access_error(unsigned long error_code, struct vm_area_struct *vma)
30230 {
30231+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
30232+ return 1;
30233+
30234 if (error_code & PF_WRITE) {
30235 /* write, present and write, not present: */
30236 if (unlikely(!(vma->vm_flags & VM_WRITE)))
30237@@ -992,7 +1200,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
30238 if (error_code & PF_USER)
30239 return false;
30240
30241- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
30242+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
30243 return false;
30244
30245 return true;
30246@@ -1008,18 +1216,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
30247 {
30248 struct vm_area_struct *vma;
30249 struct task_struct *tsk;
30250- unsigned long address;
30251 struct mm_struct *mm;
30252 int fault;
30253 int write = error_code & PF_WRITE;
30254 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
30255 (write ? FAULT_FLAG_WRITE : 0);
30256
30257- tsk = current;
30258- mm = tsk->mm;
30259-
30260 /* Get the faulting address: */
30261- address = read_cr2();
30262+ unsigned long address = read_cr2();
30263+
30264+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30265+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
30266+ if (!search_exception_tables(regs->ip)) {
30267+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
30268+ bad_area_nosemaphore(regs, error_code, address);
30269+ return;
30270+ }
30271+ if (address < pax_user_shadow_base) {
30272+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
30273+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
30274+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
30275+ } else
30276+ address -= pax_user_shadow_base;
30277+ }
30278+#endif
30279+
30280+ tsk = current;
30281+ mm = tsk->mm;
30282
30283 /*
30284 * Detect and handle instructions that would cause a page fault for
30285@@ -1080,7 +1303,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
30286 * User-mode registers count as a user access even for any
30287 * potential system fault or CPU buglet:
30288 */
30289- if (user_mode_vm(regs)) {
30290+ if (user_mode(regs)) {
30291 local_irq_enable();
30292 error_code |= PF_USER;
30293 } else {
30294@@ -1142,6 +1365,11 @@ retry:
30295 might_sleep();
30296 }
30297
30298+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
30299+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
30300+ return;
30301+#endif
30302+
30303 vma = find_vma(mm, address);
30304 if (unlikely(!vma)) {
30305 bad_area(regs, error_code, address);
30306@@ -1153,18 +1381,24 @@ retry:
30307 bad_area(regs, error_code, address);
30308 return;
30309 }
30310- if (error_code & PF_USER) {
30311- /*
30312- * Accessing the stack below %sp is always a bug.
30313- * The large cushion allows instructions like enter
30314- * and pusha to work. ("enter $65535, $31" pushes
30315- * 32 pointers and then decrements %sp by 65535.)
30316- */
30317- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
30318- bad_area(regs, error_code, address);
30319- return;
30320- }
30321+ /*
30322+ * Accessing the stack below %sp is always a bug.
30323+ * The large cushion allows instructions like enter
30324+ * and pusha to work. ("enter $65535, $31" pushes
30325+ * 32 pointers and then decrements %sp by 65535.)
30326+ */
30327+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
30328+ bad_area(regs, error_code, address);
30329+ return;
30330 }
30331+
30332+#ifdef CONFIG_PAX_SEGMEXEC
30333+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
30334+ bad_area(regs, error_code, address);
30335+ return;
30336+ }
30337+#endif
30338+
30339 if (unlikely(expand_stack(vma, address))) {
30340 bad_area(regs, error_code, address);
30341 return;
30342@@ -1230,3 +1464,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
30343 __do_page_fault(regs, error_code);
30344 exception_exit(prev_state);
30345 }
30346+
30347+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30348+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
30349+{
30350+ struct mm_struct *mm = current->mm;
30351+ unsigned long ip = regs->ip;
30352+
30353+ if (v8086_mode(regs))
30354+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
30355+
30356+#ifdef CONFIG_PAX_PAGEEXEC
30357+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
30358+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
30359+ return true;
30360+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
30361+ return true;
30362+ return false;
30363+ }
30364+#endif
30365+
30366+#ifdef CONFIG_PAX_SEGMEXEC
30367+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
30368+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
30369+ return true;
30370+ return false;
30371+ }
30372+#endif
30373+
30374+ return false;
30375+}
30376+#endif
30377+
30378+#ifdef CONFIG_PAX_EMUTRAMP
30379+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
30380+{
30381+ int err;
30382+
30383+ do { /* PaX: libffi trampoline emulation */
30384+ unsigned char mov, jmp;
30385+ unsigned int addr1, addr2;
30386+
30387+#ifdef CONFIG_X86_64
30388+ if ((regs->ip + 9) >> 32)
30389+ break;
30390+#endif
30391+
30392+ err = get_user(mov, (unsigned char __user *)regs->ip);
30393+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
30394+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
30395+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
30396+
30397+ if (err)
30398+ break;
30399+
30400+ if (mov == 0xB8 && jmp == 0xE9) {
30401+ regs->ax = addr1;
30402+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
30403+ return 2;
30404+ }
30405+ } while (0);
30406+
30407+ do { /* PaX: gcc trampoline emulation #1 */
30408+ unsigned char mov1, mov2;
30409+ unsigned short jmp;
30410+ unsigned int addr1, addr2;
30411+
30412+#ifdef CONFIG_X86_64
30413+ if ((regs->ip + 11) >> 32)
30414+ break;
30415+#endif
30416+
30417+ err = get_user(mov1, (unsigned char __user *)regs->ip);
30418+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
30419+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
30420+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
30421+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
30422+
30423+ if (err)
30424+ break;
30425+
30426+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
30427+ regs->cx = addr1;
30428+ regs->ax = addr2;
30429+ regs->ip = addr2;
30430+ return 2;
30431+ }
30432+ } while (0);
30433+
30434+ do { /* PaX: gcc trampoline emulation #2 */
30435+ unsigned char mov, jmp;
30436+ unsigned int addr1, addr2;
30437+
30438+#ifdef CONFIG_X86_64
30439+ if ((regs->ip + 9) >> 32)
30440+ break;
30441+#endif
30442+
30443+ err = get_user(mov, (unsigned char __user *)regs->ip);
30444+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
30445+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
30446+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
30447+
30448+ if (err)
30449+ break;
30450+
30451+ if (mov == 0xB9 && jmp == 0xE9) {
30452+ regs->cx = addr1;
30453+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
30454+ return 2;
30455+ }
30456+ } while (0);
30457+
30458+ return 1; /* PaX in action */
30459+}
30460+
30461+#ifdef CONFIG_X86_64
30462+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
30463+{
30464+ int err;
30465+
30466+ do { /* PaX: libffi trampoline emulation */
30467+ unsigned short mov1, mov2, jmp1;
30468+ unsigned char stcclc, jmp2;
30469+ unsigned long addr1, addr2;
30470+
30471+ err = get_user(mov1, (unsigned short __user *)regs->ip);
30472+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
30473+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
30474+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
30475+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
30476+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
30477+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
30478+
30479+ if (err)
30480+ break;
30481+
30482+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
30483+ regs->r11 = addr1;
30484+ regs->r10 = addr2;
30485+ if (stcclc == 0xF8)
30486+ regs->flags &= ~X86_EFLAGS_CF;
30487+ else
30488+ regs->flags |= X86_EFLAGS_CF;
30489+ regs->ip = addr1;
30490+ return 2;
30491+ }
30492+ } while (0);
30493+
30494+ do { /* PaX: gcc trampoline emulation #1 */
30495+ unsigned short mov1, mov2, jmp1;
30496+ unsigned char jmp2;
30497+ unsigned int addr1;
30498+ unsigned long addr2;
30499+
30500+ err = get_user(mov1, (unsigned short __user *)regs->ip);
30501+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
30502+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
30503+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
30504+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
30505+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
30506+
30507+ if (err)
30508+ break;
30509+
30510+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
30511+ regs->r11 = addr1;
30512+ regs->r10 = addr2;
30513+ regs->ip = addr1;
30514+ return 2;
30515+ }
30516+ } while (0);
30517+
30518+ do { /* PaX: gcc trampoline emulation #2 */
30519+ unsigned short mov1, mov2, jmp1;
30520+ unsigned char jmp2;
30521+ unsigned long addr1, addr2;
30522+
30523+ err = get_user(mov1, (unsigned short __user *)regs->ip);
30524+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
30525+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
30526+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
30527+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
30528+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
30529+
30530+ if (err)
30531+ break;
30532+
30533+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
30534+ regs->r11 = addr1;
30535+ regs->r10 = addr2;
30536+ regs->ip = addr1;
30537+ return 2;
30538+ }
30539+ } while (0);
30540+
30541+ return 1; /* PaX in action */
30542+}
30543+#endif
30544+
30545+/*
30546+ * PaX: decide what to do with offenders (regs->ip = fault address)
30547+ *
30548+ * returns 1 when task should be killed
30549+ * 2 when gcc trampoline was detected
30550+ */
30551+static int pax_handle_fetch_fault(struct pt_regs *regs)
30552+{
30553+ if (v8086_mode(regs))
30554+ return 1;
30555+
30556+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
30557+ return 1;
30558+
30559+#ifdef CONFIG_X86_32
30560+ return pax_handle_fetch_fault_32(regs);
30561+#else
30562+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
30563+ return pax_handle_fetch_fault_32(regs);
30564+ else
30565+ return pax_handle_fetch_fault_64(regs);
30566+#endif
30567+}
30568+#endif
30569+
30570+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30571+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
30572+{
30573+ long i;
30574+
30575+ printk(KERN_ERR "PAX: bytes at PC: ");
30576+ for (i = 0; i < 20; i++) {
30577+ unsigned char c;
30578+ if (get_user(c, (unsigned char __force_user *)pc+i))
30579+ printk(KERN_CONT "?? ");
30580+ else
30581+ printk(KERN_CONT "%02x ", c);
30582+ }
30583+ printk("\n");
30584+
30585+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
30586+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
30587+ unsigned long c;
30588+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
30589+#ifdef CONFIG_X86_32
30590+ printk(KERN_CONT "???????? ");
30591+#else
30592+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
30593+ printk(KERN_CONT "???????? ???????? ");
30594+ else
30595+ printk(KERN_CONT "???????????????? ");
30596+#endif
30597+ } else {
30598+#ifdef CONFIG_X86_64
30599+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
30600+ printk(KERN_CONT "%08x ", (unsigned int)c);
30601+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
30602+ } else
30603+#endif
30604+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
30605+ }
30606+ }
30607+ printk("\n");
30608+}
30609+#endif
30610+
30611+/**
30612+ * probe_kernel_write(): safely attempt to write to a location
30613+ * @dst: address to write to
30614+ * @src: pointer to the data that shall be written
30615+ * @size: size of the data chunk
30616+ *
30617+ * Safely write to address @dst from the buffer at @src. If a kernel fault
30618+ * happens, handle that and return -EFAULT.
30619+ */
30620+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
30621+{
30622+ long ret;
30623+ mm_segment_t old_fs = get_fs();
30624+
30625+ set_fs(KERNEL_DS);
30626+ pagefault_disable();
30627+ pax_open_kernel();
30628+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
30629+ pax_close_kernel();
30630+ pagefault_enable();
30631+ set_fs(old_fs);
30632+
30633+ return ret ? -EFAULT : 0;
30634+}
30635diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
30636index dd74e46..7d26398 100644
30637--- a/arch/x86/mm/gup.c
30638+++ b/arch/x86/mm/gup.c
30639@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
30640 addr = start;
30641 len = (unsigned long) nr_pages << PAGE_SHIFT;
30642 end = start + len;
30643- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
30644+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
30645 (void __user *)start, len)))
30646 return 0;
30647
30648diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
30649index 252b8f5..4dcfdc1 100644
30650--- a/arch/x86/mm/highmem_32.c
30651+++ b/arch/x86/mm/highmem_32.c
30652@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
30653 idx = type + KM_TYPE_NR*smp_processor_id();
30654 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
30655 BUG_ON(!pte_none(*(kmap_pte-idx)));
30656+
30657+ pax_open_kernel();
30658 set_pte(kmap_pte-idx, mk_pte(page, prot));
30659+ pax_close_kernel();
30660+
30661 arch_flush_lazy_mmu_mode();
30662
30663 return (void *)vaddr;
30664diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
30665index ae1aa71..d9bea75 100644
30666--- a/arch/x86/mm/hugetlbpage.c
30667+++ b/arch/x86/mm/hugetlbpage.c
30668@@ -271,23 +271,30 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
30669 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
30670 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
30671 unsigned long addr, unsigned long len,
30672- unsigned long pgoff, unsigned long flags)
30673+ unsigned long pgoff, unsigned long flags, unsigned long offset)
30674 {
30675 struct hstate *h = hstate_file(file);
30676 struct vm_unmapped_area_info info;
30677-
30678+
30679 info.flags = 0;
30680 info.length = len;
30681 info.low_limit = TASK_UNMAPPED_BASE;
30682+
30683+#ifdef CONFIG_PAX_RANDMMAP
30684+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
30685+ info.low_limit += current->mm->delta_mmap;
30686+#endif
30687+
30688 info.high_limit = TASK_SIZE;
30689 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
30690 info.align_offset = 0;
30691+ info.threadstack_offset = offset;
30692 return vm_unmapped_area(&info);
30693 }
30694
30695 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
30696 unsigned long addr0, unsigned long len,
30697- unsigned long pgoff, unsigned long flags)
30698+ unsigned long pgoff, unsigned long flags, unsigned long offset)
30699 {
30700 struct hstate *h = hstate_file(file);
30701 struct vm_unmapped_area_info info;
30702@@ -299,6 +306,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
30703 info.high_limit = current->mm->mmap_base;
30704 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
30705 info.align_offset = 0;
30706+ info.threadstack_offset = offset;
30707 addr = vm_unmapped_area(&info);
30708
30709 /*
30710@@ -311,6 +319,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
30711 VM_BUG_ON(addr != -ENOMEM);
30712 info.flags = 0;
30713 info.low_limit = TASK_UNMAPPED_BASE;
30714+
30715+#ifdef CONFIG_PAX_RANDMMAP
30716+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
30717+ info.low_limit += current->mm->delta_mmap;
30718+#endif
30719+
30720 info.high_limit = TASK_SIZE;
30721 addr = vm_unmapped_area(&info);
30722 }
30723@@ -325,10 +339,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
30724 struct hstate *h = hstate_file(file);
30725 struct mm_struct *mm = current->mm;
30726 struct vm_area_struct *vma;
30727+ unsigned long pax_task_size = TASK_SIZE;
30728+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
30729
30730 if (len & ~huge_page_mask(h))
30731 return -EINVAL;
30732- if (len > TASK_SIZE)
30733+
30734+#ifdef CONFIG_PAX_SEGMEXEC
30735+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
30736+ pax_task_size = SEGMEXEC_TASK_SIZE;
30737+#endif
30738+
30739+ pax_task_size -= PAGE_SIZE;
30740+
30741+ if (len > pax_task_size)
30742 return -ENOMEM;
30743
30744 if (flags & MAP_FIXED) {
30745@@ -337,19 +361,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
30746 return addr;
30747 }
30748
30749+#ifdef CONFIG_PAX_RANDMMAP
30750+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
30751+#endif
30752+
30753 if (addr) {
30754 addr = ALIGN(addr, huge_page_size(h));
30755 vma = find_vma(mm, addr);
30756- if (TASK_SIZE - len >= addr &&
30757- (!vma || addr + len <= vma->vm_start))
30758+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
30759 return addr;
30760 }
30761 if (mm->get_unmapped_area == arch_get_unmapped_area)
30762 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
30763- pgoff, flags);
30764+ pgoff, flags, offset);
30765 else
30766 return hugetlb_get_unmapped_area_topdown(file, addr, len,
30767- pgoff, flags);
30768+ pgoff, flags, offset);
30769 }
30770
30771 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
30772diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
30773index 1f34e92..c97b98f 100644
30774--- a/arch/x86/mm/init.c
30775+++ b/arch/x86/mm/init.c
30776@@ -4,6 +4,7 @@
30777 #include <linux/swap.h>
30778 #include <linux/memblock.h>
30779 #include <linux/bootmem.h> /* for max_low_pfn */
30780+#include <linux/tboot.h>
30781
30782 #include <asm/cacheflush.h>
30783 #include <asm/e820.h>
30784@@ -17,6 +18,8 @@
30785 #include <asm/proto.h>
30786 #include <asm/dma.h> /* for MAX_DMA_PFN */
30787 #include <asm/microcode.h>
30788+#include <asm/desc.h>
30789+#include <asm/bios_ebda.h>
30790
30791 #include "mm_internal.h"
30792
30793@@ -465,7 +468,18 @@ void __init init_mem_mapping(void)
30794 early_ioremap_page_table_range_init();
30795 #endif
30796
30797+#ifdef CONFIG_PAX_PER_CPU_PGD
30798+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
30799+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
30800+ KERNEL_PGD_PTRS);
30801+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
30802+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
30803+ KERNEL_PGD_PTRS);
30804+ load_cr3(get_cpu_pgd(0, kernel));
30805+#else
30806 load_cr3(swapper_pg_dir);
30807+#endif
30808+
30809 __flush_tlb_all();
30810
30811 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
30812@@ -481,10 +495,40 @@ void __init init_mem_mapping(void)
30813 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
30814 * mmio resources as well as potential bios/acpi data regions.
30815 */
30816+
30817+#ifdef CONFIG_GRKERNSEC_KMEM
30818+static unsigned int ebda_start __read_only;
30819+static unsigned int ebda_end __read_only;
30820+#endif
30821+
30822 int devmem_is_allowed(unsigned long pagenr)
30823 {
30824- if (pagenr < 256)
30825+#ifdef CONFIG_GRKERNSEC_KMEM
30826+ /* allow BDA */
30827+ if (!pagenr)
30828 return 1;
30829+ /* allow EBDA */
30830+ if (pagenr >= ebda_start && pagenr < ebda_end)
30831+ return 1;
30832+ /* if tboot is in use, allow access to its hardcoded serial log range */
30833+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
30834+ return 1;
30835+#else
30836+ if (!pagenr)
30837+ return 1;
30838+#ifdef CONFIG_VM86
30839+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
30840+ return 1;
30841+#endif
30842+#endif
30843+
30844+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
30845+ return 1;
30846+#ifdef CONFIG_GRKERNSEC_KMEM
30847+ /* throw out everything else below 1MB */
30848+ if (pagenr <= 256)
30849+ return 0;
30850+#endif
30851 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
30852 return 0;
30853 if (!page_is_ram(pagenr))
30854@@ -538,8 +582,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
30855 #endif
30856 }
30857
30858+#ifdef CONFIG_GRKERNSEC_KMEM
30859+static inline void gr_init_ebda(void)
30860+{
30861+ unsigned int ebda_addr;
30862+ unsigned int ebda_size = 0;
30863+
30864+ ebda_addr = get_bios_ebda();
30865+ if (ebda_addr) {
30866+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
30867+ ebda_size <<= 10;
30868+ }
30869+ if (ebda_addr && ebda_size) {
30870+ ebda_start = ebda_addr >> PAGE_SHIFT;
30871+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
30872+ } else {
30873+ ebda_start = 0x9f000 >> PAGE_SHIFT;
30874+ ebda_end = 0xa0000 >> PAGE_SHIFT;
30875+ }
30876+}
30877+#else
30878+static inline void gr_init_ebda(void) { }
30879+#endif
30880+
30881 void free_initmem(void)
30882 {
30883+#ifdef CONFIG_PAX_KERNEXEC
30884+#ifdef CONFIG_X86_32
30885+ /* PaX: limit KERNEL_CS to actual size */
30886+ unsigned long addr, limit;
30887+ struct desc_struct d;
30888+ int cpu;
30889+#else
30890+ pgd_t *pgd;
30891+ pud_t *pud;
30892+ pmd_t *pmd;
30893+ unsigned long addr, end;
30894+#endif
30895+#endif
30896+
30897+ gr_init_ebda();
30898+
30899+#ifdef CONFIG_PAX_KERNEXEC
30900+#ifdef CONFIG_X86_32
30901+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
30902+ limit = (limit - 1UL) >> PAGE_SHIFT;
30903+
30904+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
30905+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
30906+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
30907+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
30908+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
30909+ }
30910+
30911+ /* PaX: make KERNEL_CS read-only */
30912+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
30913+ if (!paravirt_enabled())
30914+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
30915+/*
30916+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
30917+ pgd = pgd_offset_k(addr);
30918+ pud = pud_offset(pgd, addr);
30919+ pmd = pmd_offset(pud, addr);
30920+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
30921+ }
30922+*/
30923+#ifdef CONFIG_X86_PAE
30924+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
30925+/*
30926+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
30927+ pgd = pgd_offset_k(addr);
30928+ pud = pud_offset(pgd, addr);
30929+ pmd = pmd_offset(pud, addr);
30930+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
30931+ }
30932+*/
30933+#endif
30934+
30935+#ifdef CONFIG_MODULES
30936+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
30937+#endif
30938+
30939+#else
30940+ /* PaX: make kernel code/rodata read-only, rest non-executable */
30941+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
30942+ pgd = pgd_offset_k(addr);
30943+ pud = pud_offset(pgd, addr);
30944+ pmd = pmd_offset(pud, addr);
30945+ if (!pmd_present(*pmd))
30946+ continue;
30947+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
30948+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
30949+ else
30950+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
30951+ }
30952+
30953+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
30954+ end = addr + KERNEL_IMAGE_SIZE;
30955+ for (; addr < end; addr += PMD_SIZE) {
30956+ pgd = pgd_offset_k(addr);
30957+ pud = pud_offset(pgd, addr);
30958+ pmd = pmd_offset(pud, addr);
30959+ if (!pmd_present(*pmd))
30960+ continue;
30961+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
30962+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
30963+ }
30964+#endif
30965+
30966+ flush_tlb_all();
30967+#endif
30968+
30969 free_init_pages("unused kernel memory",
30970 (unsigned long)(&__init_begin),
30971 (unsigned long)(&__init_end));
30972diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
30973index 3ac7e31..89611b7 100644
30974--- a/arch/x86/mm/init_32.c
30975+++ b/arch/x86/mm/init_32.c
30976@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
30977 bool __read_mostly __vmalloc_start_set = false;
30978
30979 /*
30980- * Creates a middle page table and puts a pointer to it in the
30981- * given global directory entry. This only returns the gd entry
30982- * in non-PAE compilation mode, since the middle layer is folded.
30983- */
30984-static pmd_t * __init one_md_table_init(pgd_t *pgd)
30985-{
30986- pud_t *pud;
30987- pmd_t *pmd_table;
30988-
30989-#ifdef CONFIG_X86_PAE
30990- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
30991- pmd_table = (pmd_t *)alloc_low_page();
30992- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
30993- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
30994- pud = pud_offset(pgd, 0);
30995- BUG_ON(pmd_table != pmd_offset(pud, 0));
30996-
30997- return pmd_table;
30998- }
30999-#endif
31000- pud = pud_offset(pgd, 0);
31001- pmd_table = pmd_offset(pud, 0);
31002-
31003- return pmd_table;
31004-}
31005-
31006-/*
31007 * Create a page table and place a pointer to it in a middle page
31008 * directory entry:
31009 */
31010@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
31011 pte_t *page_table = (pte_t *)alloc_low_page();
31012
31013 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
31014+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31015+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
31016+#else
31017 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
31018+#endif
31019 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
31020 }
31021
31022 return pte_offset_kernel(pmd, 0);
31023 }
31024
31025+static pmd_t * __init one_md_table_init(pgd_t *pgd)
31026+{
31027+ pud_t *pud;
31028+ pmd_t *pmd_table;
31029+
31030+ pud = pud_offset(pgd, 0);
31031+ pmd_table = pmd_offset(pud, 0);
31032+
31033+ return pmd_table;
31034+}
31035+
31036 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
31037 {
31038 int pgd_idx = pgd_index(vaddr);
31039@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31040 int pgd_idx, pmd_idx;
31041 unsigned long vaddr;
31042 pgd_t *pgd;
31043+ pud_t *pud;
31044 pmd_t *pmd;
31045 pte_t *pte = NULL;
31046 unsigned long count = page_table_range_init_count(start, end);
31047@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31048 pgd = pgd_base + pgd_idx;
31049
31050 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
31051- pmd = one_md_table_init(pgd);
31052- pmd = pmd + pmd_index(vaddr);
31053+ pud = pud_offset(pgd, vaddr);
31054+ pmd = pmd_offset(pud, vaddr);
31055+
31056+#ifdef CONFIG_X86_PAE
31057+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
31058+#endif
31059+
31060 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
31061 pmd++, pmd_idx++) {
31062 pte = page_table_kmap_check(one_page_table_init(pmd),
31063@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31064 }
31065 }
31066
31067-static inline int is_kernel_text(unsigned long addr)
31068+static inline int is_kernel_text(unsigned long start, unsigned long end)
31069 {
31070- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
31071- return 1;
31072- return 0;
31073+ if ((start > ktla_ktva((unsigned long)_etext) ||
31074+ end <= ktla_ktva((unsigned long)_stext)) &&
31075+ (start > ktla_ktva((unsigned long)_einittext) ||
31076+ end <= ktla_ktva((unsigned long)_sinittext)) &&
31077+
31078+#ifdef CONFIG_ACPI_SLEEP
31079+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
31080+#endif
31081+
31082+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
31083+ return 0;
31084+ return 1;
31085 }
31086
31087 /*
31088@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
31089 unsigned long last_map_addr = end;
31090 unsigned long start_pfn, end_pfn;
31091 pgd_t *pgd_base = swapper_pg_dir;
31092- int pgd_idx, pmd_idx, pte_ofs;
31093+ unsigned int pgd_idx, pmd_idx, pte_ofs;
31094 unsigned long pfn;
31095 pgd_t *pgd;
31096+ pud_t *pud;
31097 pmd_t *pmd;
31098 pte_t *pte;
31099 unsigned pages_2m, pages_4k;
31100@@ -291,8 +295,13 @@ repeat:
31101 pfn = start_pfn;
31102 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
31103 pgd = pgd_base + pgd_idx;
31104- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
31105- pmd = one_md_table_init(pgd);
31106+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
31107+ pud = pud_offset(pgd, 0);
31108+ pmd = pmd_offset(pud, 0);
31109+
31110+#ifdef CONFIG_X86_PAE
31111+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
31112+#endif
31113
31114 if (pfn >= end_pfn)
31115 continue;
31116@@ -304,14 +313,13 @@ repeat:
31117 #endif
31118 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
31119 pmd++, pmd_idx++) {
31120- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
31121+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
31122
31123 /*
31124 * Map with big pages if possible, otherwise
31125 * create normal page tables:
31126 */
31127 if (use_pse) {
31128- unsigned int addr2;
31129 pgprot_t prot = PAGE_KERNEL_LARGE;
31130 /*
31131 * first pass will use the same initial
31132@@ -322,11 +330,7 @@ repeat:
31133 _PAGE_PSE);
31134
31135 pfn &= PMD_MASK >> PAGE_SHIFT;
31136- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
31137- PAGE_OFFSET + PAGE_SIZE-1;
31138-
31139- if (is_kernel_text(addr) ||
31140- is_kernel_text(addr2))
31141+ if (is_kernel_text(address, address + PMD_SIZE))
31142 prot = PAGE_KERNEL_LARGE_EXEC;
31143
31144 pages_2m++;
31145@@ -343,7 +347,7 @@ repeat:
31146 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
31147 pte += pte_ofs;
31148 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
31149- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
31150+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
31151 pgprot_t prot = PAGE_KERNEL;
31152 /*
31153 * first pass will use the same initial
31154@@ -351,7 +355,7 @@ repeat:
31155 */
31156 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
31157
31158- if (is_kernel_text(addr))
31159+ if (is_kernel_text(address, address + PAGE_SIZE))
31160 prot = PAGE_KERNEL_EXEC;
31161
31162 pages_4k++;
31163@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
31164
31165 pud = pud_offset(pgd, va);
31166 pmd = pmd_offset(pud, va);
31167- if (!pmd_present(*pmd))
31168+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
31169 break;
31170
31171 /* should not be large page here */
31172@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
31173
31174 static void __init pagetable_init(void)
31175 {
31176- pgd_t *pgd_base = swapper_pg_dir;
31177-
31178- permanent_kmaps_init(pgd_base);
31179+ permanent_kmaps_init(swapper_pg_dir);
31180 }
31181
31182-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
31183+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
31184 EXPORT_SYMBOL_GPL(__supported_pte_mask);
31185
31186 /* user-defined highmem size */
31187@@ -772,7 +774,7 @@ void __init mem_init(void)
31188 after_bootmem = 1;
31189
31190 codesize = (unsigned long) &_etext - (unsigned long) &_text;
31191- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
31192+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
31193 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
31194
31195 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
31196@@ -813,10 +815,10 @@ void __init mem_init(void)
31197 ((unsigned long)&__init_end -
31198 (unsigned long)&__init_begin) >> 10,
31199
31200- (unsigned long)&_etext, (unsigned long)&_edata,
31201- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
31202+ (unsigned long)&_sdata, (unsigned long)&_edata,
31203+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
31204
31205- (unsigned long)&_text, (unsigned long)&_etext,
31206+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
31207 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
31208
31209 /*
31210@@ -906,6 +908,7 @@ void set_kernel_text_rw(void)
31211 if (!kernel_set_to_readonly)
31212 return;
31213
31214+ start = ktla_ktva(start);
31215 pr_debug("Set kernel text: %lx - %lx for read write\n",
31216 start, start+size);
31217
31218@@ -920,6 +923,7 @@ void set_kernel_text_ro(void)
31219 if (!kernel_set_to_readonly)
31220 return;
31221
31222+ start = ktla_ktva(start);
31223 pr_debug("Set kernel text: %lx - %lx for read only\n",
31224 start, start+size);
31225
31226@@ -948,6 +952,7 @@ void mark_rodata_ro(void)
31227 unsigned long start = PFN_ALIGN(_text);
31228 unsigned long size = PFN_ALIGN(_etext) - start;
31229
31230+ start = ktla_ktva(start);
31231 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
31232 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
31233 size >> 10);
31234diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
31235index bb00c46..bf91a67 100644
31236--- a/arch/x86/mm/init_64.c
31237+++ b/arch/x86/mm/init_64.c
31238@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
31239 * around without checking the pgd every time.
31240 */
31241
31242-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
31243+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
31244 EXPORT_SYMBOL_GPL(__supported_pte_mask);
31245
31246 int force_personality32;
31247@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
31248
31249 for (address = start; address <= end; address += PGDIR_SIZE) {
31250 const pgd_t *pgd_ref = pgd_offset_k(address);
31251+
31252+#ifdef CONFIG_PAX_PER_CPU_PGD
31253+ unsigned long cpu;
31254+#else
31255 struct page *page;
31256+#endif
31257
31258 if (pgd_none(*pgd_ref))
31259 continue;
31260
31261 spin_lock(&pgd_lock);
31262+
31263+#ifdef CONFIG_PAX_PER_CPU_PGD
31264+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31265+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
31266+
31267+ if (pgd_none(*pgd))
31268+ set_pgd(pgd, *pgd_ref);
31269+ else
31270+ BUG_ON(pgd_page_vaddr(*pgd)
31271+ != pgd_page_vaddr(*pgd_ref));
31272+ pgd = pgd_offset_cpu(cpu, kernel, address);
31273+#else
31274 list_for_each_entry(page, &pgd_list, lru) {
31275 pgd_t *pgd;
31276 spinlock_t *pgt_lock;
31277@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
31278 /* the pgt_lock only for Xen */
31279 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31280 spin_lock(pgt_lock);
31281+#endif
31282
31283 if (pgd_none(*pgd))
31284 set_pgd(pgd, *pgd_ref);
31285@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
31286 BUG_ON(pgd_page_vaddr(*pgd)
31287 != pgd_page_vaddr(*pgd_ref));
31288
31289+#ifndef CONFIG_PAX_PER_CPU_PGD
31290 spin_unlock(pgt_lock);
31291+#endif
31292+
31293 }
31294 spin_unlock(&pgd_lock);
31295 }
31296@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
31297 {
31298 if (pgd_none(*pgd)) {
31299 pud_t *pud = (pud_t *)spp_getpage();
31300- pgd_populate(&init_mm, pgd, pud);
31301+ pgd_populate_kernel(&init_mm, pgd, pud);
31302 if (pud != pud_offset(pgd, 0))
31303 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
31304 pud, pud_offset(pgd, 0));
31305@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
31306 {
31307 if (pud_none(*pud)) {
31308 pmd_t *pmd = (pmd_t *) spp_getpage();
31309- pud_populate(&init_mm, pud, pmd);
31310+ pud_populate_kernel(&init_mm, pud, pmd);
31311 if (pmd != pmd_offset(pud, 0))
31312 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
31313 pmd, pmd_offset(pud, 0));
31314@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
31315 pmd = fill_pmd(pud, vaddr);
31316 pte = fill_pte(pmd, vaddr);
31317
31318+ pax_open_kernel();
31319 set_pte(pte, new_pte);
31320+ pax_close_kernel();
31321
31322 /*
31323 * It's enough to flush this one mapping.
31324@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
31325 pgd = pgd_offset_k((unsigned long)__va(phys));
31326 if (pgd_none(*pgd)) {
31327 pud = (pud_t *) spp_getpage();
31328- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
31329- _PAGE_USER));
31330+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
31331 }
31332 pud = pud_offset(pgd, (unsigned long)__va(phys));
31333 if (pud_none(*pud)) {
31334 pmd = (pmd_t *) spp_getpage();
31335- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
31336- _PAGE_USER));
31337+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
31338 }
31339 pmd = pmd_offset(pud, phys);
31340 BUG_ON(!pmd_none(*pmd));
31341@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
31342 prot);
31343
31344 spin_lock(&init_mm.page_table_lock);
31345- pud_populate(&init_mm, pud, pmd);
31346+ pud_populate_kernel(&init_mm, pud, pmd);
31347 spin_unlock(&init_mm.page_table_lock);
31348 }
31349 __flush_tlb_all();
31350@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
31351 page_size_mask);
31352
31353 spin_lock(&init_mm.page_table_lock);
31354- pgd_populate(&init_mm, pgd, pud);
31355+ pgd_populate_kernel(&init_mm, pgd, pud);
31356 spin_unlock(&init_mm.page_table_lock);
31357 pgd_changed = true;
31358 }
31359@@ -1221,8 +1242,8 @@ int kern_addr_valid(unsigned long addr)
31360 static struct vm_area_struct gate_vma = {
31361 .vm_start = VSYSCALL_START,
31362 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
31363- .vm_page_prot = PAGE_READONLY_EXEC,
31364- .vm_flags = VM_READ | VM_EXEC
31365+ .vm_page_prot = PAGE_READONLY,
31366+ .vm_flags = VM_READ
31367 };
31368
31369 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
31370@@ -1256,7 +1277,7 @@ int in_gate_area_no_mm(unsigned long addr)
31371
31372 const char *arch_vma_name(struct vm_area_struct *vma)
31373 {
31374- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
31375+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
31376 return "[vdso]";
31377 if (vma == &gate_vma)
31378 return "[vsyscall]";
31379diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
31380index 7b179b4..6bd17777 100644
31381--- a/arch/x86/mm/iomap_32.c
31382+++ b/arch/x86/mm/iomap_32.c
31383@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
31384 type = kmap_atomic_idx_push();
31385 idx = type + KM_TYPE_NR * smp_processor_id();
31386 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
31387+
31388+ pax_open_kernel();
31389 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
31390+ pax_close_kernel();
31391+
31392 arch_flush_lazy_mmu_mode();
31393
31394 return (void *)vaddr;
31395diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
31396index 9a1e658..da003f3 100644
31397--- a/arch/x86/mm/ioremap.c
31398+++ b/arch/x86/mm/ioremap.c
31399@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
31400 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
31401 int is_ram = page_is_ram(pfn);
31402
31403- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
31404+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
31405 return NULL;
31406 WARN_ON_ONCE(is_ram);
31407 }
31408@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
31409 *
31410 * Caller must ensure there is only one unmapping for the same pointer.
31411 */
31412-void iounmap(volatile void __iomem *addr)
31413+void iounmap(const volatile void __iomem *addr)
31414 {
31415 struct vm_struct *p, *o;
31416
31417@@ -310,6 +310,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
31418
31419 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
31420 if (page_is_ram(start >> PAGE_SHIFT))
31421+#ifdef CONFIG_HIGHMEM
31422+ if ((start >> PAGE_SHIFT) < max_low_pfn)
31423+#endif
31424 return __va(phys);
31425
31426 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
31427@@ -322,6 +325,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
31428 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
31429 {
31430 if (page_is_ram(phys >> PAGE_SHIFT))
31431+#ifdef CONFIG_HIGHMEM
31432+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
31433+#endif
31434 return;
31435
31436 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
31437@@ -339,7 +345,7 @@ static int __init early_ioremap_debug_setup(char *str)
31438 early_param("early_ioremap_debug", early_ioremap_debug_setup);
31439
31440 static __initdata int after_paging_init;
31441-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
31442+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
31443
31444 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
31445 {
31446@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
31447 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
31448
31449 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
31450- memset(bm_pte, 0, sizeof(bm_pte));
31451- pmd_populate_kernel(&init_mm, pmd, bm_pte);
31452+ pmd_populate_user(&init_mm, pmd, bm_pte);
31453
31454 /*
31455 * The boot-ioremap range spans multiple pmds, for which
31456diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
31457index d87dd6d..bf3fa66 100644
31458--- a/arch/x86/mm/kmemcheck/kmemcheck.c
31459+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
31460@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
31461 * memory (e.g. tracked pages)? For now, we need this to avoid
31462 * invoking kmemcheck for PnP BIOS calls.
31463 */
31464- if (regs->flags & X86_VM_MASK)
31465+ if (v8086_mode(regs))
31466 return false;
31467- if (regs->cs != __KERNEL_CS)
31468+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
31469 return false;
31470
31471 pte = kmemcheck_pte_lookup(address);
31472diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
31473index 5c1ae28..45f4ac9 100644
31474--- a/arch/x86/mm/mmap.c
31475+++ b/arch/x86/mm/mmap.c
31476@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
31477 * Leave an at least ~128 MB hole with possible stack randomization.
31478 */
31479 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
31480-#define MAX_GAP (TASK_SIZE/6*5)
31481+#define MAX_GAP (pax_task_size/6*5)
31482
31483 static int mmap_is_legacy(void)
31484 {
31485@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
31486 return rnd << PAGE_SHIFT;
31487 }
31488
31489-static unsigned long mmap_base(void)
31490+static unsigned long mmap_base(struct mm_struct *mm)
31491 {
31492 unsigned long gap = rlimit(RLIMIT_STACK);
31493+ unsigned long pax_task_size = TASK_SIZE;
31494+
31495+#ifdef CONFIG_PAX_SEGMEXEC
31496+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
31497+ pax_task_size = SEGMEXEC_TASK_SIZE;
31498+#endif
31499
31500 if (gap < MIN_GAP)
31501 gap = MIN_GAP;
31502 else if (gap > MAX_GAP)
31503 gap = MAX_GAP;
31504
31505- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
31506+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
31507 }
31508
31509 /*
31510 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
31511 * does, but not when emulating X86_32
31512 */
31513-static unsigned long mmap_legacy_base(void)
31514+static unsigned long mmap_legacy_base(struct mm_struct *mm)
31515 {
31516- if (mmap_is_ia32())
31517+ if (mmap_is_ia32()) {
31518+
31519+#ifdef CONFIG_PAX_SEGMEXEC
31520+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
31521+ return SEGMEXEC_TASK_UNMAPPED_BASE;
31522+ else
31523+#endif
31524+
31525 return TASK_UNMAPPED_BASE;
31526- else
31527+ } else
31528 return TASK_UNMAPPED_BASE + mmap_rnd();
31529 }
31530
31531@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
31532 */
31533 void arch_pick_mmap_layout(struct mm_struct *mm)
31534 {
31535- mm->mmap_legacy_base = mmap_legacy_base();
31536- mm->mmap_base = mmap_base();
31537+ mm->mmap_legacy_base = mmap_legacy_base(mm);
31538+ mm->mmap_base = mmap_base(mm);
31539+
31540+#ifdef CONFIG_PAX_RANDMMAP
31541+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
31542+ mm->mmap_legacy_base += mm->delta_mmap;
31543+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
31544+ }
31545+#endif
31546
31547 if (mmap_is_legacy()) {
31548 mm->mmap_base = mm->mmap_legacy_base;
31549diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
31550index dc0b727..f612039 100644
31551--- a/arch/x86/mm/mmio-mod.c
31552+++ b/arch/x86/mm/mmio-mod.c
31553@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
31554 break;
31555 default:
31556 {
31557- unsigned char *ip = (unsigned char *)instptr;
31558+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
31559 my_trace->opcode = MMIO_UNKNOWN_OP;
31560 my_trace->width = 0;
31561 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
31562@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
31563 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
31564 void __iomem *addr)
31565 {
31566- static atomic_t next_id;
31567+ static atomic_unchecked_t next_id;
31568 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
31569 /* These are page-unaligned. */
31570 struct mmiotrace_map map = {
31571@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
31572 .private = trace
31573 },
31574 .phys = offset,
31575- .id = atomic_inc_return(&next_id)
31576+ .id = atomic_inc_return_unchecked(&next_id)
31577 };
31578 map.map_id = trace->id;
31579
31580@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
31581 ioremap_trace_core(offset, size, addr);
31582 }
31583
31584-static void iounmap_trace_core(volatile void __iomem *addr)
31585+static void iounmap_trace_core(const volatile void __iomem *addr)
31586 {
31587 struct mmiotrace_map map = {
31588 .phys = 0,
31589@@ -328,7 +328,7 @@ not_enabled:
31590 }
31591 }
31592
31593-void mmiotrace_iounmap(volatile void __iomem *addr)
31594+void mmiotrace_iounmap(const volatile void __iomem *addr)
31595 {
31596 might_sleep();
31597 if (is_enabled()) /* recheck and proper locking in *_core() */
31598diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
31599index a71c4e2..301ae44 100644
31600--- a/arch/x86/mm/numa.c
31601+++ b/arch/x86/mm/numa.c
31602@@ -474,7 +474,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
31603 return true;
31604 }
31605
31606-static int __init numa_register_memblks(struct numa_meminfo *mi)
31607+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
31608 {
31609 unsigned long uninitialized_var(pfn_align);
31610 int i, nid;
31611diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
31612index d0b1773..4c3327c 100644
31613--- a/arch/x86/mm/pageattr-test.c
31614+++ b/arch/x86/mm/pageattr-test.c
31615@@ -36,7 +36,7 @@ enum {
31616
31617 static int pte_testbit(pte_t pte)
31618 {
31619- return pte_flags(pte) & _PAGE_UNUSED1;
31620+ return pte_flags(pte) & _PAGE_CPA_TEST;
31621 }
31622
31623 struct split_state {
31624diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
31625index bb32480..75f2f5e 100644
31626--- a/arch/x86/mm/pageattr.c
31627+++ b/arch/x86/mm/pageattr.c
31628@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
31629 */
31630 #ifdef CONFIG_PCI_BIOS
31631 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
31632- pgprot_val(forbidden) |= _PAGE_NX;
31633+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
31634 #endif
31635
31636 /*
31637@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
31638 * Does not cover __inittext since that is gone later on. On
31639 * 64bit we do not enforce !NX on the low mapping
31640 */
31641- if (within(address, (unsigned long)_text, (unsigned long)_etext))
31642- pgprot_val(forbidden) |= _PAGE_NX;
31643+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
31644+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
31645
31646+#ifdef CONFIG_DEBUG_RODATA
31647 /*
31648 * The .rodata section needs to be read-only. Using the pfn
31649 * catches all aliases.
31650@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
31651 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
31652 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
31653 pgprot_val(forbidden) |= _PAGE_RW;
31654+#endif
31655
31656 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
31657 /*
31658@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
31659 }
31660 #endif
31661
31662+#ifdef CONFIG_PAX_KERNEXEC
31663+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
31664+ pgprot_val(forbidden) |= _PAGE_RW;
31665+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
31666+ }
31667+#endif
31668+
31669 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
31670
31671 return prot;
31672@@ -400,23 +409,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
31673 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
31674 {
31675 /* change init_mm */
31676+ pax_open_kernel();
31677 set_pte_atomic(kpte, pte);
31678+
31679 #ifdef CONFIG_X86_32
31680 if (!SHARED_KERNEL_PMD) {
31681+
31682+#ifdef CONFIG_PAX_PER_CPU_PGD
31683+ unsigned long cpu;
31684+#else
31685 struct page *page;
31686+#endif
31687
31688+#ifdef CONFIG_PAX_PER_CPU_PGD
31689+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31690+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
31691+#else
31692 list_for_each_entry(page, &pgd_list, lru) {
31693- pgd_t *pgd;
31694+ pgd_t *pgd = (pgd_t *)page_address(page);
31695+#endif
31696+
31697 pud_t *pud;
31698 pmd_t *pmd;
31699
31700- pgd = (pgd_t *)page_address(page) + pgd_index(address);
31701+ pgd += pgd_index(address);
31702 pud = pud_offset(pgd, address);
31703 pmd = pmd_offset(pud, address);
31704 set_pte_atomic((pte_t *)pmd, pte);
31705 }
31706 }
31707 #endif
31708+ pax_close_kernel();
31709 }
31710
31711 static int
31712diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
31713index 6574388..87e9bef 100644
31714--- a/arch/x86/mm/pat.c
31715+++ b/arch/x86/mm/pat.c
31716@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
31717
31718 if (!entry) {
31719 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
31720- current->comm, current->pid, start, end - 1);
31721+ current->comm, task_pid_nr(current), start, end - 1);
31722 return -EINVAL;
31723 }
31724
31725@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31726
31727 while (cursor < to) {
31728 if (!devmem_is_allowed(pfn)) {
31729- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
31730- current->comm, from, to - 1);
31731+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
31732+ current->comm, from, to - 1, cursor);
31733 return 0;
31734 }
31735 cursor += PAGE_SIZE;
31736@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
31737 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
31738 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
31739 "for [mem %#010Lx-%#010Lx]\n",
31740- current->comm, current->pid,
31741+ current->comm, task_pid_nr(current),
31742 cattr_name(flags),
31743 base, (unsigned long long)(base + size-1));
31744 return -EINVAL;
31745@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
31746 flags = lookup_memtype(paddr);
31747 if (want_flags != flags) {
31748 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
31749- current->comm, current->pid,
31750+ current->comm, task_pid_nr(current),
31751 cattr_name(want_flags),
31752 (unsigned long long)paddr,
31753 (unsigned long long)(paddr + size - 1),
31754@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
31755 free_memtype(paddr, paddr + size);
31756 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
31757 " for [mem %#010Lx-%#010Lx], got %s\n",
31758- current->comm, current->pid,
31759+ current->comm, task_pid_nr(current),
31760 cattr_name(want_flags),
31761 (unsigned long long)paddr,
31762 (unsigned long long)(paddr + size - 1),
31763diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
31764index 415f6c4..d319983 100644
31765--- a/arch/x86/mm/pat_rbtree.c
31766+++ b/arch/x86/mm/pat_rbtree.c
31767@@ -160,7 +160,7 @@ success:
31768
31769 failure:
31770 printk(KERN_INFO "%s:%d conflicting memory types "
31771- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
31772+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
31773 end, cattr_name(found_type), cattr_name(match->type));
31774 return -EBUSY;
31775 }
31776diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
31777index 9f0614d..92ae64a 100644
31778--- a/arch/x86/mm/pf_in.c
31779+++ b/arch/x86/mm/pf_in.c
31780@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
31781 int i;
31782 enum reason_type rv = OTHERS;
31783
31784- p = (unsigned char *)ins_addr;
31785+ p = (unsigned char *)ktla_ktva(ins_addr);
31786 p += skip_prefix(p, &prf);
31787 p += get_opcode(p, &opcode);
31788
31789@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
31790 struct prefix_bits prf;
31791 int i;
31792
31793- p = (unsigned char *)ins_addr;
31794+ p = (unsigned char *)ktla_ktva(ins_addr);
31795 p += skip_prefix(p, &prf);
31796 p += get_opcode(p, &opcode);
31797
31798@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
31799 struct prefix_bits prf;
31800 int i;
31801
31802- p = (unsigned char *)ins_addr;
31803+ p = (unsigned char *)ktla_ktva(ins_addr);
31804 p += skip_prefix(p, &prf);
31805 p += get_opcode(p, &opcode);
31806
31807@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
31808 struct prefix_bits prf;
31809 int i;
31810
31811- p = (unsigned char *)ins_addr;
31812+ p = (unsigned char *)ktla_ktva(ins_addr);
31813 p += skip_prefix(p, &prf);
31814 p += get_opcode(p, &opcode);
31815 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
31816@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
31817 struct prefix_bits prf;
31818 int i;
31819
31820- p = (unsigned char *)ins_addr;
31821+ p = (unsigned char *)ktla_ktva(ins_addr);
31822 p += skip_prefix(p, &prf);
31823 p += get_opcode(p, &opcode);
31824 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
31825diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
31826index 17fda6a..f7d54a0 100644
31827--- a/arch/x86/mm/pgtable.c
31828+++ b/arch/x86/mm/pgtable.c
31829@@ -91,10 +91,67 @@ static inline void pgd_list_del(pgd_t *pgd)
31830 list_del(&page->lru);
31831 }
31832
31833-#define UNSHARED_PTRS_PER_PGD \
31834- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
31835+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31836+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
31837
31838+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
31839+{
31840+ unsigned int count = USER_PGD_PTRS;
31841
31842+ if (!pax_user_shadow_base)
31843+ return;
31844+
31845+ while (count--)
31846+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
31847+}
31848+#endif
31849+
31850+#ifdef CONFIG_PAX_PER_CPU_PGD
31851+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
31852+{
31853+ unsigned int count = USER_PGD_PTRS;
31854+
31855+ while (count--) {
31856+ pgd_t pgd;
31857+
31858+#ifdef CONFIG_X86_64
31859+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
31860+#else
31861+ pgd = *src++;
31862+#endif
31863+
31864+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31865+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
31866+#endif
31867+
31868+ *dst++ = pgd;
31869+ }
31870+
31871+}
31872+#endif
31873+
31874+#ifdef CONFIG_X86_64
31875+#define pxd_t pud_t
31876+#define pyd_t pgd_t
31877+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
31878+#define pxd_free(mm, pud) pud_free((mm), (pud))
31879+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
31880+#define pyd_offset(mm, address) pgd_offset((mm), (address))
31881+#define PYD_SIZE PGDIR_SIZE
31882+#else
31883+#define pxd_t pmd_t
31884+#define pyd_t pud_t
31885+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
31886+#define pxd_free(mm, pud) pmd_free((mm), (pud))
31887+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
31888+#define pyd_offset(mm, address) pud_offset((mm), (address))
31889+#define PYD_SIZE PUD_SIZE
31890+#endif
31891+
31892+#ifdef CONFIG_PAX_PER_CPU_PGD
31893+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
31894+static inline void pgd_dtor(pgd_t *pgd) {}
31895+#else
31896 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
31897 {
31898 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
31899@@ -135,6 +192,7 @@ static void pgd_dtor(pgd_t *pgd)
31900 pgd_list_del(pgd);
31901 spin_unlock(&pgd_lock);
31902 }
31903+#endif
31904
31905 /*
31906 * List of all pgd's needed for non-PAE so it can invalidate entries
31907@@ -147,7 +205,7 @@ static void pgd_dtor(pgd_t *pgd)
31908 * -- nyc
31909 */
31910
31911-#ifdef CONFIG_X86_PAE
31912+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
31913 /*
31914 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
31915 * updating the top-level pagetable entries to guarantee the
31916@@ -159,7 +217,7 @@ static void pgd_dtor(pgd_t *pgd)
31917 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
31918 * and initialize the kernel pmds here.
31919 */
31920-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
31921+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
31922
31923 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
31924 {
31925@@ -177,36 +235,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
31926 */
31927 flush_tlb_mm(mm);
31928 }
31929+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
31930+#define PREALLOCATED_PXDS USER_PGD_PTRS
31931 #else /* !CONFIG_X86_PAE */
31932
31933 /* No need to prepopulate any pagetable entries in non-PAE modes. */
31934-#define PREALLOCATED_PMDS 0
31935+#define PREALLOCATED_PXDS 0
31936
31937 #endif /* CONFIG_X86_PAE */
31938
31939-static void free_pmds(pmd_t *pmds[])
31940+static void free_pxds(pxd_t *pxds[])
31941 {
31942 int i;
31943
31944- for(i = 0; i < PREALLOCATED_PMDS; i++)
31945- if (pmds[i])
31946- free_page((unsigned long)pmds[i]);
31947+ for(i = 0; i < PREALLOCATED_PXDS; i++)
31948+ if (pxds[i])
31949+ free_page((unsigned long)pxds[i]);
31950 }
31951
31952-static int preallocate_pmds(pmd_t *pmds[])
31953+static int preallocate_pxds(pxd_t *pxds[])
31954 {
31955 int i;
31956 bool failed = false;
31957
31958- for(i = 0; i < PREALLOCATED_PMDS; i++) {
31959- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
31960- if (pmd == NULL)
31961+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
31962+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
31963+ if (pxd == NULL)
31964 failed = true;
31965- pmds[i] = pmd;
31966+ pxds[i] = pxd;
31967 }
31968
31969 if (failed) {
31970- free_pmds(pmds);
31971+ free_pxds(pxds);
31972 return -ENOMEM;
31973 }
31974
31975@@ -219,51 +279,55 @@ static int preallocate_pmds(pmd_t *pmds[])
31976 * preallocate which never got a corresponding vma will need to be
31977 * freed manually.
31978 */
31979-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
31980+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
31981 {
31982 int i;
31983
31984- for(i = 0; i < PREALLOCATED_PMDS; i++) {
31985+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
31986 pgd_t pgd = pgdp[i];
31987
31988 if (pgd_val(pgd) != 0) {
31989- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
31990+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
31991
31992- pgdp[i] = native_make_pgd(0);
31993+ set_pgd(pgdp + i, native_make_pgd(0));
31994
31995- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
31996- pmd_free(mm, pmd);
31997+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
31998+ pxd_free(mm, pxd);
31999 }
32000 }
32001 }
32002
32003-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
32004+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
32005 {
32006- pud_t *pud;
32007+ pyd_t *pyd;
32008 unsigned long addr;
32009 int i;
32010
32011- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
32012+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
32013 return;
32014
32015- pud = pud_offset(pgd, 0);
32016+#ifdef CONFIG_X86_64
32017+ pyd = pyd_offset(mm, 0L);
32018+#else
32019+ pyd = pyd_offset(pgd, 0L);
32020+#endif
32021
32022- for (addr = i = 0; i < PREALLOCATED_PMDS;
32023- i++, pud++, addr += PUD_SIZE) {
32024- pmd_t *pmd = pmds[i];
32025+ for (addr = i = 0; i < PREALLOCATED_PXDS;
32026+ i++, pyd++, addr += PYD_SIZE) {
32027+ pxd_t *pxd = pxds[i];
32028
32029 if (i >= KERNEL_PGD_BOUNDARY)
32030- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
32031- sizeof(pmd_t) * PTRS_PER_PMD);
32032+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
32033+ sizeof(pxd_t) * PTRS_PER_PMD);
32034
32035- pud_populate(mm, pud, pmd);
32036+ pyd_populate(mm, pyd, pxd);
32037 }
32038 }
32039
32040 pgd_t *pgd_alloc(struct mm_struct *mm)
32041 {
32042 pgd_t *pgd;
32043- pmd_t *pmds[PREALLOCATED_PMDS];
32044+ pxd_t *pxds[PREALLOCATED_PXDS];
32045
32046 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
32047
32048@@ -272,11 +336,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
32049
32050 mm->pgd = pgd;
32051
32052- if (preallocate_pmds(pmds) != 0)
32053+ if (preallocate_pxds(pxds) != 0)
32054 goto out_free_pgd;
32055
32056 if (paravirt_pgd_alloc(mm) != 0)
32057- goto out_free_pmds;
32058+ goto out_free_pxds;
32059
32060 /*
32061 * Make sure that pre-populating the pmds is atomic with
32062@@ -286,14 +350,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
32063 spin_lock(&pgd_lock);
32064
32065 pgd_ctor(mm, pgd);
32066- pgd_prepopulate_pmd(mm, pgd, pmds);
32067+ pgd_prepopulate_pxd(mm, pgd, pxds);
32068
32069 spin_unlock(&pgd_lock);
32070
32071 return pgd;
32072
32073-out_free_pmds:
32074- free_pmds(pmds);
32075+out_free_pxds:
32076+ free_pxds(pxds);
32077 out_free_pgd:
32078 free_page((unsigned long)pgd);
32079 out:
32080@@ -302,7 +366,7 @@ out:
32081
32082 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
32083 {
32084- pgd_mop_up_pmds(mm, pgd);
32085+ pgd_mop_up_pxds(mm, pgd);
32086 pgd_dtor(pgd);
32087 paravirt_pgd_free(mm, pgd);
32088 free_page((unsigned long)pgd);
32089diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
32090index a69bcb8..19068ab 100644
32091--- a/arch/x86/mm/pgtable_32.c
32092+++ b/arch/x86/mm/pgtable_32.c
32093@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
32094 return;
32095 }
32096 pte = pte_offset_kernel(pmd, vaddr);
32097+
32098+ pax_open_kernel();
32099 if (pte_val(pteval))
32100 set_pte_at(&init_mm, vaddr, pte, pteval);
32101 else
32102 pte_clear(&init_mm, vaddr, pte);
32103+ pax_close_kernel();
32104
32105 /*
32106 * It's enough to flush this one mapping.
32107diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
32108index e666cbb..61788c45 100644
32109--- a/arch/x86/mm/physaddr.c
32110+++ b/arch/x86/mm/physaddr.c
32111@@ -10,7 +10,7 @@
32112 #ifdef CONFIG_X86_64
32113
32114 #ifdef CONFIG_DEBUG_VIRTUAL
32115-unsigned long __phys_addr(unsigned long x)
32116+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
32117 {
32118 unsigned long y = x - __START_KERNEL_map;
32119
32120@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
32121 #else
32122
32123 #ifdef CONFIG_DEBUG_VIRTUAL
32124-unsigned long __phys_addr(unsigned long x)
32125+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
32126 {
32127 unsigned long phys_addr = x - PAGE_OFFSET;
32128 /* VMALLOC_* aren't constants */
32129diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
32130index 410531d..0f16030 100644
32131--- a/arch/x86/mm/setup_nx.c
32132+++ b/arch/x86/mm/setup_nx.c
32133@@ -5,8 +5,10 @@
32134 #include <asm/pgtable.h>
32135 #include <asm/proto.h>
32136
32137+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
32138 static int disable_nx __cpuinitdata;
32139
32140+#ifndef CONFIG_PAX_PAGEEXEC
32141 /*
32142 * noexec = on|off
32143 *
32144@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
32145 return 0;
32146 }
32147 early_param("noexec", noexec_setup);
32148+#endif
32149+
32150+#endif
32151
32152 void __cpuinit x86_configure_nx(void)
32153 {
32154+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
32155 if (cpu_has_nx && !disable_nx)
32156 __supported_pte_mask |= _PAGE_NX;
32157 else
32158+#endif
32159 __supported_pte_mask &= ~_PAGE_NX;
32160 }
32161
32162diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
32163index 282375f..e03a98f 100644
32164--- a/arch/x86/mm/tlb.c
32165+++ b/arch/x86/mm/tlb.c
32166@@ -48,7 +48,11 @@ void leave_mm(int cpu)
32167 BUG();
32168 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
32169 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
32170+
32171+#ifndef CONFIG_PAX_PER_CPU_PGD
32172 load_cr3(swapper_pg_dir);
32173+#endif
32174+
32175 }
32176 }
32177 EXPORT_SYMBOL_GPL(leave_mm);
32178diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
32179new file mode 100644
32180index 0000000..dace51c
32181--- /dev/null
32182+++ b/arch/x86/mm/uderef_64.c
32183@@ -0,0 +1,37 @@
32184+#include <linux/mm.h>
32185+#include <asm/pgtable.h>
32186+#include <asm/uaccess.h>
32187+
32188+#ifdef CONFIG_PAX_MEMORY_UDEREF
32189+/* PaX: due to the special call convention these functions must
32190+ * - remain leaf functions under all configurations,
32191+ * - never be called directly, only dereferenced from the wrappers.
32192+ */
32193+void __pax_open_userland(void)
32194+{
32195+ unsigned int cpu;
32196+
32197+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
32198+ return;
32199+
32200+ cpu = raw_get_cpu();
32201+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
32202+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
32203+ raw_put_cpu_no_resched();
32204+}
32205+EXPORT_SYMBOL(__pax_open_userland);
32206+
32207+void __pax_close_userland(void)
32208+{
32209+ unsigned int cpu;
32210+
32211+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
32212+ return;
32213+
32214+ cpu = raw_get_cpu();
32215+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
32216+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
32217+ raw_put_cpu_no_resched();
32218+}
32219+EXPORT_SYMBOL(__pax_close_userland);
32220+#endif
32221diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
32222index 877b9a1..a8ecf42 100644
32223--- a/arch/x86/net/bpf_jit.S
32224+++ b/arch/x86/net/bpf_jit.S
32225@@ -9,6 +9,7 @@
32226 */
32227 #include <linux/linkage.h>
32228 #include <asm/dwarf2.h>
32229+#include <asm/alternative-asm.h>
32230
32231 /*
32232 * Calling convention :
32233@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
32234 jle bpf_slow_path_word
32235 mov (SKBDATA,%rsi),%eax
32236 bswap %eax /* ntohl() */
32237+ pax_force_retaddr
32238 ret
32239
32240 sk_load_half:
32241@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
32242 jle bpf_slow_path_half
32243 movzwl (SKBDATA,%rsi),%eax
32244 rol $8,%ax # ntohs()
32245+ pax_force_retaddr
32246 ret
32247
32248 sk_load_byte:
32249@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
32250 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
32251 jle bpf_slow_path_byte
32252 movzbl (SKBDATA,%rsi),%eax
32253+ pax_force_retaddr
32254 ret
32255
32256 /**
32257@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
32258 movzbl (SKBDATA,%rsi),%ebx
32259 and $15,%bl
32260 shl $2,%bl
32261+ pax_force_retaddr
32262 ret
32263
32264 /* rsi contains offset and can be scratched */
32265@@ -109,6 +114,7 @@ bpf_slow_path_word:
32266 js bpf_error
32267 mov -12(%rbp),%eax
32268 bswap %eax
32269+ pax_force_retaddr
32270 ret
32271
32272 bpf_slow_path_half:
32273@@ -117,12 +123,14 @@ bpf_slow_path_half:
32274 mov -12(%rbp),%ax
32275 rol $8,%ax
32276 movzwl %ax,%eax
32277+ pax_force_retaddr
32278 ret
32279
32280 bpf_slow_path_byte:
32281 bpf_slow_path_common(1)
32282 js bpf_error
32283 movzbl -12(%rbp),%eax
32284+ pax_force_retaddr
32285 ret
32286
32287 bpf_slow_path_byte_msh:
32288@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
32289 and $15,%al
32290 shl $2,%al
32291 xchg %eax,%ebx
32292+ pax_force_retaddr
32293 ret
32294
32295 #define sk_negative_common(SIZE) \
32296@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
32297 sk_negative_common(4)
32298 mov (%rax), %eax
32299 bswap %eax
32300+ pax_force_retaddr
32301 ret
32302
32303 bpf_slow_path_half_neg:
32304@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
32305 mov (%rax),%ax
32306 rol $8,%ax
32307 movzwl %ax,%eax
32308+ pax_force_retaddr
32309 ret
32310
32311 bpf_slow_path_byte_neg:
32312@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
32313 .globl sk_load_byte_negative_offset
32314 sk_negative_common(1)
32315 movzbl (%rax), %eax
32316+ pax_force_retaddr
32317 ret
32318
32319 bpf_slow_path_byte_msh_neg:
32320@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
32321 and $15,%al
32322 shl $2,%al
32323 xchg %eax,%ebx
32324+ pax_force_retaddr
32325 ret
32326
32327 bpf_error:
32328@@ -197,4 +210,5 @@ bpf_error:
32329 xor %eax,%eax
32330 mov -8(%rbp),%rbx
32331 leaveq
32332+ pax_force_retaddr
32333 ret
32334diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
32335index f66b540..3e88dfb 100644
32336--- a/arch/x86/net/bpf_jit_comp.c
32337+++ b/arch/x86/net/bpf_jit_comp.c
32338@@ -12,6 +12,7 @@
32339 #include <linux/netdevice.h>
32340 #include <linux/filter.h>
32341 #include <linux/if_vlan.h>
32342+#include <linux/random.h>
32343
32344 /*
32345 * Conventions :
32346@@ -49,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
32347 return ptr + len;
32348 }
32349
32350+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32351+#define MAX_INSTR_CODE_SIZE 96
32352+#else
32353+#define MAX_INSTR_CODE_SIZE 64
32354+#endif
32355+
32356 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
32357
32358 #define EMIT1(b1) EMIT(b1, 1)
32359 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
32360 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
32361 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
32362+
32363+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32364+/* original constant will appear in ecx */
32365+#define DILUTE_CONST_SEQUENCE(_off, _key) \
32366+do { \
32367+ /* mov ecx, randkey */ \
32368+ EMIT1(0xb9); \
32369+ EMIT(_key, 4); \
32370+ /* xor ecx, randkey ^ off */ \
32371+ EMIT2(0x81, 0xf1); \
32372+ EMIT((_key) ^ (_off), 4); \
32373+} while (0)
32374+
32375+#define EMIT1_off32(b1, _off) \
32376+do { \
32377+ switch (b1) { \
32378+ case 0x05: /* add eax, imm32 */ \
32379+ case 0x2d: /* sub eax, imm32 */ \
32380+ case 0x25: /* and eax, imm32 */ \
32381+ case 0x0d: /* or eax, imm32 */ \
32382+ case 0xb8: /* mov eax, imm32 */ \
32383+ case 0x35: /* xor eax, imm32 */ \
32384+ case 0x3d: /* cmp eax, imm32 */ \
32385+ case 0xa9: /* test eax, imm32 */ \
32386+ DILUTE_CONST_SEQUENCE(_off, randkey); \
32387+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
32388+ break; \
32389+ case 0xbb: /* mov ebx, imm32 */ \
32390+ DILUTE_CONST_SEQUENCE(_off, randkey); \
32391+ /* mov ebx, ecx */ \
32392+ EMIT2(0x89, 0xcb); \
32393+ break; \
32394+ case 0xbe: /* mov esi, imm32 */ \
32395+ DILUTE_CONST_SEQUENCE(_off, randkey); \
32396+ /* mov esi, ecx */ \
32397+ EMIT2(0x89, 0xce); \
32398+ break; \
32399+ case 0xe8: /* call rel imm32, always to known funcs */ \
32400+ EMIT1(b1); \
32401+ EMIT(_off, 4); \
32402+ break; \
32403+ case 0xe9: /* jmp rel imm32 */ \
32404+ EMIT1(b1); \
32405+ EMIT(_off, 4); \
32406+ /* prevent fall-through, we're not called if off = 0 */ \
32407+ EMIT(0xcccccccc, 4); \
32408+ EMIT(0xcccccccc, 4); \
32409+ break; \
32410+ default: \
32411+ BUILD_BUG(); \
32412+ } \
32413+} while (0)
32414+
32415+#define EMIT2_off32(b1, b2, _off) \
32416+do { \
32417+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
32418+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
32419+ EMIT(randkey, 4); \
32420+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
32421+ EMIT((_off) - randkey, 4); \
32422+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
32423+ DILUTE_CONST_SEQUENCE(_off, randkey); \
32424+ /* imul eax, ecx */ \
32425+ EMIT3(0x0f, 0xaf, 0xc1); \
32426+ } else { \
32427+ BUILD_BUG(); \
32428+ } \
32429+} while (0)
32430+#else
32431 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
32432+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
32433+#endif
32434
32435 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
32436 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
32437@@ -90,6 +168,24 @@ do { \
32438 #define X86_JBE 0x76
32439 #define X86_JA 0x77
32440
32441+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32442+#define APPEND_FLOW_VERIFY() \
32443+do { \
32444+ /* mov ecx, randkey */ \
32445+ EMIT1(0xb9); \
32446+ EMIT(randkey, 4); \
32447+ /* cmp ecx, randkey */ \
32448+ EMIT2(0x81, 0xf9); \
32449+ EMIT(randkey, 4); \
32450+ /* jz after 8 int 3s */ \
32451+ EMIT2(0x74, 0x08); \
32452+ EMIT(0xcccccccc, 4); \
32453+ EMIT(0xcccccccc, 4); \
32454+} while (0)
32455+#else
32456+#define APPEND_FLOW_VERIFY() do { } while (0)
32457+#endif
32458+
32459 #define EMIT_COND_JMP(op, offset) \
32460 do { \
32461 if (is_near(offset)) \
32462@@ -97,6 +193,7 @@ do { \
32463 else { \
32464 EMIT2(0x0f, op + 0x10); \
32465 EMIT(offset, 4); /* jxx .+off32 */ \
32466+ APPEND_FLOW_VERIFY(); \
32467 } \
32468 } while (0)
32469
32470@@ -121,6 +218,11 @@ static inline void bpf_flush_icache(void *start, void *end)
32471 set_fs(old_fs);
32472 }
32473
32474+struct bpf_jit_work {
32475+ struct work_struct work;
32476+ void *image;
32477+};
32478+
32479 #define CHOOSE_LOAD_FUNC(K, func) \
32480 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
32481
32482@@ -146,7 +248,7 @@ static int pkt_type_offset(void)
32483
32484 void bpf_jit_compile(struct sk_filter *fp)
32485 {
32486- u8 temp[64];
32487+ u8 temp[MAX_INSTR_CODE_SIZE];
32488 u8 *prog;
32489 unsigned int proglen, oldproglen = 0;
32490 int ilen, i;
32491@@ -159,6 +261,9 @@ void bpf_jit_compile(struct sk_filter *fp)
32492 unsigned int *addrs;
32493 const struct sock_filter *filter = fp->insns;
32494 int flen = fp->len;
32495+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32496+ unsigned int randkey;
32497+#endif
32498
32499 if (!bpf_jit_enable)
32500 return;
32501@@ -167,11 +272,19 @@ void bpf_jit_compile(struct sk_filter *fp)
32502 if (addrs == NULL)
32503 return;
32504
32505+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
32506+ if (!fp->work)
32507+ goto out;
32508+
32509+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32510+ randkey = get_random_int();
32511+#endif
32512+
32513 /* Before first pass, make a rough estimation of addrs[]
32514- * each bpf instruction is translated to less than 64 bytes
32515+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
32516 */
32517 for (proglen = 0, i = 0; i < flen; i++) {
32518- proglen += 64;
32519+ proglen += MAX_INSTR_CODE_SIZE;
32520 addrs[i] = proglen;
32521 }
32522 cleanup_addr = proglen; /* epilogue address */
32523@@ -282,10 +395,8 @@ void bpf_jit_compile(struct sk_filter *fp)
32524 case BPF_S_ALU_MUL_K: /* A *= K */
32525 if (is_imm8(K))
32526 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
32527- else {
32528- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
32529- EMIT(K, 4);
32530- }
32531+ else
32532+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
32533 break;
32534 case BPF_S_ALU_DIV_X: /* A /= X; */
32535 seen |= SEEN_XREG;
32536@@ -325,13 +436,23 @@ void bpf_jit_compile(struct sk_filter *fp)
32537 break;
32538 case BPF_S_ALU_MOD_K: /* A %= K; */
32539 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
32540+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32541+ DILUTE_CONST_SEQUENCE(K, randkey);
32542+#else
32543 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
32544+#endif
32545 EMIT2(0xf7, 0xf1); /* div %ecx */
32546 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
32547 break;
32548 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
32549+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32550+ DILUTE_CONST_SEQUENCE(K, randkey);
32551+ // imul rax, rcx
32552+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
32553+#else
32554 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
32555 EMIT(K, 4);
32556+#endif
32557 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
32558 break;
32559 case BPF_S_ALU_AND_X:
32560@@ -602,8 +723,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
32561 if (is_imm8(K)) {
32562 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
32563 } else {
32564- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
32565- EMIT(K, 4);
32566+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
32567 }
32568 } else {
32569 EMIT2(0x89,0xde); /* mov %ebx,%esi */
32570@@ -686,17 +806,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
32571 break;
32572 default:
32573 /* hmm, too complex filter, give up with jit compiler */
32574- goto out;
32575+ goto error;
32576 }
32577 ilen = prog - temp;
32578 if (image) {
32579 if (unlikely(proglen + ilen > oldproglen)) {
32580 pr_err("bpb_jit_compile fatal error\n");
32581- kfree(addrs);
32582- module_free(NULL, image);
32583- return;
32584+ module_free_exec(NULL, image);
32585+ goto error;
32586 }
32587+ pax_open_kernel();
32588 memcpy(image + proglen, temp, ilen);
32589+ pax_close_kernel();
32590 }
32591 proglen += ilen;
32592 addrs[i] = proglen;
32593@@ -717,11 +838,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
32594 break;
32595 }
32596 if (proglen == oldproglen) {
32597- image = module_alloc(max_t(unsigned int,
32598- proglen,
32599- sizeof(struct work_struct)));
32600+ image = module_alloc_exec(proglen);
32601 if (!image)
32602- goto out;
32603+ goto error;
32604 }
32605 oldproglen = proglen;
32606 }
32607@@ -732,7 +851,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
32608 if (image) {
32609 bpf_flush_icache(image, image + proglen);
32610 fp->bpf_func = (void *)image;
32611- }
32612+ } else
32613+error:
32614+ kfree(fp->work);
32615+
32616 out:
32617 kfree(addrs);
32618 return;
32619@@ -740,18 +862,20 @@ out:
32620
32621 static void jit_free_defer(struct work_struct *arg)
32622 {
32623- module_free(NULL, arg);
32624+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
32625+ kfree(arg);
32626 }
32627
32628 /* run from softirq, we must use a work_struct to call
32629- * module_free() from process context
32630+ * module_free_exec() from process context
32631 */
32632 void bpf_jit_free(struct sk_filter *fp)
32633 {
32634 if (fp->bpf_func != sk_run_filter) {
32635- struct work_struct *work = (struct work_struct *)fp->bpf_func;
32636+ struct work_struct *work = &fp->work->work;
32637
32638 INIT_WORK(work, jit_free_defer);
32639+ fp->work->image = fp->bpf_func;
32640 schedule_work(work);
32641 }
32642 }
32643diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
32644index d6aa6e8..266395a 100644
32645--- a/arch/x86/oprofile/backtrace.c
32646+++ b/arch/x86/oprofile/backtrace.c
32647@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
32648 struct stack_frame_ia32 *fp;
32649 unsigned long bytes;
32650
32651- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
32652+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
32653 if (bytes != sizeof(bufhead))
32654 return NULL;
32655
32656- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
32657+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
32658
32659 oprofile_add_trace(bufhead[0].return_address);
32660
32661@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
32662 struct stack_frame bufhead[2];
32663 unsigned long bytes;
32664
32665- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
32666+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
32667 if (bytes != sizeof(bufhead))
32668 return NULL;
32669
32670@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
32671 {
32672 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
32673
32674- if (!user_mode_vm(regs)) {
32675+ if (!user_mode(regs)) {
32676 unsigned long stack = kernel_stack_pointer(regs);
32677 if (depth)
32678 dump_trace(NULL, regs, (unsigned long *)stack, 0,
32679diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
32680index 48768df..ba9143c 100644
32681--- a/arch/x86/oprofile/nmi_int.c
32682+++ b/arch/x86/oprofile/nmi_int.c
32683@@ -23,6 +23,7 @@
32684 #include <asm/nmi.h>
32685 #include <asm/msr.h>
32686 #include <asm/apic.h>
32687+#include <asm/pgtable.h>
32688
32689 #include "op_counter.h"
32690 #include "op_x86_model.h"
32691@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
32692 if (ret)
32693 return ret;
32694
32695- if (!model->num_virt_counters)
32696- model->num_virt_counters = model->num_counters;
32697+ if (!model->num_virt_counters) {
32698+ pax_open_kernel();
32699+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
32700+ pax_close_kernel();
32701+ }
32702
32703 mux_init(ops);
32704
32705diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
32706index b2b9443..be58856 100644
32707--- a/arch/x86/oprofile/op_model_amd.c
32708+++ b/arch/x86/oprofile/op_model_amd.c
32709@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
32710 num_counters = AMD64_NUM_COUNTERS;
32711 }
32712
32713- op_amd_spec.num_counters = num_counters;
32714- op_amd_spec.num_controls = num_counters;
32715- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
32716+ pax_open_kernel();
32717+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
32718+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
32719+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
32720+ pax_close_kernel();
32721
32722 return 0;
32723 }
32724diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
32725index d90528e..0127e2b 100644
32726--- a/arch/x86/oprofile/op_model_ppro.c
32727+++ b/arch/x86/oprofile/op_model_ppro.c
32728@@ -19,6 +19,7 @@
32729 #include <asm/msr.h>
32730 #include <asm/apic.h>
32731 #include <asm/nmi.h>
32732+#include <asm/pgtable.h>
32733
32734 #include "op_x86_model.h"
32735 #include "op_counter.h"
32736@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
32737
32738 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
32739
32740- op_arch_perfmon_spec.num_counters = num_counters;
32741- op_arch_perfmon_spec.num_controls = num_counters;
32742+ pax_open_kernel();
32743+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
32744+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
32745+ pax_close_kernel();
32746 }
32747
32748 static int arch_perfmon_init(struct oprofile_operations *ignore)
32749diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
32750index 71e8a67..6a313bb 100644
32751--- a/arch/x86/oprofile/op_x86_model.h
32752+++ b/arch/x86/oprofile/op_x86_model.h
32753@@ -52,7 +52,7 @@ struct op_x86_model_spec {
32754 void (*switch_ctrl)(struct op_x86_model_spec const *model,
32755 struct op_msrs const * const msrs);
32756 #endif
32757-};
32758+} __do_const;
32759
32760 struct op_counter_config;
32761
32762diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
32763index e9e6ed5..e47ae67 100644
32764--- a/arch/x86/pci/amd_bus.c
32765+++ b/arch/x86/pci/amd_bus.c
32766@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
32767 return NOTIFY_OK;
32768 }
32769
32770-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
32771+static struct notifier_block amd_cpu_notifier = {
32772 .notifier_call = amd_cpu_notify,
32773 };
32774
32775diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
32776index 372e9b8..e775a6c 100644
32777--- a/arch/x86/pci/irq.c
32778+++ b/arch/x86/pci/irq.c
32779@@ -50,7 +50,7 @@ struct irq_router {
32780 struct irq_router_handler {
32781 u16 vendor;
32782 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
32783-};
32784+} __do_const;
32785
32786 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
32787 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
32788@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
32789 return 0;
32790 }
32791
32792-static __initdata struct irq_router_handler pirq_routers[] = {
32793+static __initconst const struct irq_router_handler pirq_routers[] = {
32794 { PCI_VENDOR_ID_INTEL, intel_router_probe },
32795 { PCI_VENDOR_ID_AL, ali_router_probe },
32796 { PCI_VENDOR_ID_ITE, ite_router_probe },
32797@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
32798 static void __init pirq_find_router(struct irq_router *r)
32799 {
32800 struct irq_routing_table *rt = pirq_table;
32801- struct irq_router_handler *h;
32802+ const struct irq_router_handler *h;
32803
32804 #ifdef CONFIG_PCI_BIOS
32805 if (!rt->signature) {
32806@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
32807 return 0;
32808 }
32809
32810-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
32811+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
32812 {
32813 .callback = fix_broken_hp_bios_irq9,
32814 .ident = "HP Pavilion N5400 Series Laptop",
32815diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
32816index 6eb18c4..20d83de 100644
32817--- a/arch/x86/pci/mrst.c
32818+++ b/arch/x86/pci/mrst.c
32819@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
32820 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
32821 pci_mmcfg_late_init();
32822 pcibios_enable_irq = mrst_pci_irq_enable;
32823- pci_root_ops = pci_mrst_ops;
32824+ pax_open_kernel();
32825+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
32826+ pax_close_kernel();
32827 pci_soc_mode = 1;
32828 /* Continue with standard init */
32829 return 1;
32830diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
32831index c77b24a..c979855 100644
32832--- a/arch/x86/pci/pcbios.c
32833+++ b/arch/x86/pci/pcbios.c
32834@@ -79,7 +79,7 @@ union bios32 {
32835 static struct {
32836 unsigned long address;
32837 unsigned short segment;
32838-} bios32_indirect = { 0, __KERNEL_CS };
32839+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
32840
32841 /*
32842 * Returns the entry point for the given service, NULL on error
32843@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
32844 unsigned long length; /* %ecx */
32845 unsigned long entry; /* %edx */
32846 unsigned long flags;
32847+ struct desc_struct d, *gdt;
32848
32849 local_irq_save(flags);
32850- __asm__("lcall *(%%edi); cld"
32851+
32852+ gdt = get_cpu_gdt_table(smp_processor_id());
32853+
32854+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
32855+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
32856+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
32857+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
32858+
32859+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
32860 : "=a" (return_code),
32861 "=b" (address),
32862 "=c" (length),
32863 "=d" (entry)
32864 : "0" (service),
32865 "1" (0),
32866- "D" (&bios32_indirect));
32867+ "D" (&bios32_indirect),
32868+ "r"(__PCIBIOS_DS)
32869+ : "memory");
32870+
32871+ pax_open_kernel();
32872+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
32873+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
32874+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
32875+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
32876+ pax_close_kernel();
32877+
32878 local_irq_restore(flags);
32879
32880 switch (return_code) {
32881- case 0:
32882- return address + entry;
32883- case 0x80: /* Not present */
32884- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
32885- return 0;
32886- default: /* Shouldn't happen */
32887- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
32888- service, return_code);
32889+ case 0: {
32890+ int cpu;
32891+ unsigned char flags;
32892+
32893+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
32894+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
32895+ printk(KERN_WARNING "bios32_service: not valid\n");
32896 return 0;
32897+ }
32898+ address = address + PAGE_OFFSET;
32899+ length += 16UL; /* some BIOSs underreport this... */
32900+ flags = 4;
32901+ if (length >= 64*1024*1024) {
32902+ length >>= PAGE_SHIFT;
32903+ flags |= 8;
32904+ }
32905+
32906+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32907+ gdt = get_cpu_gdt_table(cpu);
32908+ pack_descriptor(&d, address, length, 0x9b, flags);
32909+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
32910+ pack_descriptor(&d, address, length, 0x93, flags);
32911+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
32912+ }
32913+ return entry;
32914+ }
32915+ case 0x80: /* Not present */
32916+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
32917+ return 0;
32918+ default: /* Shouldn't happen */
32919+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
32920+ service, return_code);
32921+ return 0;
32922 }
32923 }
32924
32925 static struct {
32926 unsigned long address;
32927 unsigned short segment;
32928-} pci_indirect = { 0, __KERNEL_CS };
32929+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
32930
32931-static int pci_bios_present;
32932+static int pci_bios_present __read_only;
32933
32934 static int check_pcibios(void)
32935 {
32936@@ -131,11 +174,13 @@ static int check_pcibios(void)
32937 unsigned long flags, pcibios_entry;
32938
32939 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
32940- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
32941+ pci_indirect.address = pcibios_entry;
32942
32943 local_irq_save(flags);
32944- __asm__(
32945- "lcall *(%%edi); cld\n\t"
32946+ __asm__("movw %w6, %%ds\n\t"
32947+ "lcall *%%ss:(%%edi); cld\n\t"
32948+ "push %%ss\n\t"
32949+ "pop %%ds\n\t"
32950 "jc 1f\n\t"
32951 "xor %%ah, %%ah\n"
32952 "1:"
32953@@ -144,7 +189,8 @@ static int check_pcibios(void)
32954 "=b" (ebx),
32955 "=c" (ecx)
32956 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
32957- "D" (&pci_indirect)
32958+ "D" (&pci_indirect),
32959+ "r" (__PCIBIOS_DS)
32960 : "memory");
32961 local_irq_restore(flags);
32962
32963@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32964
32965 switch (len) {
32966 case 1:
32967- __asm__("lcall *(%%esi); cld\n\t"
32968+ __asm__("movw %w6, %%ds\n\t"
32969+ "lcall *%%ss:(%%esi); cld\n\t"
32970+ "push %%ss\n\t"
32971+ "pop %%ds\n\t"
32972 "jc 1f\n\t"
32973 "xor %%ah, %%ah\n"
32974 "1:"
32975@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32976 : "1" (PCIBIOS_READ_CONFIG_BYTE),
32977 "b" (bx),
32978 "D" ((long)reg),
32979- "S" (&pci_indirect));
32980+ "S" (&pci_indirect),
32981+ "r" (__PCIBIOS_DS));
32982 /*
32983 * Zero-extend the result beyond 8 bits, do not trust the
32984 * BIOS having done it:
32985@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32986 *value &= 0xff;
32987 break;
32988 case 2:
32989- __asm__("lcall *(%%esi); cld\n\t"
32990+ __asm__("movw %w6, %%ds\n\t"
32991+ "lcall *%%ss:(%%esi); cld\n\t"
32992+ "push %%ss\n\t"
32993+ "pop %%ds\n\t"
32994 "jc 1f\n\t"
32995 "xor %%ah, %%ah\n"
32996 "1:"
32997@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32998 : "1" (PCIBIOS_READ_CONFIG_WORD),
32999 "b" (bx),
33000 "D" ((long)reg),
33001- "S" (&pci_indirect));
33002+ "S" (&pci_indirect),
33003+ "r" (__PCIBIOS_DS));
33004 /*
33005 * Zero-extend the result beyond 16 bits, do not trust the
33006 * BIOS having done it:
33007@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33008 *value &= 0xffff;
33009 break;
33010 case 4:
33011- __asm__("lcall *(%%esi); cld\n\t"
33012+ __asm__("movw %w6, %%ds\n\t"
33013+ "lcall *%%ss:(%%esi); cld\n\t"
33014+ "push %%ss\n\t"
33015+ "pop %%ds\n\t"
33016 "jc 1f\n\t"
33017 "xor %%ah, %%ah\n"
33018 "1:"
33019@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33020 : "1" (PCIBIOS_READ_CONFIG_DWORD),
33021 "b" (bx),
33022 "D" ((long)reg),
33023- "S" (&pci_indirect));
33024+ "S" (&pci_indirect),
33025+ "r" (__PCIBIOS_DS));
33026 break;
33027 }
33028
33029@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33030
33031 switch (len) {
33032 case 1:
33033- __asm__("lcall *(%%esi); cld\n\t"
33034+ __asm__("movw %w6, %%ds\n\t"
33035+ "lcall *%%ss:(%%esi); cld\n\t"
33036+ "push %%ss\n\t"
33037+ "pop %%ds\n\t"
33038 "jc 1f\n\t"
33039 "xor %%ah, %%ah\n"
33040 "1:"
33041@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33042 "c" (value),
33043 "b" (bx),
33044 "D" ((long)reg),
33045- "S" (&pci_indirect));
33046+ "S" (&pci_indirect),
33047+ "r" (__PCIBIOS_DS));
33048 break;
33049 case 2:
33050- __asm__("lcall *(%%esi); cld\n\t"
33051+ __asm__("movw %w6, %%ds\n\t"
33052+ "lcall *%%ss:(%%esi); cld\n\t"
33053+ "push %%ss\n\t"
33054+ "pop %%ds\n\t"
33055 "jc 1f\n\t"
33056 "xor %%ah, %%ah\n"
33057 "1:"
33058@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33059 "c" (value),
33060 "b" (bx),
33061 "D" ((long)reg),
33062- "S" (&pci_indirect));
33063+ "S" (&pci_indirect),
33064+ "r" (__PCIBIOS_DS));
33065 break;
33066 case 4:
33067- __asm__("lcall *(%%esi); cld\n\t"
33068+ __asm__("movw %w6, %%ds\n\t"
33069+ "lcall *%%ss:(%%esi); cld\n\t"
33070+ "push %%ss\n\t"
33071+ "pop %%ds\n\t"
33072 "jc 1f\n\t"
33073 "xor %%ah, %%ah\n"
33074 "1:"
33075@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33076 "c" (value),
33077 "b" (bx),
33078 "D" ((long)reg),
33079- "S" (&pci_indirect));
33080+ "S" (&pci_indirect),
33081+ "r" (__PCIBIOS_DS));
33082 break;
33083 }
33084
33085@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
33086
33087 DBG("PCI: Fetching IRQ routing table... ");
33088 __asm__("push %%es\n\t"
33089+ "movw %w8, %%ds\n\t"
33090 "push %%ds\n\t"
33091 "pop %%es\n\t"
33092- "lcall *(%%esi); cld\n\t"
33093+ "lcall *%%ss:(%%esi); cld\n\t"
33094 "pop %%es\n\t"
33095+ "push %%ss\n\t"
33096+ "pop %%ds\n"
33097 "jc 1f\n\t"
33098 "xor %%ah, %%ah\n"
33099 "1:"
33100@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
33101 "1" (0),
33102 "D" ((long) &opt),
33103 "S" (&pci_indirect),
33104- "m" (opt)
33105+ "m" (opt),
33106+ "r" (__PCIBIOS_DS)
33107 : "memory");
33108 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
33109 if (ret & 0xff00)
33110@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
33111 {
33112 int ret;
33113
33114- __asm__("lcall *(%%esi); cld\n\t"
33115+ __asm__("movw %w5, %%ds\n\t"
33116+ "lcall *%%ss:(%%esi); cld\n\t"
33117+ "push %%ss\n\t"
33118+ "pop %%ds\n"
33119 "jc 1f\n\t"
33120 "xor %%ah, %%ah\n"
33121 "1:"
33122@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
33123 : "0" (PCIBIOS_SET_PCI_HW_INT),
33124 "b" ((dev->bus->number << 8) | dev->devfn),
33125 "c" ((irq << 8) | (pin + 10)),
33126- "S" (&pci_indirect));
33127+ "S" (&pci_indirect),
33128+ "r" (__PCIBIOS_DS));
33129 return !(ret & 0xff00);
33130 }
33131 EXPORT_SYMBOL(pcibios_set_irq_routing);
33132diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
33133index 40e4469..d915bf9 100644
33134--- a/arch/x86/platform/efi/efi_32.c
33135+++ b/arch/x86/platform/efi/efi_32.c
33136@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
33137 {
33138 struct desc_ptr gdt_descr;
33139
33140+#ifdef CONFIG_PAX_KERNEXEC
33141+ struct desc_struct d;
33142+#endif
33143+
33144 local_irq_save(efi_rt_eflags);
33145
33146 load_cr3(initial_page_table);
33147 __flush_tlb_all();
33148
33149+#ifdef CONFIG_PAX_KERNEXEC
33150+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
33151+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
33152+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
33153+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
33154+#endif
33155+
33156 gdt_descr.address = __pa(get_cpu_gdt_table(0));
33157 gdt_descr.size = GDT_SIZE - 1;
33158 load_gdt(&gdt_descr);
33159@@ -58,11 +69,24 @@ void efi_call_phys_epilog(void)
33160 {
33161 struct desc_ptr gdt_descr;
33162
33163+#ifdef CONFIG_PAX_KERNEXEC
33164+ struct desc_struct d;
33165+
33166+ memset(&d, 0, sizeof d);
33167+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
33168+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
33169+#endif
33170+
33171 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
33172 gdt_descr.size = GDT_SIZE - 1;
33173 load_gdt(&gdt_descr);
33174
33175+#ifdef CONFIG_PAX_PER_CPU_PGD
33176+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
33177+#else
33178 load_cr3(swapper_pg_dir);
33179+#endif
33180+
33181 __flush_tlb_all();
33182
33183 local_irq_restore(efi_rt_eflags);
33184diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
33185index 39a0e7f1..872396e 100644
33186--- a/arch/x86/platform/efi/efi_64.c
33187+++ b/arch/x86/platform/efi/efi_64.c
33188@@ -76,6 +76,11 @@ void __init efi_call_phys_prelog(void)
33189 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
33190 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
33191 }
33192+
33193+#ifdef CONFIG_PAX_PER_CPU_PGD
33194+ load_cr3(swapper_pg_dir);
33195+#endif
33196+
33197 __flush_tlb_all();
33198 }
33199
33200@@ -89,6 +94,11 @@ void __init efi_call_phys_epilog(void)
33201 for (pgd = 0; pgd < n_pgds; pgd++)
33202 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
33203 kfree(save_pgd);
33204+
33205+#ifdef CONFIG_PAX_PER_CPU_PGD
33206+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
33207+#endif
33208+
33209 __flush_tlb_all();
33210 local_irq_restore(efi_flags);
33211 early_code_mapping_set_exec(0);
33212diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
33213index fbe66e6..eae5e38 100644
33214--- a/arch/x86/platform/efi/efi_stub_32.S
33215+++ b/arch/x86/platform/efi/efi_stub_32.S
33216@@ -6,7 +6,9 @@
33217 */
33218
33219 #include <linux/linkage.h>
33220+#include <linux/init.h>
33221 #include <asm/page_types.h>
33222+#include <asm/segment.h>
33223
33224 /*
33225 * efi_call_phys(void *, ...) is a function with variable parameters.
33226@@ -20,7 +22,7 @@
33227 * service functions will comply with gcc calling convention, too.
33228 */
33229
33230-.text
33231+__INIT
33232 ENTRY(efi_call_phys)
33233 /*
33234 * 0. The function can only be called in Linux kernel. So CS has been
33235@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
33236 * The mapping of lower virtual memory has been created in prelog and
33237 * epilog.
33238 */
33239- movl $1f, %edx
33240- subl $__PAGE_OFFSET, %edx
33241- jmp *%edx
33242+#ifdef CONFIG_PAX_KERNEXEC
33243+ movl $(__KERNEXEC_EFI_DS), %edx
33244+ mov %edx, %ds
33245+ mov %edx, %es
33246+ mov %edx, %ss
33247+ addl $2f,(1f)
33248+ ljmp *(1f)
33249+
33250+__INITDATA
33251+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
33252+.previous
33253+
33254+2:
33255+ subl $2b,(1b)
33256+#else
33257+ jmp 1f-__PAGE_OFFSET
33258 1:
33259+#endif
33260
33261 /*
33262 * 2. Now on the top of stack is the return
33263@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
33264 * parameter 2, ..., param n. To make things easy, we save the return
33265 * address of efi_call_phys in a global variable.
33266 */
33267- popl %edx
33268- movl %edx, saved_return_addr
33269- /* get the function pointer into ECX*/
33270- popl %ecx
33271- movl %ecx, efi_rt_function_ptr
33272- movl $2f, %edx
33273- subl $__PAGE_OFFSET, %edx
33274- pushl %edx
33275+ popl (saved_return_addr)
33276+ popl (efi_rt_function_ptr)
33277
33278 /*
33279 * 3. Clear PG bit in %CR0.
33280@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
33281 /*
33282 * 5. Call the physical function.
33283 */
33284- jmp *%ecx
33285+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
33286
33287-2:
33288 /*
33289 * 6. After EFI runtime service returns, control will return to
33290 * following instruction. We'd better readjust stack pointer first.
33291@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
33292 movl %cr0, %edx
33293 orl $0x80000000, %edx
33294 movl %edx, %cr0
33295- jmp 1f
33296-1:
33297+
33298 /*
33299 * 8. Now restore the virtual mode from flat mode by
33300 * adding EIP with PAGE_OFFSET.
33301 */
33302- movl $1f, %edx
33303- jmp *%edx
33304+#ifdef CONFIG_PAX_KERNEXEC
33305+ movl $(__KERNEL_DS), %edx
33306+ mov %edx, %ds
33307+ mov %edx, %es
33308+ mov %edx, %ss
33309+ ljmp $(__KERNEL_CS),$1f
33310+#else
33311+ jmp 1f+__PAGE_OFFSET
33312+#endif
33313 1:
33314
33315 /*
33316 * 9. Balance the stack. And because EAX contain the return value,
33317 * we'd better not clobber it.
33318 */
33319- leal efi_rt_function_ptr, %edx
33320- movl (%edx), %ecx
33321- pushl %ecx
33322+ pushl (efi_rt_function_ptr)
33323
33324 /*
33325- * 10. Push the saved return address onto the stack and return.
33326+ * 10. Return to the saved return address.
33327 */
33328- leal saved_return_addr, %edx
33329- movl (%edx), %ecx
33330- pushl %ecx
33331- ret
33332+ jmpl *(saved_return_addr)
33333 ENDPROC(efi_call_phys)
33334 .previous
33335
33336-.data
33337+__INITDATA
33338 saved_return_addr:
33339 .long 0
33340 efi_rt_function_ptr:
33341diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
33342index 4c07cca..2c8427d 100644
33343--- a/arch/x86/platform/efi/efi_stub_64.S
33344+++ b/arch/x86/platform/efi/efi_stub_64.S
33345@@ -7,6 +7,7 @@
33346 */
33347
33348 #include <linux/linkage.h>
33349+#include <asm/alternative-asm.h>
33350
33351 #define SAVE_XMM \
33352 mov %rsp, %rax; \
33353@@ -40,6 +41,7 @@ ENTRY(efi_call0)
33354 call *%rdi
33355 addq $32, %rsp
33356 RESTORE_XMM
33357+ pax_force_retaddr 0, 1
33358 ret
33359 ENDPROC(efi_call0)
33360
33361@@ -50,6 +52,7 @@ ENTRY(efi_call1)
33362 call *%rdi
33363 addq $32, %rsp
33364 RESTORE_XMM
33365+ pax_force_retaddr 0, 1
33366 ret
33367 ENDPROC(efi_call1)
33368
33369@@ -60,6 +63,7 @@ ENTRY(efi_call2)
33370 call *%rdi
33371 addq $32, %rsp
33372 RESTORE_XMM
33373+ pax_force_retaddr 0, 1
33374 ret
33375 ENDPROC(efi_call2)
33376
33377@@ -71,6 +75,7 @@ ENTRY(efi_call3)
33378 call *%rdi
33379 addq $32, %rsp
33380 RESTORE_XMM
33381+ pax_force_retaddr 0, 1
33382 ret
33383 ENDPROC(efi_call3)
33384
33385@@ -83,6 +88,7 @@ ENTRY(efi_call4)
33386 call *%rdi
33387 addq $32, %rsp
33388 RESTORE_XMM
33389+ pax_force_retaddr 0, 1
33390 ret
33391 ENDPROC(efi_call4)
33392
33393@@ -96,6 +102,7 @@ ENTRY(efi_call5)
33394 call *%rdi
33395 addq $48, %rsp
33396 RESTORE_XMM
33397+ pax_force_retaddr 0, 1
33398 ret
33399 ENDPROC(efi_call5)
33400
33401@@ -112,5 +119,6 @@ ENTRY(efi_call6)
33402 call *%rdi
33403 addq $48, %rsp
33404 RESTORE_XMM
33405+ pax_force_retaddr 0, 1
33406 ret
33407 ENDPROC(efi_call6)
33408diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
33409index a0a0a43..a48e233 100644
33410--- a/arch/x86/platform/mrst/mrst.c
33411+++ b/arch/x86/platform/mrst/mrst.c
33412@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
33413 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
33414 int sfi_mrtc_num;
33415
33416-static void mrst_power_off(void)
33417+static __noreturn void mrst_power_off(void)
33418 {
33419+ BUG();
33420 }
33421
33422-static void mrst_reboot(void)
33423+static __noreturn void mrst_reboot(void)
33424 {
33425 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
33426+ BUG();
33427 }
33428
33429 /* parse all the mtimer info to a static mtimer array */
33430diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
33431index d6ee929..3637cb5 100644
33432--- a/arch/x86/platform/olpc/olpc_dt.c
33433+++ b/arch/x86/platform/olpc/olpc_dt.c
33434@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
33435 return res;
33436 }
33437
33438-static struct of_pdt_ops prom_olpc_ops __initdata = {
33439+static struct of_pdt_ops prom_olpc_ops __initconst = {
33440 .nextprop = olpc_dt_nextprop,
33441 .getproplen = olpc_dt_getproplen,
33442 .getproperty = olpc_dt_getproperty,
33443diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
33444index 1cf5b30..fd45732 100644
33445--- a/arch/x86/power/cpu.c
33446+++ b/arch/x86/power/cpu.c
33447@@ -137,11 +137,8 @@ static void do_fpu_end(void)
33448 static void fix_processor_context(void)
33449 {
33450 int cpu = smp_processor_id();
33451- struct tss_struct *t = &per_cpu(init_tss, cpu);
33452-#ifdef CONFIG_X86_64
33453- struct desc_struct *desc = get_cpu_gdt_table(cpu);
33454- tss_desc tss;
33455-#endif
33456+ struct tss_struct *t = init_tss + cpu;
33457+
33458 set_tss_desc(cpu, t); /*
33459 * This just modifies memory; should not be
33460 * necessary. But... This is necessary, because
33461@@ -150,10 +147,6 @@ static void fix_processor_context(void)
33462 */
33463
33464 #ifdef CONFIG_X86_64
33465- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
33466- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
33467- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
33468-
33469 syscall_init(); /* This sets MSR_*STAR and related */
33470 #endif
33471 load_TR_desc(); /* This does ltr */
33472diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
33473index a44f457..9140171 100644
33474--- a/arch/x86/realmode/init.c
33475+++ b/arch/x86/realmode/init.c
33476@@ -70,7 +70,13 @@ void __init setup_real_mode(void)
33477 __va(real_mode_header->trampoline_header);
33478
33479 #ifdef CONFIG_X86_32
33480- trampoline_header->start = __pa_symbol(startup_32_smp);
33481+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
33482+
33483+#ifdef CONFIG_PAX_KERNEXEC
33484+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
33485+#endif
33486+
33487+ trampoline_header->boot_cs = __BOOT_CS;
33488 trampoline_header->gdt_limit = __BOOT_DS + 7;
33489 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
33490 #else
33491@@ -86,7 +92,7 @@ void __init setup_real_mode(void)
33492 *trampoline_cr4_features = read_cr4();
33493
33494 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
33495- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
33496+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
33497 trampoline_pgd[511] = init_level4_pgt[511].pgd;
33498 #endif
33499 }
33500diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
33501index 8869287..d577672 100644
33502--- a/arch/x86/realmode/rm/Makefile
33503+++ b/arch/x86/realmode/rm/Makefile
33504@@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
33505 $(call cc-option, -fno-unit-at-a-time)) \
33506 $(call cc-option, -fno-stack-protector) \
33507 $(call cc-option, -mpreferred-stack-boundary=2)
33508+ifdef CONSTIFY_PLUGIN
33509+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
33510+endif
33511 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
33512 GCOV_PROFILE := n
33513diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
33514index a28221d..93c40f1 100644
33515--- a/arch/x86/realmode/rm/header.S
33516+++ b/arch/x86/realmode/rm/header.S
33517@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
33518 #endif
33519 /* APM/BIOS reboot */
33520 .long pa_machine_real_restart_asm
33521-#ifdef CONFIG_X86_64
33522+#ifdef CONFIG_X86_32
33523+ .long __KERNEL_CS
33524+#else
33525 .long __KERNEL32_CS
33526 #endif
33527 END(real_mode_header)
33528diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
33529index c1b2791..f9e31c7 100644
33530--- a/arch/x86/realmode/rm/trampoline_32.S
33531+++ b/arch/x86/realmode/rm/trampoline_32.S
33532@@ -25,6 +25,12 @@
33533 #include <asm/page_types.h>
33534 #include "realmode.h"
33535
33536+#ifdef CONFIG_PAX_KERNEXEC
33537+#define ta(X) (X)
33538+#else
33539+#define ta(X) (pa_ ## X)
33540+#endif
33541+
33542 .text
33543 .code16
33544
33545@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
33546
33547 cli # We should be safe anyway
33548
33549- movl tr_start, %eax # where we need to go
33550-
33551 movl $0xA5A5A5A5, trampoline_status
33552 # write marker for master knows we're running
33553
33554@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
33555 movw $1, %dx # protected mode (PE) bit
33556 lmsw %dx # into protected mode
33557
33558- ljmpl $__BOOT_CS, $pa_startup_32
33559+ ljmpl *(trampoline_header)
33560
33561 .section ".text32","ax"
33562 .code32
33563@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
33564 .balign 8
33565 GLOBAL(trampoline_header)
33566 tr_start: .space 4
33567- tr_gdt_pad: .space 2
33568+ tr_boot_cs: .space 2
33569 tr_gdt: .space 6
33570 END(trampoline_header)
33571
33572diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
33573index bb360dc..d0fd8f8 100644
33574--- a/arch/x86/realmode/rm/trampoline_64.S
33575+++ b/arch/x86/realmode/rm/trampoline_64.S
33576@@ -94,6 +94,7 @@ ENTRY(startup_32)
33577 movl %edx, %gs
33578
33579 movl pa_tr_cr4, %eax
33580+ andl $~X86_CR4_PCIDE, %eax
33581 movl %eax, %cr4 # Enable PAE mode
33582
33583 # Setup trampoline 4 level pagetables
33584@@ -107,7 +108,7 @@ ENTRY(startup_32)
33585 wrmsr
33586
33587 # Enable paging and in turn activate Long Mode
33588- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
33589+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
33590 movl %eax, %cr0
33591
33592 /*
33593diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
33594index e812034..c747134 100644
33595--- a/arch/x86/tools/Makefile
33596+++ b/arch/x86/tools/Makefile
33597@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
33598
33599 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
33600
33601-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
33602+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
33603 hostprogs-y += relocs
33604 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
33605 relocs: $(obj)/relocs
33606diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
33607index f7bab68..b6d9886 100644
33608--- a/arch/x86/tools/relocs.c
33609+++ b/arch/x86/tools/relocs.c
33610@@ -1,5 +1,7 @@
33611 /* This is included from relocs_32/64.c */
33612
33613+#include "../../../include/generated/autoconf.h"
33614+
33615 #define ElfW(type) _ElfW(ELF_BITS, type)
33616 #define _ElfW(bits, type) __ElfW(bits, type)
33617 #define __ElfW(bits, type) Elf##bits##_##type
33618@@ -11,6 +13,7 @@
33619 #define Elf_Sym ElfW(Sym)
33620
33621 static Elf_Ehdr ehdr;
33622+static Elf_Phdr *phdr;
33623
33624 struct relocs {
33625 uint32_t *offset;
33626@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
33627 }
33628 }
33629
33630+static void read_phdrs(FILE *fp)
33631+{
33632+ unsigned int i;
33633+
33634+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
33635+ if (!phdr) {
33636+ die("Unable to allocate %d program headers\n",
33637+ ehdr.e_phnum);
33638+ }
33639+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
33640+ die("Seek to %d failed: %s\n",
33641+ ehdr.e_phoff, strerror(errno));
33642+ }
33643+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
33644+ die("Cannot read ELF program headers: %s\n",
33645+ strerror(errno));
33646+ }
33647+ for(i = 0; i < ehdr.e_phnum; i++) {
33648+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
33649+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
33650+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
33651+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
33652+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
33653+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
33654+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
33655+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
33656+ }
33657+
33658+}
33659+
33660 static void read_shdrs(FILE *fp)
33661 {
33662- int i;
33663+ unsigned int i;
33664 Elf_Shdr shdr;
33665
33666 secs = calloc(ehdr.e_shnum, sizeof(struct section));
33667@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
33668
33669 static void read_strtabs(FILE *fp)
33670 {
33671- int i;
33672+ unsigned int i;
33673 for (i = 0; i < ehdr.e_shnum; i++) {
33674 struct section *sec = &secs[i];
33675 if (sec->shdr.sh_type != SHT_STRTAB) {
33676@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
33677
33678 static void read_symtabs(FILE *fp)
33679 {
33680- int i,j;
33681+ unsigned int i,j;
33682 for (i = 0; i < ehdr.e_shnum; i++) {
33683 struct section *sec = &secs[i];
33684 if (sec->shdr.sh_type != SHT_SYMTAB) {
33685@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
33686 }
33687
33688
33689-static void read_relocs(FILE *fp)
33690+static void read_relocs(FILE *fp, int use_real_mode)
33691 {
33692- int i,j;
33693+ unsigned int i,j;
33694+ uint32_t base;
33695+
33696 for (i = 0; i < ehdr.e_shnum; i++) {
33697 struct section *sec = &secs[i];
33698 if (sec->shdr.sh_type != SHT_REL_TYPE) {
33699@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
33700 die("Cannot read symbol table: %s\n",
33701 strerror(errno));
33702 }
33703+ base = 0;
33704+
33705+#ifdef CONFIG_X86_32
33706+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
33707+ if (phdr[j].p_type != PT_LOAD )
33708+ continue;
33709+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
33710+ continue;
33711+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
33712+ break;
33713+ }
33714+#endif
33715+
33716 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
33717 Elf_Rel *rel = &sec->reltab[j];
33718- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
33719+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
33720 rel->r_info = elf_xword_to_cpu(rel->r_info);
33721 #if (SHT_REL_TYPE == SHT_RELA)
33722 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
33723@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
33724
33725 static void print_absolute_symbols(void)
33726 {
33727- int i;
33728+ unsigned int i;
33729 const char *format;
33730
33731 if (ELF_BITS == 64)
33732@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
33733 for (i = 0; i < ehdr.e_shnum; i++) {
33734 struct section *sec = &secs[i];
33735 char *sym_strtab;
33736- int j;
33737+ unsigned int j;
33738
33739 if (sec->shdr.sh_type != SHT_SYMTAB) {
33740 continue;
33741@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
33742
33743 static void print_absolute_relocs(void)
33744 {
33745- int i, printed = 0;
33746+ unsigned int i, printed = 0;
33747 const char *format;
33748
33749 if (ELF_BITS == 64)
33750@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
33751 struct section *sec_applies, *sec_symtab;
33752 char *sym_strtab;
33753 Elf_Sym *sh_symtab;
33754- int j;
33755+ unsigned int j;
33756 if (sec->shdr.sh_type != SHT_REL_TYPE) {
33757 continue;
33758 }
33759@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
33760 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
33761 Elf_Sym *sym, const char *symname))
33762 {
33763- int i;
33764+ unsigned int i;
33765 /* Walk through the relocations */
33766 for (i = 0; i < ehdr.e_shnum; i++) {
33767 char *sym_strtab;
33768 Elf_Sym *sh_symtab;
33769 struct section *sec_applies, *sec_symtab;
33770- int j;
33771+ unsigned int j;
33772 struct section *sec = &secs[i];
33773
33774 if (sec->shdr.sh_type != SHT_REL_TYPE) {
33775@@ -812,6 +860,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
33776 {
33777 unsigned r_type = ELF32_R_TYPE(rel->r_info);
33778 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
33779+ char *sym_strtab = sec->link->link->strtab;
33780+
33781+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
33782+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
33783+ return 0;
33784+
33785+#ifdef CONFIG_PAX_KERNEXEC
33786+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
33787+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
33788+ return 0;
33789+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
33790+ return 0;
33791+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
33792+ return 0;
33793+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
33794+ return 0;
33795+#endif
33796
33797 switch (r_type) {
33798 case R_386_NONE:
33799@@ -950,7 +1015,7 @@ static int write32_as_text(uint32_t v, FILE *f)
33800
33801 static void emit_relocs(int as_text, int use_real_mode)
33802 {
33803- int i;
33804+ unsigned int i;
33805 int (*write_reloc)(uint32_t, FILE *) = write32;
33806 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
33807 const char *symname);
33808@@ -1026,10 +1091,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
33809 {
33810 regex_init(use_real_mode);
33811 read_ehdr(fp);
33812+ read_phdrs(fp);
33813 read_shdrs(fp);
33814 read_strtabs(fp);
33815 read_symtabs(fp);
33816- read_relocs(fp);
33817+ read_relocs(fp, use_real_mode);
33818 if (ELF_BITS == 64)
33819 percpu_init();
33820 if (show_absolute_syms) {
33821diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
33822index 80ffa5b..a33bd15 100644
33823--- a/arch/x86/um/tls_32.c
33824+++ b/arch/x86/um/tls_32.c
33825@@ -260,7 +260,7 @@ out:
33826 if (unlikely(task == current &&
33827 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
33828 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
33829- "without flushed TLS.", current->pid);
33830+ "without flushed TLS.", task_pid_nr(current));
33831 }
33832
33833 return 0;
33834diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
33835index fd14be1..e3c79c0 100644
33836--- a/arch/x86/vdso/Makefile
33837+++ b/arch/x86/vdso/Makefile
33838@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
33839 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
33840 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
33841
33842-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
33843+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
33844 GCOV_PROFILE := n
33845
33846 #
33847diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
33848index 0faad64..39ef157 100644
33849--- a/arch/x86/vdso/vdso32-setup.c
33850+++ b/arch/x86/vdso/vdso32-setup.c
33851@@ -25,6 +25,7 @@
33852 #include <asm/tlbflush.h>
33853 #include <asm/vdso.h>
33854 #include <asm/proto.h>
33855+#include <asm/mman.h>
33856
33857 enum {
33858 VDSO_DISABLED = 0,
33859@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
33860 void enable_sep_cpu(void)
33861 {
33862 int cpu = get_cpu();
33863- struct tss_struct *tss = &per_cpu(init_tss, cpu);
33864+ struct tss_struct *tss = init_tss + cpu;
33865
33866 if (!boot_cpu_has(X86_FEATURE_SEP)) {
33867 put_cpu();
33868@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
33869 gate_vma.vm_start = FIXADDR_USER_START;
33870 gate_vma.vm_end = FIXADDR_USER_END;
33871 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
33872- gate_vma.vm_page_prot = __P101;
33873+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
33874
33875 return 0;
33876 }
33877@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
33878 if (compat)
33879 addr = VDSO_HIGH_BASE;
33880 else {
33881- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
33882+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
33883 if (IS_ERR_VALUE(addr)) {
33884 ret = addr;
33885 goto up_fail;
33886 }
33887 }
33888
33889- current->mm->context.vdso = (void *)addr;
33890+ current->mm->context.vdso = addr;
33891
33892 if (compat_uses_vma || !compat) {
33893 /*
33894@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
33895 }
33896
33897 current_thread_info()->sysenter_return =
33898- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
33899+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
33900
33901 up_fail:
33902 if (ret)
33903- current->mm->context.vdso = NULL;
33904+ current->mm->context.vdso = 0;
33905
33906 up_write(&mm->mmap_sem);
33907
33908@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
33909
33910 const char *arch_vma_name(struct vm_area_struct *vma)
33911 {
33912- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
33913+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
33914 return "[vdso]";
33915+
33916+#ifdef CONFIG_PAX_SEGMEXEC
33917+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
33918+ return "[vdso]";
33919+#endif
33920+
33921 return NULL;
33922 }
33923
33924@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
33925 * Check to see if the corresponding task was created in compat vdso
33926 * mode.
33927 */
33928- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
33929+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
33930 return &gate_vma;
33931 return NULL;
33932 }
33933diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
33934index 431e875..cbb23f3 100644
33935--- a/arch/x86/vdso/vma.c
33936+++ b/arch/x86/vdso/vma.c
33937@@ -16,8 +16,6 @@
33938 #include <asm/vdso.h>
33939 #include <asm/page.h>
33940
33941-unsigned int __read_mostly vdso_enabled = 1;
33942-
33943 extern char vdso_start[], vdso_end[];
33944 extern unsigned short vdso_sync_cpuid;
33945
33946@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
33947 * unaligned here as a result of stack start randomization.
33948 */
33949 addr = PAGE_ALIGN(addr);
33950- addr = align_vdso_addr(addr);
33951
33952 return addr;
33953 }
33954@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
33955 unsigned size)
33956 {
33957 struct mm_struct *mm = current->mm;
33958- unsigned long addr;
33959+ unsigned long addr = 0;
33960 int ret;
33961
33962- if (!vdso_enabled)
33963- return 0;
33964-
33965 down_write(&mm->mmap_sem);
33966+
33967+#ifdef CONFIG_PAX_RANDMMAP
33968+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
33969+#endif
33970+
33971 addr = vdso_addr(mm->start_stack, size);
33972+ addr = align_vdso_addr(addr);
33973 addr = get_unmapped_area(NULL, addr, size, 0, 0);
33974 if (IS_ERR_VALUE(addr)) {
33975 ret = addr;
33976 goto up_fail;
33977 }
33978
33979- current->mm->context.vdso = (void *)addr;
33980+ mm->context.vdso = addr;
33981
33982 ret = install_special_mapping(mm, addr, size,
33983 VM_READ|VM_EXEC|
33984 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
33985 pages);
33986- if (ret) {
33987- current->mm->context.vdso = NULL;
33988- goto up_fail;
33989- }
33990+ if (ret)
33991+ mm->context.vdso = 0;
33992
33993 up_fail:
33994 up_write(&mm->mmap_sem);
33995@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
33996 vdsox32_size);
33997 }
33998 #endif
33999-
34000-static __init int vdso_setup(char *s)
34001-{
34002- vdso_enabled = simple_strtoul(s, NULL, 0);
34003- return 0;
34004-}
34005-__setup("vdso=", vdso_setup);
34006diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
34007index a492be2..08678da 100644
34008--- a/arch/x86/xen/enlighten.c
34009+++ b/arch/x86/xen/enlighten.c
34010@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
34011
34012 struct shared_info xen_dummy_shared_info;
34013
34014-void *xen_initial_gdt;
34015-
34016 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
34017 __read_mostly int xen_have_vector_callback;
34018 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
34019@@ -542,8 +540,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
34020 {
34021 unsigned long va = dtr->address;
34022 unsigned int size = dtr->size + 1;
34023- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
34024- unsigned long frames[pages];
34025+ unsigned long frames[65536 / PAGE_SIZE];
34026 int f;
34027
34028 /*
34029@@ -591,8 +588,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
34030 {
34031 unsigned long va = dtr->address;
34032 unsigned int size = dtr->size + 1;
34033- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
34034- unsigned long frames[pages];
34035+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
34036 int f;
34037
34038 /*
34039@@ -600,7 +596,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
34040 * 8-byte entries, or 16 4k pages..
34041 */
34042
34043- BUG_ON(size > 65536);
34044+ BUG_ON(size > GDT_SIZE);
34045 BUG_ON(va & ~PAGE_MASK);
34046
34047 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
34048@@ -985,7 +981,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
34049 return 0;
34050 }
34051
34052-static void set_xen_basic_apic_ops(void)
34053+static void __init set_xen_basic_apic_ops(void)
34054 {
34055 apic->read = xen_apic_read;
34056 apic->write = xen_apic_write;
34057@@ -1290,30 +1286,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
34058 #endif
34059 };
34060
34061-static void xen_reboot(int reason)
34062+static __noreturn void xen_reboot(int reason)
34063 {
34064 struct sched_shutdown r = { .reason = reason };
34065
34066- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
34067- BUG();
34068+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
34069+ BUG();
34070 }
34071
34072-static void xen_restart(char *msg)
34073+static __noreturn void xen_restart(char *msg)
34074 {
34075 xen_reboot(SHUTDOWN_reboot);
34076 }
34077
34078-static void xen_emergency_restart(void)
34079+static __noreturn void xen_emergency_restart(void)
34080 {
34081 xen_reboot(SHUTDOWN_reboot);
34082 }
34083
34084-static void xen_machine_halt(void)
34085+static __noreturn void xen_machine_halt(void)
34086 {
34087 xen_reboot(SHUTDOWN_poweroff);
34088 }
34089
34090-static void xen_machine_power_off(void)
34091+static __noreturn void xen_machine_power_off(void)
34092 {
34093 if (pm_power_off)
34094 pm_power_off();
34095@@ -1464,7 +1460,17 @@ asmlinkage void __init xen_start_kernel(void)
34096 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
34097
34098 /* Work out if we support NX */
34099- x86_configure_nx();
34100+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34101+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
34102+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
34103+ unsigned l, h;
34104+
34105+ __supported_pte_mask |= _PAGE_NX;
34106+ rdmsr(MSR_EFER, l, h);
34107+ l |= EFER_NX;
34108+ wrmsr(MSR_EFER, l, h);
34109+ }
34110+#endif
34111
34112 xen_setup_features();
34113
34114@@ -1495,13 +1501,6 @@ asmlinkage void __init xen_start_kernel(void)
34115
34116 machine_ops = xen_machine_ops;
34117
34118- /*
34119- * The only reliable way to retain the initial address of the
34120- * percpu gdt_page is to remember it here, so we can go and
34121- * mark it RW later, when the initial percpu area is freed.
34122- */
34123- xen_initial_gdt = &per_cpu(gdt_page, 0);
34124-
34125 xen_smp_init();
34126
34127 #ifdef CONFIG_ACPI_NUMA
34128@@ -1700,7 +1699,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
34129 return NOTIFY_OK;
34130 }
34131
34132-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
34133+static struct notifier_block xen_hvm_cpu_notifier = {
34134 .notifier_call = xen_hvm_cpu_notify,
34135 };
34136
34137diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
34138index fdc3ba2..3daee39 100644
34139--- a/arch/x86/xen/mmu.c
34140+++ b/arch/x86/xen/mmu.c
34141@@ -1894,6 +1894,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
34142 /* L3_k[510] -> level2_kernel_pgt
34143 * L3_i[511] -> level2_fixmap_pgt */
34144 convert_pfn_mfn(level3_kernel_pgt);
34145+ convert_pfn_mfn(level3_vmalloc_start_pgt);
34146+ convert_pfn_mfn(level3_vmalloc_end_pgt);
34147+ convert_pfn_mfn(level3_vmemmap_pgt);
34148
34149 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
34150 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
34151@@ -1923,8 +1926,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
34152 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
34153 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
34154 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
34155+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
34156+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
34157+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
34158 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
34159 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
34160+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
34161 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
34162 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
34163
34164@@ -2108,6 +2115,7 @@ static void __init xen_post_allocator_init(void)
34165 pv_mmu_ops.set_pud = xen_set_pud;
34166 #if PAGETABLE_LEVELS == 4
34167 pv_mmu_ops.set_pgd = xen_set_pgd;
34168+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
34169 #endif
34170
34171 /* This will work as long as patching hasn't happened yet
34172@@ -2186,6 +2194,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
34173 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
34174 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
34175 .set_pgd = xen_set_pgd_hyper,
34176+ .set_pgd_batched = xen_set_pgd_hyper,
34177
34178 .alloc_pud = xen_alloc_pmd_init,
34179 .release_pud = xen_release_pmd_init,
34180diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
34181index a1e58e1..9392ad8 100644
34182--- a/arch/x86/xen/smp.c
34183+++ b/arch/x86/xen/smp.c
34184@@ -240,11 +240,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
34185 {
34186 BUG_ON(smp_processor_id() != 0);
34187 native_smp_prepare_boot_cpu();
34188-
34189- /* We've switched to the "real" per-cpu gdt, so make sure the
34190- old memory can be recycled */
34191- make_lowmem_page_readwrite(xen_initial_gdt);
34192-
34193 xen_filter_cpu_maps();
34194 xen_setup_vcpu_info_placement();
34195 }
34196@@ -314,7 +309,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
34197 ctxt->user_regs.ss = __KERNEL_DS;
34198 #ifdef CONFIG_X86_32
34199 ctxt->user_regs.fs = __KERNEL_PERCPU;
34200- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
34201+ savesegment(gs, ctxt->user_regs.gs);
34202 #else
34203 ctxt->gs_base_kernel = per_cpu_offset(cpu);
34204 #endif
34205@@ -324,8 +319,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
34206
34207 {
34208 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
34209- ctxt->user_regs.ds = __USER_DS;
34210- ctxt->user_regs.es = __USER_DS;
34211+ ctxt->user_regs.ds = __KERNEL_DS;
34212+ ctxt->user_regs.es = __KERNEL_DS;
34213
34214 xen_copy_trap_info(ctxt->trap_ctxt);
34215
34216@@ -370,13 +365,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
34217 int rc;
34218
34219 per_cpu(current_task, cpu) = idle;
34220+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
34221 #ifdef CONFIG_X86_32
34222 irq_ctx_init(cpu);
34223 #else
34224 clear_tsk_thread_flag(idle, TIF_FORK);
34225- per_cpu(kernel_stack, cpu) =
34226- (unsigned long)task_stack_page(idle) -
34227- KERNEL_STACK_OFFSET + THREAD_SIZE;
34228+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
34229 #endif
34230 xen_setup_runstate_info(cpu);
34231 xen_setup_timer(cpu);
34232@@ -651,7 +645,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
34233
34234 void __init xen_smp_init(void)
34235 {
34236- smp_ops = xen_smp_ops;
34237+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
34238 xen_fill_possible_map();
34239 xen_init_spinlocks();
34240 }
34241diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
34242index 33ca6e4..0ded929 100644
34243--- a/arch/x86/xen/xen-asm_32.S
34244+++ b/arch/x86/xen/xen-asm_32.S
34245@@ -84,14 +84,14 @@ ENTRY(xen_iret)
34246 ESP_OFFSET=4 # bytes pushed onto stack
34247
34248 /*
34249- * Store vcpu_info pointer for easy access. Do it this way to
34250- * avoid having to reload %fs
34251+ * Store vcpu_info pointer for easy access.
34252 */
34253 #ifdef CONFIG_SMP
34254- GET_THREAD_INFO(%eax)
34255- movl %ss:TI_cpu(%eax), %eax
34256- movl %ss:__per_cpu_offset(,%eax,4), %eax
34257- mov %ss:xen_vcpu(%eax), %eax
34258+ push %fs
34259+ mov $(__KERNEL_PERCPU), %eax
34260+ mov %eax, %fs
34261+ mov PER_CPU_VAR(xen_vcpu), %eax
34262+ pop %fs
34263 #else
34264 movl %ss:xen_vcpu, %eax
34265 #endif
34266diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
34267index 7faed58..ba4427c 100644
34268--- a/arch/x86/xen/xen-head.S
34269+++ b/arch/x86/xen/xen-head.S
34270@@ -19,6 +19,17 @@ ENTRY(startup_xen)
34271 #ifdef CONFIG_X86_32
34272 mov %esi,xen_start_info
34273 mov $init_thread_union+THREAD_SIZE,%esp
34274+#ifdef CONFIG_SMP
34275+ movl $cpu_gdt_table,%edi
34276+ movl $__per_cpu_load,%eax
34277+ movw %ax,__KERNEL_PERCPU + 2(%edi)
34278+ rorl $16,%eax
34279+ movb %al,__KERNEL_PERCPU + 4(%edi)
34280+ movb %ah,__KERNEL_PERCPU + 7(%edi)
34281+ movl $__per_cpu_end - 1,%eax
34282+ subl $__per_cpu_start,%eax
34283+ movw %ax,__KERNEL_PERCPU + 0(%edi)
34284+#endif
34285 #else
34286 mov %rsi,xen_start_info
34287 mov $init_thread_union+THREAD_SIZE,%rsp
34288diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
34289index a95b417..b6dbd0b 100644
34290--- a/arch/x86/xen/xen-ops.h
34291+++ b/arch/x86/xen/xen-ops.h
34292@@ -10,8 +10,6 @@
34293 extern const char xen_hypervisor_callback[];
34294 extern const char xen_failsafe_callback[];
34295
34296-extern void *xen_initial_gdt;
34297-
34298 struct trap_info;
34299 void xen_copy_trap_info(struct trap_info *traps);
34300
34301diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
34302index 525bd3d..ef888b1 100644
34303--- a/arch/xtensa/variants/dc232b/include/variant/core.h
34304+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
34305@@ -119,9 +119,9 @@
34306 ----------------------------------------------------------------------*/
34307
34308 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
34309-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
34310 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
34311 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
34312+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
34313
34314 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
34315 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
34316diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
34317index 2f33760..835e50a 100644
34318--- a/arch/xtensa/variants/fsf/include/variant/core.h
34319+++ b/arch/xtensa/variants/fsf/include/variant/core.h
34320@@ -11,6 +11,7 @@
34321 #ifndef _XTENSA_CORE_H
34322 #define _XTENSA_CORE_H
34323
34324+#include <linux/const.h>
34325
34326 /****************************************************************************
34327 Parameters Useful for Any Code, USER or PRIVILEGED
34328@@ -112,9 +113,9 @@
34329 ----------------------------------------------------------------------*/
34330
34331 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
34332-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
34333 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
34334 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
34335+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
34336
34337 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
34338 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
34339diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
34340index af00795..2bb8105 100644
34341--- a/arch/xtensa/variants/s6000/include/variant/core.h
34342+++ b/arch/xtensa/variants/s6000/include/variant/core.h
34343@@ -11,6 +11,7 @@
34344 #ifndef _XTENSA_CORE_CONFIGURATION_H
34345 #define _XTENSA_CORE_CONFIGURATION_H
34346
34347+#include <linux/const.h>
34348
34349 /****************************************************************************
34350 Parameters Useful for Any Code, USER or PRIVILEGED
34351@@ -118,9 +119,9 @@
34352 ----------------------------------------------------------------------*/
34353
34354 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
34355-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
34356 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
34357 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
34358+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
34359
34360 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
34361 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
34362diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
34363index 58916af..eb9dbcf6 100644
34364--- a/block/blk-iopoll.c
34365+++ b/block/blk-iopoll.c
34366@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
34367 }
34368 EXPORT_SYMBOL(blk_iopoll_complete);
34369
34370-static void blk_iopoll_softirq(struct softirq_action *h)
34371+static void blk_iopoll_softirq(void)
34372 {
34373 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
34374 int rearm = 0, budget = blk_iopoll_budget;
34375@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
34376 return NOTIFY_OK;
34377 }
34378
34379-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
34380+static struct notifier_block blk_iopoll_cpu_notifier = {
34381 .notifier_call = blk_iopoll_cpu_notify,
34382 };
34383
34384diff --git a/block/blk-map.c b/block/blk-map.c
34385index 623e1cd..ca1e109 100644
34386--- a/block/blk-map.c
34387+++ b/block/blk-map.c
34388@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
34389 if (!len || !kbuf)
34390 return -EINVAL;
34391
34392- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
34393+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
34394 if (do_copy)
34395 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
34396 else
34397diff --git a/block/blk-softirq.c b/block/blk-softirq.c
34398index 467c8de..f3628c5 100644
34399--- a/block/blk-softirq.c
34400+++ b/block/blk-softirq.c
34401@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
34402 * Softirq action handler - move entries to local list and loop over them
34403 * while passing them to the queue registered handler.
34404 */
34405-static void blk_done_softirq(struct softirq_action *h)
34406+static void blk_done_softirq(void)
34407 {
34408 struct list_head *cpu_list, local_list;
34409
34410@@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
34411 return NOTIFY_OK;
34412 }
34413
34414-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
34415+static struct notifier_block blk_cpu_notifier = {
34416 .notifier_call = blk_cpu_notify,
34417 };
34418
34419diff --git a/block/bsg.c b/block/bsg.c
34420index 420a5a9..23834aa 100644
34421--- a/block/bsg.c
34422+++ b/block/bsg.c
34423@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
34424 struct sg_io_v4 *hdr, struct bsg_device *bd,
34425 fmode_t has_write_perm)
34426 {
34427+ unsigned char tmpcmd[sizeof(rq->__cmd)];
34428+ unsigned char *cmdptr;
34429+
34430 if (hdr->request_len > BLK_MAX_CDB) {
34431 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
34432 if (!rq->cmd)
34433 return -ENOMEM;
34434- }
34435+ cmdptr = rq->cmd;
34436+ } else
34437+ cmdptr = tmpcmd;
34438
34439- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
34440+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
34441 hdr->request_len))
34442 return -EFAULT;
34443
34444+ if (cmdptr != rq->cmd)
34445+ memcpy(rq->cmd, cmdptr, hdr->request_len);
34446+
34447 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
34448 if (blk_verify_command(rq->cmd, has_write_perm))
34449 return -EPERM;
34450diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
34451index 7c668c8..db3521c 100644
34452--- a/block/compat_ioctl.c
34453+++ b/block/compat_ioctl.c
34454@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
34455 err |= __get_user(f->spec1, &uf->spec1);
34456 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
34457 err |= __get_user(name, &uf->name);
34458- f->name = compat_ptr(name);
34459+ f->name = (void __force_kernel *)compat_ptr(name);
34460 if (err) {
34461 err = -EFAULT;
34462 goto out;
34463diff --git a/block/genhd.c b/block/genhd.c
34464index cdeb527..10aa34db 100644
34465--- a/block/genhd.c
34466+++ b/block/genhd.c
34467@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
34468
34469 /*
34470 * Register device numbers dev..(dev+range-1)
34471- * range must be nonzero
34472+ * Noop if @range is zero.
34473 * The hash chain is sorted on range, so that subranges can override.
34474 */
34475 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
34476 struct kobject *(*probe)(dev_t, int *, void *),
34477 int (*lock)(dev_t, void *), void *data)
34478 {
34479- kobj_map(bdev_map, devt, range, module, probe, lock, data);
34480+ if (range)
34481+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
34482 }
34483
34484 EXPORT_SYMBOL(blk_register_region);
34485
34486+/* undo blk_register_region(), noop if @range is zero */
34487 void blk_unregister_region(dev_t devt, unsigned long range)
34488 {
34489- kobj_unmap(bdev_map, devt, range);
34490+ if (range)
34491+ kobj_unmap(bdev_map, devt, range);
34492 }
34493
34494 EXPORT_SYMBOL(blk_unregister_region);
34495diff --git a/block/partitions/efi.c b/block/partitions/efi.c
34496index c85fc89..51e690b 100644
34497--- a/block/partitions/efi.c
34498+++ b/block/partitions/efi.c
34499@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
34500 if (!gpt)
34501 return NULL;
34502
34503+ if (!le32_to_cpu(gpt->num_partition_entries))
34504+ return NULL;
34505+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
34506+ if (!pte)
34507+ return NULL;
34508+
34509 count = le32_to_cpu(gpt->num_partition_entries) *
34510 le32_to_cpu(gpt->sizeof_partition_entry);
34511- if (!count)
34512- return NULL;
34513- pte = kmalloc(count, GFP_KERNEL);
34514- if (!pte)
34515- return NULL;
34516-
34517 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
34518 (u8 *) pte,
34519 count) < count) {
34520diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
34521index a5ffcc9..3cedc9c 100644
34522--- a/block/scsi_ioctl.c
34523+++ b/block/scsi_ioctl.c
34524@@ -224,8 +224,20 @@ EXPORT_SYMBOL(blk_verify_command);
34525 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
34526 struct sg_io_hdr *hdr, fmode_t mode)
34527 {
34528- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
34529+ unsigned char tmpcmd[sizeof(rq->__cmd)];
34530+ unsigned char *cmdptr;
34531+
34532+ if (rq->cmd != rq->__cmd)
34533+ cmdptr = rq->cmd;
34534+ else
34535+ cmdptr = tmpcmd;
34536+
34537+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
34538 return -EFAULT;
34539+
34540+ if (cmdptr != rq->cmd)
34541+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
34542+
34543 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
34544 return -EPERM;
34545
34546@@ -434,6 +446,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
34547 int err;
34548 unsigned int in_len, out_len, bytes, opcode, cmdlen;
34549 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
34550+ unsigned char tmpcmd[sizeof(rq->__cmd)];
34551+ unsigned char *cmdptr;
34552
34553 if (!sic)
34554 return -EINVAL;
34555@@ -467,9 +481,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
34556 */
34557 err = -EFAULT;
34558 rq->cmd_len = cmdlen;
34559- if (copy_from_user(rq->cmd, sic->data, cmdlen))
34560+
34561+ if (rq->cmd != rq->__cmd)
34562+ cmdptr = rq->cmd;
34563+ else
34564+ cmdptr = tmpcmd;
34565+
34566+ if (copy_from_user(cmdptr, sic->data, cmdlen))
34567 goto error;
34568
34569+ if (rq->cmd != cmdptr)
34570+ memcpy(rq->cmd, cmdptr, cmdlen);
34571+
34572 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
34573 goto error;
34574
34575diff --git a/crypto/cryptd.c b/crypto/cryptd.c
34576index 7bdd61b..afec999 100644
34577--- a/crypto/cryptd.c
34578+++ b/crypto/cryptd.c
34579@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
34580
34581 struct cryptd_blkcipher_request_ctx {
34582 crypto_completion_t complete;
34583-};
34584+} __no_const;
34585
34586 struct cryptd_hash_ctx {
34587 struct crypto_shash *child;
34588@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
34589
34590 struct cryptd_aead_request_ctx {
34591 crypto_completion_t complete;
34592-};
34593+} __no_const;
34594
34595 static void cryptd_queue_worker(struct work_struct *work);
34596
34597diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
34598index b2c99dc..476c9fb 100644
34599--- a/crypto/pcrypt.c
34600+++ b/crypto/pcrypt.c
34601@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
34602 int ret;
34603
34604 pinst->kobj.kset = pcrypt_kset;
34605- ret = kobject_add(&pinst->kobj, NULL, name);
34606+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
34607 if (!ret)
34608 kobject_uevent(&pinst->kobj, KOBJ_ADD);
34609
34610@@ -455,8 +455,8 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
34611
34612 get_online_cpus();
34613
34614- pcrypt->wq = alloc_workqueue(name,
34615- WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
34616+ pcrypt->wq = alloc_workqueue("%s",
34617+ WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1, name);
34618 if (!pcrypt->wq)
34619 goto err;
34620
34621diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
34622index f220d64..d359ad6 100644
34623--- a/drivers/acpi/apei/apei-internal.h
34624+++ b/drivers/acpi/apei/apei-internal.h
34625@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
34626 struct apei_exec_ins_type {
34627 u32 flags;
34628 apei_exec_ins_func_t run;
34629-};
34630+} __do_const;
34631
34632 struct apei_exec_context {
34633 u32 ip;
34634diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
34635index 33dc6a0..4b24b47 100644
34636--- a/drivers/acpi/apei/cper.c
34637+++ b/drivers/acpi/apei/cper.c
34638@@ -39,12 +39,12 @@
34639 */
34640 u64 cper_next_record_id(void)
34641 {
34642- static atomic64_t seq;
34643+ static atomic64_unchecked_t seq;
34644
34645- if (!atomic64_read(&seq))
34646- atomic64_set(&seq, ((u64)get_seconds()) << 32);
34647+ if (!atomic64_read_unchecked(&seq))
34648+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
34649
34650- return atomic64_inc_return(&seq);
34651+ return atomic64_inc_return_unchecked(&seq);
34652 }
34653 EXPORT_SYMBOL_GPL(cper_next_record_id);
34654
34655diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
34656index be60399..778b33e8 100644
34657--- a/drivers/acpi/bgrt.c
34658+++ b/drivers/acpi/bgrt.c
34659@@ -87,8 +87,10 @@ static int __init bgrt_init(void)
34660 return -ENODEV;
34661
34662 sysfs_bin_attr_init(&image_attr);
34663- image_attr.private = bgrt_image;
34664- image_attr.size = bgrt_image_size;
34665+ pax_open_kernel();
34666+ *(void **)&image_attr.private = bgrt_image;
34667+ *(size_t *)&image_attr.size = bgrt_image_size;
34668+ pax_close_kernel();
34669
34670 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
34671 if (!bgrt_kobj)
34672diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
34673index cb96296..b81293b 100644
34674--- a/drivers/acpi/blacklist.c
34675+++ b/drivers/acpi/blacklist.c
34676@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
34677 u32 is_critical_error;
34678 };
34679
34680-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
34681+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
34682
34683 /*
34684 * POLICY: If *anything* doesn't work, put it on the blacklist.
34685@@ -193,7 +193,7 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
34686 return 0;
34687 }
34688
34689-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
34690+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
34691 {
34692 .callback = dmi_disable_osi_vista,
34693 .ident = "Fujitsu Siemens",
34694diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
34695index 7586544..636a2f0 100644
34696--- a/drivers/acpi/ec_sys.c
34697+++ b/drivers/acpi/ec_sys.c
34698@@ -12,6 +12,7 @@
34699 #include <linux/acpi.h>
34700 #include <linux/debugfs.h>
34701 #include <linux/module.h>
34702+#include <linux/uaccess.h>
34703 #include "internal.h"
34704
34705 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
34706@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
34707 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
34708 */
34709 unsigned int size = EC_SPACE_SIZE;
34710- u8 *data = (u8 *) buf;
34711+ u8 data;
34712 loff_t init_off = *off;
34713 int err = 0;
34714
34715@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
34716 size = count;
34717
34718 while (size) {
34719- err = ec_read(*off, &data[*off - init_off]);
34720+ err = ec_read(*off, &data);
34721 if (err)
34722 return err;
34723+ if (put_user(data, &buf[*off - init_off]))
34724+ return -EFAULT;
34725 *off += 1;
34726 size--;
34727 }
34728@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
34729
34730 unsigned int size = count;
34731 loff_t init_off = *off;
34732- u8 *data = (u8 *) buf;
34733 int err = 0;
34734
34735 if (*off >= EC_SPACE_SIZE)
34736@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
34737 }
34738
34739 while (size) {
34740- u8 byte_write = data[*off - init_off];
34741+ u8 byte_write;
34742+ if (get_user(byte_write, &buf[*off - init_off]))
34743+ return -EFAULT;
34744 err = ec_write(*off, byte_write);
34745 if (err)
34746 return err;
34747diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
34748index eb133c7..f571552 100644
34749--- a/drivers/acpi/processor_idle.c
34750+++ b/drivers/acpi/processor_idle.c
34751@@ -994,7 +994,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
34752 {
34753 int i, count = CPUIDLE_DRIVER_STATE_START;
34754 struct acpi_processor_cx *cx;
34755- struct cpuidle_state *state;
34756+ cpuidle_state_no_const *state;
34757 struct cpuidle_driver *drv = &acpi_idle_driver;
34758
34759 if (!pr->flags.power_setup_done)
34760diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
34761index fcae5fa..e9f71ea 100644
34762--- a/drivers/acpi/sysfs.c
34763+++ b/drivers/acpi/sysfs.c
34764@@ -423,11 +423,11 @@ static u32 num_counters;
34765 static struct attribute **all_attrs;
34766 static u32 acpi_gpe_count;
34767
34768-static struct attribute_group interrupt_stats_attr_group = {
34769+static attribute_group_no_const interrupt_stats_attr_group = {
34770 .name = "interrupts",
34771 };
34772
34773-static struct kobj_attribute *counter_attrs;
34774+static kobj_attribute_no_const *counter_attrs;
34775
34776 static void delete_gpe_attr_array(void)
34777 {
34778diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
34779index 7b9bdd8..37638ca 100644
34780--- a/drivers/ata/libahci.c
34781+++ b/drivers/ata/libahci.c
34782@@ -1230,7 +1230,7 @@ int ahci_kick_engine(struct ata_port *ap)
34783 }
34784 EXPORT_SYMBOL_GPL(ahci_kick_engine);
34785
34786-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
34787+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
34788 struct ata_taskfile *tf, int is_cmd, u16 flags,
34789 unsigned long timeout_msec)
34790 {
34791diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
34792index adf002a..39bb8f9 100644
34793--- a/drivers/ata/libata-core.c
34794+++ b/drivers/ata/libata-core.c
34795@@ -4792,7 +4792,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
34796 struct ata_port *ap;
34797 unsigned int tag;
34798
34799- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
34800+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
34801 ap = qc->ap;
34802
34803 qc->flags = 0;
34804@@ -4808,7 +4808,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
34805 struct ata_port *ap;
34806 struct ata_link *link;
34807
34808- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
34809+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
34810 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
34811 ap = qc->ap;
34812 link = qc->dev->link;
34813@@ -5926,6 +5926,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
34814 return;
34815
34816 spin_lock(&lock);
34817+ pax_open_kernel();
34818
34819 for (cur = ops->inherits; cur; cur = cur->inherits) {
34820 void **inherit = (void **)cur;
34821@@ -5939,8 +5940,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
34822 if (IS_ERR(*pp))
34823 *pp = NULL;
34824
34825- ops->inherits = NULL;
34826+ *(struct ata_port_operations **)&ops->inherits = NULL;
34827
34828+ pax_close_kernel();
34829 spin_unlock(&lock);
34830 }
34831
34832diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
34833index 7638121..357a965 100644
34834--- a/drivers/ata/pata_arasan_cf.c
34835+++ b/drivers/ata/pata_arasan_cf.c
34836@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
34837 /* Handle platform specific quirks */
34838 if (quirk) {
34839 if (quirk & CF_BROKEN_PIO) {
34840- ap->ops->set_piomode = NULL;
34841+ pax_open_kernel();
34842+ *(void **)&ap->ops->set_piomode = NULL;
34843+ pax_close_kernel();
34844 ap->pio_mask = 0;
34845 }
34846 if (quirk & CF_BROKEN_MWDMA)
34847diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
34848index f9b983a..887b9d8 100644
34849--- a/drivers/atm/adummy.c
34850+++ b/drivers/atm/adummy.c
34851@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
34852 vcc->pop(vcc, skb);
34853 else
34854 dev_kfree_skb_any(skb);
34855- atomic_inc(&vcc->stats->tx);
34856+ atomic_inc_unchecked(&vcc->stats->tx);
34857
34858 return 0;
34859 }
34860diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
34861index 77a7480d..05cde58 100644
34862--- a/drivers/atm/ambassador.c
34863+++ b/drivers/atm/ambassador.c
34864@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
34865 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
34866
34867 // VC layer stats
34868- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
34869+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
34870
34871 // free the descriptor
34872 kfree (tx_descr);
34873@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
34874 dump_skb ("<<<", vc, skb);
34875
34876 // VC layer stats
34877- atomic_inc(&atm_vcc->stats->rx);
34878+ atomic_inc_unchecked(&atm_vcc->stats->rx);
34879 __net_timestamp(skb);
34880 // end of our responsibility
34881 atm_vcc->push (atm_vcc, skb);
34882@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
34883 } else {
34884 PRINTK (KERN_INFO, "dropped over-size frame");
34885 // should we count this?
34886- atomic_inc(&atm_vcc->stats->rx_drop);
34887+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
34888 }
34889
34890 } else {
34891@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
34892 }
34893
34894 if (check_area (skb->data, skb->len)) {
34895- atomic_inc(&atm_vcc->stats->tx_err);
34896+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
34897 return -ENOMEM; // ?
34898 }
34899
34900diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
34901index 0e3f8f9..765a7a5 100644
34902--- a/drivers/atm/atmtcp.c
34903+++ b/drivers/atm/atmtcp.c
34904@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
34905 if (vcc->pop) vcc->pop(vcc,skb);
34906 else dev_kfree_skb(skb);
34907 if (dev_data) return 0;
34908- atomic_inc(&vcc->stats->tx_err);
34909+ atomic_inc_unchecked(&vcc->stats->tx_err);
34910 return -ENOLINK;
34911 }
34912 size = skb->len+sizeof(struct atmtcp_hdr);
34913@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
34914 if (!new_skb) {
34915 if (vcc->pop) vcc->pop(vcc,skb);
34916 else dev_kfree_skb(skb);
34917- atomic_inc(&vcc->stats->tx_err);
34918+ atomic_inc_unchecked(&vcc->stats->tx_err);
34919 return -ENOBUFS;
34920 }
34921 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
34922@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
34923 if (vcc->pop) vcc->pop(vcc,skb);
34924 else dev_kfree_skb(skb);
34925 out_vcc->push(out_vcc,new_skb);
34926- atomic_inc(&vcc->stats->tx);
34927- atomic_inc(&out_vcc->stats->rx);
34928+ atomic_inc_unchecked(&vcc->stats->tx);
34929+ atomic_inc_unchecked(&out_vcc->stats->rx);
34930 return 0;
34931 }
34932
34933@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
34934 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
34935 read_unlock(&vcc_sklist_lock);
34936 if (!out_vcc) {
34937- atomic_inc(&vcc->stats->tx_err);
34938+ atomic_inc_unchecked(&vcc->stats->tx_err);
34939 goto done;
34940 }
34941 skb_pull(skb,sizeof(struct atmtcp_hdr));
34942@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
34943 __net_timestamp(new_skb);
34944 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
34945 out_vcc->push(out_vcc,new_skb);
34946- atomic_inc(&vcc->stats->tx);
34947- atomic_inc(&out_vcc->stats->rx);
34948+ atomic_inc_unchecked(&vcc->stats->tx);
34949+ atomic_inc_unchecked(&out_vcc->stats->rx);
34950 done:
34951 if (vcc->pop) vcc->pop(vcc,skb);
34952 else dev_kfree_skb(skb);
34953diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
34954index b1955ba..b179940 100644
34955--- a/drivers/atm/eni.c
34956+++ b/drivers/atm/eni.c
34957@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
34958 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
34959 vcc->dev->number);
34960 length = 0;
34961- atomic_inc(&vcc->stats->rx_err);
34962+ atomic_inc_unchecked(&vcc->stats->rx_err);
34963 }
34964 else {
34965 length = ATM_CELL_SIZE-1; /* no HEC */
34966@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
34967 size);
34968 }
34969 eff = length = 0;
34970- atomic_inc(&vcc->stats->rx_err);
34971+ atomic_inc_unchecked(&vcc->stats->rx_err);
34972 }
34973 else {
34974 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
34975@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
34976 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
34977 vcc->dev->number,vcc->vci,length,size << 2,descr);
34978 length = eff = 0;
34979- atomic_inc(&vcc->stats->rx_err);
34980+ atomic_inc_unchecked(&vcc->stats->rx_err);
34981 }
34982 }
34983 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
34984@@ -767,7 +767,7 @@ rx_dequeued++;
34985 vcc->push(vcc,skb);
34986 pushed++;
34987 }
34988- atomic_inc(&vcc->stats->rx);
34989+ atomic_inc_unchecked(&vcc->stats->rx);
34990 }
34991 wake_up(&eni_dev->rx_wait);
34992 }
34993@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
34994 PCI_DMA_TODEVICE);
34995 if (vcc->pop) vcc->pop(vcc,skb);
34996 else dev_kfree_skb_irq(skb);
34997- atomic_inc(&vcc->stats->tx);
34998+ atomic_inc_unchecked(&vcc->stats->tx);
34999 wake_up(&eni_dev->tx_wait);
35000 dma_complete++;
35001 }
35002diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
35003index b41c948..a002b17 100644
35004--- a/drivers/atm/firestream.c
35005+++ b/drivers/atm/firestream.c
35006@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
35007 }
35008 }
35009
35010- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
35011+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
35012
35013 fs_dprintk (FS_DEBUG_TXMEM, "i");
35014 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
35015@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
35016 #endif
35017 skb_put (skb, qe->p1 & 0xffff);
35018 ATM_SKB(skb)->vcc = atm_vcc;
35019- atomic_inc(&atm_vcc->stats->rx);
35020+ atomic_inc_unchecked(&atm_vcc->stats->rx);
35021 __net_timestamp(skb);
35022 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
35023 atm_vcc->push (atm_vcc, skb);
35024@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
35025 kfree (pe);
35026 }
35027 if (atm_vcc)
35028- atomic_inc(&atm_vcc->stats->rx_drop);
35029+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
35030 break;
35031 case 0x1f: /* Reassembly abort: no buffers. */
35032 /* Silently increment error counter. */
35033 if (atm_vcc)
35034- atomic_inc(&atm_vcc->stats->rx_drop);
35035+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
35036 break;
35037 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
35038 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
35039diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
35040index 204814e..cede831 100644
35041--- a/drivers/atm/fore200e.c
35042+++ b/drivers/atm/fore200e.c
35043@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
35044 #endif
35045 /* check error condition */
35046 if (*entry->status & STATUS_ERROR)
35047- atomic_inc(&vcc->stats->tx_err);
35048+ atomic_inc_unchecked(&vcc->stats->tx_err);
35049 else
35050- atomic_inc(&vcc->stats->tx);
35051+ atomic_inc_unchecked(&vcc->stats->tx);
35052 }
35053 }
35054
35055@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
35056 if (skb == NULL) {
35057 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
35058
35059- atomic_inc(&vcc->stats->rx_drop);
35060+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35061 return -ENOMEM;
35062 }
35063
35064@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
35065
35066 dev_kfree_skb_any(skb);
35067
35068- atomic_inc(&vcc->stats->rx_drop);
35069+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35070 return -ENOMEM;
35071 }
35072
35073 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
35074
35075 vcc->push(vcc, skb);
35076- atomic_inc(&vcc->stats->rx);
35077+ atomic_inc_unchecked(&vcc->stats->rx);
35078
35079 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
35080
35081@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
35082 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
35083 fore200e->atm_dev->number,
35084 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
35085- atomic_inc(&vcc->stats->rx_err);
35086+ atomic_inc_unchecked(&vcc->stats->rx_err);
35087 }
35088 }
35089
35090@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
35091 goto retry_here;
35092 }
35093
35094- atomic_inc(&vcc->stats->tx_err);
35095+ atomic_inc_unchecked(&vcc->stats->tx_err);
35096
35097 fore200e->tx_sat++;
35098 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
35099diff --git a/drivers/atm/he.c b/drivers/atm/he.c
35100index 507362a..a845e57 100644
35101--- a/drivers/atm/he.c
35102+++ b/drivers/atm/he.c
35103@@ -1698,7 +1698,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35104
35105 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
35106 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
35107- atomic_inc(&vcc->stats->rx_drop);
35108+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35109 goto return_host_buffers;
35110 }
35111
35112@@ -1725,7 +1725,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35113 RBRQ_LEN_ERR(he_dev->rbrq_head)
35114 ? "LEN_ERR" : "",
35115 vcc->vpi, vcc->vci);
35116- atomic_inc(&vcc->stats->rx_err);
35117+ atomic_inc_unchecked(&vcc->stats->rx_err);
35118 goto return_host_buffers;
35119 }
35120
35121@@ -1777,7 +1777,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35122 vcc->push(vcc, skb);
35123 spin_lock(&he_dev->global_lock);
35124
35125- atomic_inc(&vcc->stats->rx);
35126+ atomic_inc_unchecked(&vcc->stats->rx);
35127
35128 return_host_buffers:
35129 ++pdus_assembled;
35130@@ -2103,7 +2103,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
35131 tpd->vcc->pop(tpd->vcc, tpd->skb);
35132 else
35133 dev_kfree_skb_any(tpd->skb);
35134- atomic_inc(&tpd->vcc->stats->tx_err);
35135+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
35136 }
35137 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
35138 return;
35139@@ -2515,7 +2515,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35140 vcc->pop(vcc, skb);
35141 else
35142 dev_kfree_skb_any(skb);
35143- atomic_inc(&vcc->stats->tx_err);
35144+ atomic_inc_unchecked(&vcc->stats->tx_err);
35145 return -EINVAL;
35146 }
35147
35148@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35149 vcc->pop(vcc, skb);
35150 else
35151 dev_kfree_skb_any(skb);
35152- atomic_inc(&vcc->stats->tx_err);
35153+ atomic_inc_unchecked(&vcc->stats->tx_err);
35154 return -EINVAL;
35155 }
35156 #endif
35157@@ -2538,7 +2538,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35158 vcc->pop(vcc, skb);
35159 else
35160 dev_kfree_skb_any(skb);
35161- atomic_inc(&vcc->stats->tx_err);
35162+ atomic_inc_unchecked(&vcc->stats->tx_err);
35163 spin_unlock_irqrestore(&he_dev->global_lock, flags);
35164 return -ENOMEM;
35165 }
35166@@ -2580,7 +2580,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35167 vcc->pop(vcc, skb);
35168 else
35169 dev_kfree_skb_any(skb);
35170- atomic_inc(&vcc->stats->tx_err);
35171+ atomic_inc_unchecked(&vcc->stats->tx_err);
35172 spin_unlock_irqrestore(&he_dev->global_lock, flags);
35173 return -ENOMEM;
35174 }
35175@@ -2611,7 +2611,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35176 __enqueue_tpd(he_dev, tpd, cid);
35177 spin_unlock_irqrestore(&he_dev->global_lock, flags);
35178
35179- atomic_inc(&vcc->stats->tx);
35180+ atomic_inc_unchecked(&vcc->stats->tx);
35181
35182 return 0;
35183 }
35184diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
35185index 1dc0519..1aadaf7 100644
35186--- a/drivers/atm/horizon.c
35187+++ b/drivers/atm/horizon.c
35188@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
35189 {
35190 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
35191 // VC layer stats
35192- atomic_inc(&vcc->stats->rx);
35193+ atomic_inc_unchecked(&vcc->stats->rx);
35194 __net_timestamp(skb);
35195 // end of our responsibility
35196 vcc->push (vcc, skb);
35197@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
35198 dev->tx_iovec = NULL;
35199
35200 // VC layer stats
35201- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
35202+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
35203
35204 // free the skb
35205 hrz_kfree_skb (skb);
35206diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
35207index 272f009..a18ba55 100644
35208--- a/drivers/atm/idt77252.c
35209+++ b/drivers/atm/idt77252.c
35210@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
35211 else
35212 dev_kfree_skb(skb);
35213
35214- atomic_inc(&vcc->stats->tx);
35215+ atomic_inc_unchecked(&vcc->stats->tx);
35216 }
35217
35218 atomic_dec(&scq->used);
35219@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35220 if ((sb = dev_alloc_skb(64)) == NULL) {
35221 printk("%s: Can't allocate buffers for aal0.\n",
35222 card->name);
35223- atomic_add(i, &vcc->stats->rx_drop);
35224+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
35225 break;
35226 }
35227 if (!atm_charge(vcc, sb->truesize)) {
35228 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
35229 card->name);
35230- atomic_add(i - 1, &vcc->stats->rx_drop);
35231+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
35232 dev_kfree_skb(sb);
35233 break;
35234 }
35235@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35236 ATM_SKB(sb)->vcc = vcc;
35237 __net_timestamp(sb);
35238 vcc->push(vcc, sb);
35239- atomic_inc(&vcc->stats->rx);
35240+ atomic_inc_unchecked(&vcc->stats->rx);
35241
35242 cell += ATM_CELL_PAYLOAD;
35243 }
35244@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35245 "(CDC: %08x)\n",
35246 card->name, len, rpp->len, readl(SAR_REG_CDC));
35247 recycle_rx_pool_skb(card, rpp);
35248- atomic_inc(&vcc->stats->rx_err);
35249+ atomic_inc_unchecked(&vcc->stats->rx_err);
35250 return;
35251 }
35252 if (stat & SAR_RSQE_CRC) {
35253 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
35254 recycle_rx_pool_skb(card, rpp);
35255- atomic_inc(&vcc->stats->rx_err);
35256+ atomic_inc_unchecked(&vcc->stats->rx_err);
35257 return;
35258 }
35259 if (skb_queue_len(&rpp->queue) > 1) {
35260@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35261 RXPRINTK("%s: Can't alloc RX skb.\n",
35262 card->name);
35263 recycle_rx_pool_skb(card, rpp);
35264- atomic_inc(&vcc->stats->rx_err);
35265+ atomic_inc_unchecked(&vcc->stats->rx_err);
35266 return;
35267 }
35268 if (!atm_charge(vcc, skb->truesize)) {
35269@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35270 __net_timestamp(skb);
35271
35272 vcc->push(vcc, skb);
35273- atomic_inc(&vcc->stats->rx);
35274+ atomic_inc_unchecked(&vcc->stats->rx);
35275
35276 return;
35277 }
35278@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35279 __net_timestamp(skb);
35280
35281 vcc->push(vcc, skb);
35282- atomic_inc(&vcc->stats->rx);
35283+ atomic_inc_unchecked(&vcc->stats->rx);
35284
35285 if (skb->truesize > SAR_FB_SIZE_3)
35286 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
35287@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
35288 if (vcc->qos.aal != ATM_AAL0) {
35289 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
35290 card->name, vpi, vci);
35291- atomic_inc(&vcc->stats->rx_drop);
35292+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35293 goto drop;
35294 }
35295
35296 if ((sb = dev_alloc_skb(64)) == NULL) {
35297 printk("%s: Can't allocate buffers for AAL0.\n",
35298 card->name);
35299- atomic_inc(&vcc->stats->rx_err);
35300+ atomic_inc_unchecked(&vcc->stats->rx_err);
35301 goto drop;
35302 }
35303
35304@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
35305 ATM_SKB(sb)->vcc = vcc;
35306 __net_timestamp(sb);
35307 vcc->push(vcc, sb);
35308- atomic_inc(&vcc->stats->rx);
35309+ atomic_inc_unchecked(&vcc->stats->rx);
35310
35311 drop:
35312 skb_pull(queue, 64);
35313@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
35314
35315 if (vc == NULL) {
35316 printk("%s: NULL connection in send().\n", card->name);
35317- atomic_inc(&vcc->stats->tx_err);
35318+ atomic_inc_unchecked(&vcc->stats->tx_err);
35319 dev_kfree_skb(skb);
35320 return -EINVAL;
35321 }
35322 if (!test_bit(VCF_TX, &vc->flags)) {
35323 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
35324- atomic_inc(&vcc->stats->tx_err);
35325+ atomic_inc_unchecked(&vcc->stats->tx_err);
35326 dev_kfree_skb(skb);
35327 return -EINVAL;
35328 }
35329@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
35330 break;
35331 default:
35332 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
35333- atomic_inc(&vcc->stats->tx_err);
35334+ atomic_inc_unchecked(&vcc->stats->tx_err);
35335 dev_kfree_skb(skb);
35336 return -EINVAL;
35337 }
35338
35339 if (skb_shinfo(skb)->nr_frags != 0) {
35340 printk("%s: No scatter-gather yet.\n", card->name);
35341- atomic_inc(&vcc->stats->tx_err);
35342+ atomic_inc_unchecked(&vcc->stats->tx_err);
35343 dev_kfree_skb(skb);
35344 return -EINVAL;
35345 }
35346@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
35347
35348 err = queue_skb(card, vc, skb, oam);
35349 if (err) {
35350- atomic_inc(&vcc->stats->tx_err);
35351+ atomic_inc_unchecked(&vcc->stats->tx_err);
35352 dev_kfree_skb(skb);
35353 return err;
35354 }
35355@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
35356 skb = dev_alloc_skb(64);
35357 if (!skb) {
35358 printk("%s: Out of memory in send_oam().\n", card->name);
35359- atomic_inc(&vcc->stats->tx_err);
35360+ atomic_inc_unchecked(&vcc->stats->tx_err);
35361 return -ENOMEM;
35362 }
35363 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
35364diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
35365index 4217f29..88f547a 100644
35366--- a/drivers/atm/iphase.c
35367+++ b/drivers/atm/iphase.c
35368@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
35369 status = (u_short) (buf_desc_ptr->desc_mode);
35370 if (status & (RX_CER | RX_PTE | RX_OFL))
35371 {
35372- atomic_inc(&vcc->stats->rx_err);
35373+ atomic_inc_unchecked(&vcc->stats->rx_err);
35374 IF_ERR(printk("IA: bad packet, dropping it");)
35375 if (status & RX_CER) {
35376 IF_ERR(printk(" cause: packet CRC error\n");)
35377@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
35378 len = dma_addr - buf_addr;
35379 if (len > iadev->rx_buf_sz) {
35380 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
35381- atomic_inc(&vcc->stats->rx_err);
35382+ atomic_inc_unchecked(&vcc->stats->rx_err);
35383 goto out_free_desc;
35384 }
35385
35386@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
35387 ia_vcc = INPH_IA_VCC(vcc);
35388 if (ia_vcc == NULL)
35389 {
35390- atomic_inc(&vcc->stats->rx_err);
35391+ atomic_inc_unchecked(&vcc->stats->rx_err);
35392 atm_return(vcc, skb->truesize);
35393 dev_kfree_skb_any(skb);
35394 goto INCR_DLE;
35395@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
35396 if ((length > iadev->rx_buf_sz) || (length >
35397 (skb->len - sizeof(struct cpcs_trailer))))
35398 {
35399- atomic_inc(&vcc->stats->rx_err);
35400+ atomic_inc_unchecked(&vcc->stats->rx_err);
35401 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
35402 length, skb->len);)
35403 atm_return(vcc, skb->truesize);
35404@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
35405
35406 IF_RX(printk("rx_dle_intr: skb push");)
35407 vcc->push(vcc,skb);
35408- atomic_inc(&vcc->stats->rx);
35409+ atomic_inc_unchecked(&vcc->stats->rx);
35410 iadev->rx_pkt_cnt++;
35411 }
35412 INCR_DLE:
35413@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
35414 {
35415 struct k_sonet_stats *stats;
35416 stats = &PRIV(_ia_dev[board])->sonet_stats;
35417- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
35418- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
35419- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
35420- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
35421- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
35422- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
35423- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
35424- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
35425- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
35426+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
35427+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
35428+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
35429+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
35430+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
35431+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
35432+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
35433+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
35434+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
35435 }
35436 ia_cmds.status = 0;
35437 break;
35438@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
35439 if ((desc == 0) || (desc > iadev->num_tx_desc))
35440 {
35441 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
35442- atomic_inc(&vcc->stats->tx);
35443+ atomic_inc_unchecked(&vcc->stats->tx);
35444 if (vcc->pop)
35445 vcc->pop(vcc, skb);
35446 else
35447@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
35448 ATM_DESC(skb) = vcc->vci;
35449 skb_queue_tail(&iadev->tx_dma_q, skb);
35450
35451- atomic_inc(&vcc->stats->tx);
35452+ atomic_inc_unchecked(&vcc->stats->tx);
35453 iadev->tx_pkt_cnt++;
35454 /* Increment transaction counter */
35455 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
35456
35457 #if 0
35458 /* add flow control logic */
35459- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
35460+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
35461 if (iavcc->vc_desc_cnt > 10) {
35462 vcc->tx_quota = vcc->tx_quota * 3 / 4;
35463 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
35464diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
35465index fa7d701..1e404c7 100644
35466--- a/drivers/atm/lanai.c
35467+++ b/drivers/atm/lanai.c
35468@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
35469 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
35470 lanai_endtx(lanai, lvcc);
35471 lanai_free_skb(lvcc->tx.atmvcc, skb);
35472- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
35473+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
35474 }
35475
35476 /* Try to fill the buffer - don't call unless there is backlog */
35477@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
35478 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
35479 __net_timestamp(skb);
35480 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
35481- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
35482+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
35483 out:
35484 lvcc->rx.buf.ptr = end;
35485 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
35486@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
35487 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
35488 "vcc %d\n", lanai->number, (unsigned int) s, vci);
35489 lanai->stats.service_rxnotaal5++;
35490- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
35491+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
35492 return 0;
35493 }
35494 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
35495@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
35496 int bytes;
35497 read_unlock(&vcc_sklist_lock);
35498 DPRINTK("got trashed rx pdu on vci %d\n", vci);
35499- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
35500+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
35501 lvcc->stats.x.aal5.service_trash++;
35502 bytes = (SERVICE_GET_END(s) * 16) -
35503 (((unsigned long) lvcc->rx.buf.ptr) -
35504@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
35505 }
35506 if (s & SERVICE_STREAM) {
35507 read_unlock(&vcc_sklist_lock);
35508- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
35509+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
35510 lvcc->stats.x.aal5.service_stream++;
35511 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
35512 "PDU on VCI %d!\n", lanai->number, vci);
35513@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
35514 return 0;
35515 }
35516 DPRINTK("got rx crc error on vci %d\n", vci);
35517- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
35518+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
35519 lvcc->stats.x.aal5.service_rxcrc++;
35520 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
35521 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
35522diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
35523index 6587dc2..149833d 100644
35524--- a/drivers/atm/nicstar.c
35525+++ b/drivers/atm/nicstar.c
35526@@ -1641,7 +1641,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
35527 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
35528 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
35529 card->index);
35530- atomic_inc(&vcc->stats->tx_err);
35531+ atomic_inc_unchecked(&vcc->stats->tx_err);
35532 dev_kfree_skb_any(skb);
35533 return -EINVAL;
35534 }
35535@@ -1649,7 +1649,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
35536 if (!vc->tx) {
35537 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
35538 card->index);
35539- atomic_inc(&vcc->stats->tx_err);
35540+ atomic_inc_unchecked(&vcc->stats->tx_err);
35541 dev_kfree_skb_any(skb);
35542 return -EINVAL;
35543 }
35544@@ -1657,14 +1657,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
35545 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
35546 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
35547 card->index);
35548- atomic_inc(&vcc->stats->tx_err);
35549+ atomic_inc_unchecked(&vcc->stats->tx_err);
35550 dev_kfree_skb_any(skb);
35551 return -EINVAL;
35552 }
35553
35554 if (skb_shinfo(skb)->nr_frags != 0) {
35555 printk("nicstar%d: No scatter-gather yet.\n", card->index);
35556- atomic_inc(&vcc->stats->tx_err);
35557+ atomic_inc_unchecked(&vcc->stats->tx_err);
35558 dev_kfree_skb_any(skb);
35559 return -EINVAL;
35560 }
35561@@ -1712,11 +1712,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
35562 }
35563
35564 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
35565- atomic_inc(&vcc->stats->tx_err);
35566+ atomic_inc_unchecked(&vcc->stats->tx_err);
35567 dev_kfree_skb_any(skb);
35568 return -EIO;
35569 }
35570- atomic_inc(&vcc->stats->tx);
35571+ atomic_inc_unchecked(&vcc->stats->tx);
35572
35573 return 0;
35574 }
35575@@ -2033,14 +2033,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35576 printk
35577 ("nicstar%d: Can't allocate buffers for aal0.\n",
35578 card->index);
35579- atomic_add(i, &vcc->stats->rx_drop);
35580+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
35581 break;
35582 }
35583 if (!atm_charge(vcc, sb->truesize)) {
35584 RXPRINTK
35585 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
35586 card->index);
35587- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
35588+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
35589 dev_kfree_skb_any(sb);
35590 break;
35591 }
35592@@ -2055,7 +2055,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35593 ATM_SKB(sb)->vcc = vcc;
35594 __net_timestamp(sb);
35595 vcc->push(vcc, sb);
35596- atomic_inc(&vcc->stats->rx);
35597+ atomic_inc_unchecked(&vcc->stats->rx);
35598 cell += ATM_CELL_PAYLOAD;
35599 }
35600
35601@@ -2072,7 +2072,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35602 if (iovb == NULL) {
35603 printk("nicstar%d: Out of iovec buffers.\n",
35604 card->index);
35605- atomic_inc(&vcc->stats->rx_drop);
35606+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35607 recycle_rx_buf(card, skb);
35608 return;
35609 }
35610@@ -2096,7 +2096,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35611 small or large buffer itself. */
35612 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
35613 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
35614- atomic_inc(&vcc->stats->rx_err);
35615+ atomic_inc_unchecked(&vcc->stats->rx_err);
35616 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
35617 NS_MAX_IOVECS);
35618 NS_PRV_IOVCNT(iovb) = 0;
35619@@ -2116,7 +2116,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35620 ("nicstar%d: Expected a small buffer, and this is not one.\n",
35621 card->index);
35622 which_list(card, skb);
35623- atomic_inc(&vcc->stats->rx_err);
35624+ atomic_inc_unchecked(&vcc->stats->rx_err);
35625 recycle_rx_buf(card, skb);
35626 vc->rx_iov = NULL;
35627 recycle_iov_buf(card, iovb);
35628@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35629 ("nicstar%d: Expected a large buffer, and this is not one.\n",
35630 card->index);
35631 which_list(card, skb);
35632- atomic_inc(&vcc->stats->rx_err);
35633+ atomic_inc_unchecked(&vcc->stats->rx_err);
35634 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
35635 NS_PRV_IOVCNT(iovb));
35636 vc->rx_iov = NULL;
35637@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35638 printk(" - PDU size mismatch.\n");
35639 else
35640 printk(".\n");
35641- atomic_inc(&vcc->stats->rx_err);
35642+ atomic_inc_unchecked(&vcc->stats->rx_err);
35643 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
35644 NS_PRV_IOVCNT(iovb));
35645 vc->rx_iov = NULL;
35646@@ -2166,7 +2166,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35647 /* skb points to a small buffer */
35648 if (!atm_charge(vcc, skb->truesize)) {
35649 push_rxbufs(card, skb);
35650- atomic_inc(&vcc->stats->rx_drop);
35651+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35652 } else {
35653 skb_put(skb, len);
35654 dequeue_sm_buf(card, skb);
35655@@ -2176,7 +2176,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35656 ATM_SKB(skb)->vcc = vcc;
35657 __net_timestamp(skb);
35658 vcc->push(vcc, skb);
35659- atomic_inc(&vcc->stats->rx);
35660+ atomic_inc_unchecked(&vcc->stats->rx);
35661 }
35662 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
35663 struct sk_buff *sb;
35664@@ -2187,7 +2187,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35665 if (len <= NS_SMBUFSIZE) {
35666 if (!atm_charge(vcc, sb->truesize)) {
35667 push_rxbufs(card, sb);
35668- atomic_inc(&vcc->stats->rx_drop);
35669+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35670 } else {
35671 skb_put(sb, len);
35672 dequeue_sm_buf(card, sb);
35673@@ -2197,7 +2197,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35674 ATM_SKB(sb)->vcc = vcc;
35675 __net_timestamp(sb);
35676 vcc->push(vcc, sb);
35677- atomic_inc(&vcc->stats->rx);
35678+ atomic_inc_unchecked(&vcc->stats->rx);
35679 }
35680
35681 push_rxbufs(card, skb);
35682@@ -2206,7 +2206,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35683
35684 if (!atm_charge(vcc, skb->truesize)) {
35685 push_rxbufs(card, skb);
35686- atomic_inc(&vcc->stats->rx_drop);
35687+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35688 } else {
35689 dequeue_lg_buf(card, skb);
35690 #ifdef NS_USE_DESTRUCTORS
35691@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35692 ATM_SKB(skb)->vcc = vcc;
35693 __net_timestamp(skb);
35694 vcc->push(vcc, skb);
35695- atomic_inc(&vcc->stats->rx);
35696+ atomic_inc_unchecked(&vcc->stats->rx);
35697 }
35698
35699 push_rxbufs(card, sb);
35700@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35701 printk
35702 ("nicstar%d: Out of huge buffers.\n",
35703 card->index);
35704- atomic_inc(&vcc->stats->rx_drop);
35705+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35706 recycle_iovec_rx_bufs(card,
35707 (struct iovec *)
35708 iovb->data,
35709@@ -2291,7 +2291,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35710 card->hbpool.count++;
35711 } else
35712 dev_kfree_skb_any(hb);
35713- atomic_inc(&vcc->stats->rx_drop);
35714+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35715 } else {
35716 /* Copy the small buffer to the huge buffer */
35717 sb = (struct sk_buff *)iov->iov_base;
35718@@ -2328,7 +2328,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35719 #endif /* NS_USE_DESTRUCTORS */
35720 __net_timestamp(hb);
35721 vcc->push(vcc, hb);
35722- atomic_inc(&vcc->stats->rx);
35723+ atomic_inc_unchecked(&vcc->stats->rx);
35724 }
35725 }
35726
35727diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
35728index 32784d1..4a8434a 100644
35729--- a/drivers/atm/solos-pci.c
35730+++ b/drivers/atm/solos-pci.c
35731@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
35732 }
35733 atm_charge(vcc, skb->truesize);
35734 vcc->push(vcc, skb);
35735- atomic_inc(&vcc->stats->rx);
35736+ atomic_inc_unchecked(&vcc->stats->rx);
35737 break;
35738
35739 case PKT_STATUS:
35740@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
35741 vcc = SKB_CB(oldskb)->vcc;
35742
35743 if (vcc) {
35744- atomic_inc(&vcc->stats->tx);
35745+ atomic_inc_unchecked(&vcc->stats->tx);
35746 solos_pop(vcc, oldskb);
35747 } else {
35748 dev_kfree_skb_irq(oldskb);
35749diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
35750index 0215934..ce9f5b1 100644
35751--- a/drivers/atm/suni.c
35752+++ b/drivers/atm/suni.c
35753@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
35754
35755
35756 #define ADD_LIMITED(s,v) \
35757- atomic_add((v),&stats->s); \
35758- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
35759+ atomic_add_unchecked((v),&stats->s); \
35760+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
35761
35762
35763 static void suni_hz(unsigned long from_timer)
35764diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
35765index 5120a96..e2572bd 100644
35766--- a/drivers/atm/uPD98402.c
35767+++ b/drivers/atm/uPD98402.c
35768@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
35769 struct sonet_stats tmp;
35770 int error = 0;
35771
35772- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
35773+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
35774 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
35775 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
35776 if (zero && !error) {
35777@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
35778
35779
35780 #define ADD_LIMITED(s,v) \
35781- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
35782- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
35783- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
35784+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
35785+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
35786+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
35787
35788
35789 static void stat_event(struct atm_dev *dev)
35790@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
35791 if (reason & uPD98402_INT_PFM) stat_event(dev);
35792 if (reason & uPD98402_INT_PCO) {
35793 (void) GET(PCOCR); /* clear interrupt cause */
35794- atomic_add(GET(HECCT),
35795+ atomic_add_unchecked(GET(HECCT),
35796 &PRIV(dev)->sonet_stats.uncorr_hcs);
35797 }
35798 if ((reason & uPD98402_INT_RFO) &&
35799@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
35800 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
35801 uPD98402_INT_LOS),PIMR); /* enable them */
35802 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
35803- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
35804- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
35805- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
35806+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
35807+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
35808+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
35809 return 0;
35810 }
35811
35812diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
35813index 969c3c2..9b72956 100644
35814--- a/drivers/atm/zatm.c
35815+++ b/drivers/atm/zatm.c
35816@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
35817 }
35818 if (!size) {
35819 dev_kfree_skb_irq(skb);
35820- if (vcc) atomic_inc(&vcc->stats->rx_err);
35821+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
35822 continue;
35823 }
35824 if (!atm_charge(vcc,skb->truesize)) {
35825@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
35826 skb->len = size;
35827 ATM_SKB(skb)->vcc = vcc;
35828 vcc->push(vcc,skb);
35829- atomic_inc(&vcc->stats->rx);
35830+ atomic_inc_unchecked(&vcc->stats->rx);
35831 }
35832 zout(pos & 0xffff,MTA(mbx));
35833 #if 0 /* probably a stupid idea */
35834@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
35835 skb_queue_head(&zatm_vcc->backlog,skb);
35836 break;
35837 }
35838- atomic_inc(&vcc->stats->tx);
35839+ atomic_inc_unchecked(&vcc->stats->tx);
35840 wake_up(&zatm_vcc->tx_wait);
35841 }
35842
35843diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
35844index d78b204..ecc1929 100644
35845--- a/drivers/base/attribute_container.c
35846+++ b/drivers/base/attribute_container.c
35847@@ -167,7 +167,7 @@ attribute_container_add_device(struct device *dev,
35848 ic->classdev.parent = get_device(dev);
35849 ic->classdev.class = cont->class;
35850 cont->class->dev_release = attribute_container_release;
35851- dev_set_name(&ic->classdev, dev_name(dev));
35852+ dev_set_name(&ic->classdev, "%s", dev_name(dev));
35853 if (fn)
35854 fn(cont, dev, &ic->classdev);
35855 else
35856diff --git a/drivers/base/bus.c b/drivers/base/bus.c
35857index d414331..b4dd4ba 100644
35858--- a/drivers/base/bus.c
35859+++ b/drivers/base/bus.c
35860@@ -1163,7 +1163,7 @@ int subsys_interface_register(struct subsys_interface *sif)
35861 return -EINVAL;
35862
35863 mutex_lock(&subsys->p->mutex);
35864- list_add_tail(&sif->node, &subsys->p->interfaces);
35865+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
35866 if (sif->add_dev) {
35867 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
35868 while ((dev = subsys_dev_iter_next(&iter)))
35869@@ -1188,7 +1188,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
35870 subsys = sif->subsys;
35871
35872 mutex_lock(&subsys->p->mutex);
35873- list_del_init(&sif->node);
35874+ pax_list_del_init((struct list_head *)&sif->node);
35875 if (sif->remove_dev) {
35876 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
35877 while ((dev = subsys_dev_iter_next(&iter)))
35878diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
35879index 7413d06..79155fa 100644
35880--- a/drivers/base/devtmpfs.c
35881+++ b/drivers/base/devtmpfs.c
35882@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
35883 if (!thread)
35884 return 0;
35885
35886- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
35887+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
35888 if (err)
35889 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
35890 else
35891@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
35892 *err = sys_unshare(CLONE_NEWNS);
35893 if (*err)
35894 goto out;
35895- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
35896+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
35897 if (*err)
35898 goto out;
35899- sys_chdir("/.."); /* will traverse into overmounted root */
35900- sys_chroot(".");
35901+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
35902+ sys_chroot((char __force_user *)".");
35903 complete(&setup_done);
35904 while (1) {
35905 spin_lock(&req_lock);
35906diff --git a/drivers/base/node.c b/drivers/base/node.c
35907index 7616a77c..8f57f51 100644
35908--- a/drivers/base/node.c
35909+++ b/drivers/base/node.c
35910@@ -626,7 +626,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
35911 struct node_attr {
35912 struct device_attribute attr;
35913 enum node_states state;
35914-};
35915+} __do_const;
35916
35917 static ssize_t show_node_state(struct device *dev,
35918 struct device_attribute *attr, char *buf)
35919diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
35920index 7072404..76dcebd 100644
35921--- a/drivers/base/power/domain.c
35922+++ b/drivers/base/power/domain.c
35923@@ -1850,7 +1850,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
35924 {
35925 struct cpuidle_driver *cpuidle_drv;
35926 struct gpd_cpu_data *cpu_data;
35927- struct cpuidle_state *idle_state;
35928+ cpuidle_state_no_const *idle_state;
35929 int ret = 0;
35930
35931 if (IS_ERR_OR_NULL(genpd) || state < 0)
35932@@ -1918,7 +1918,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
35933 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
35934 {
35935 struct gpd_cpu_data *cpu_data;
35936- struct cpuidle_state *idle_state;
35937+ cpuidle_state_no_const *idle_state;
35938 int ret = 0;
35939
35940 if (IS_ERR_OR_NULL(genpd))
35941diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
35942index a53ebd2..8f73eeb 100644
35943--- a/drivers/base/power/sysfs.c
35944+++ b/drivers/base/power/sysfs.c
35945@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
35946 return -EIO;
35947 }
35948 }
35949- return sprintf(buf, p);
35950+ return sprintf(buf, "%s", p);
35951 }
35952
35953 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
35954diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
35955index 79715e7..df06b3b 100644
35956--- a/drivers/base/power/wakeup.c
35957+++ b/drivers/base/power/wakeup.c
35958@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
35959 * They need to be modified together atomically, so it's better to use one
35960 * atomic variable to hold them both.
35961 */
35962-static atomic_t combined_event_count = ATOMIC_INIT(0);
35963+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
35964
35965 #define IN_PROGRESS_BITS (sizeof(int) * 4)
35966 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
35967
35968 static void split_counters(unsigned int *cnt, unsigned int *inpr)
35969 {
35970- unsigned int comb = atomic_read(&combined_event_count);
35971+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
35972
35973 *cnt = (comb >> IN_PROGRESS_BITS);
35974 *inpr = comb & MAX_IN_PROGRESS;
35975@@ -395,7 +395,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
35976 ws->start_prevent_time = ws->last_time;
35977
35978 /* Increment the counter of events in progress. */
35979- cec = atomic_inc_return(&combined_event_count);
35980+ cec = atomic_inc_return_unchecked(&combined_event_count);
35981
35982 trace_wakeup_source_activate(ws->name, cec);
35983 }
35984@@ -521,7 +521,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
35985 * Increment the counter of registered wakeup events and decrement the
35986 * couter of wakeup events in progress simultaneously.
35987 */
35988- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
35989+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
35990 trace_wakeup_source_deactivate(ws->name, cec);
35991
35992 split_counters(&cnt, &inpr);
35993diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
35994index e8d11b6..7b1b36f 100644
35995--- a/drivers/base/syscore.c
35996+++ b/drivers/base/syscore.c
35997@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
35998 void register_syscore_ops(struct syscore_ops *ops)
35999 {
36000 mutex_lock(&syscore_ops_lock);
36001- list_add_tail(&ops->node, &syscore_ops_list);
36002+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
36003 mutex_unlock(&syscore_ops_lock);
36004 }
36005 EXPORT_SYMBOL_GPL(register_syscore_ops);
36006@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
36007 void unregister_syscore_ops(struct syscore_ops *ops)
36008 {
36009 mutex_lock(&syscore_ops_lock);
36010- list_del(&ops->node);
36011+ pax_list_del((struct list_head *)&ops->node);
36012 mutex_unlock(&syscore_ops_lock);
36013 }
36014 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
36015diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
36016index 62b6c2c..4a11354 100644
36017--- a/drivers/block/cciss.c
36018+++ b/drivers/block/cciss.c
36019@@ -1189,6 +1189,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
36020 int err;
36021 u32 cp;
36022
36023+ memset(&arg64, 0, sizeof(arg64));
36024+
36025 err = 0;
36026 err |=
36027 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
36028@@ -3010,7 +3012,7 @@ static void start_io(ctlr_info_t *h)
36029 while (!list_empty(&h->reqQ)) {
36030 c = list_entry(h->reqQ.next, CommandList_struct, list);
36031 /* can't do anything if fifo is full */
36032- if ((h->access.fifo_full(h))) {
36033+ if ((h->access->fifo_full(h))) {
36034 dev_warn(&h->pdev->dev, "fifo full\n");
36035 break;
36036 }
36037@@ -3020,7 +3022,7 @@ static void start_io(ctlr_info_t *h)
36038 h->Qdepth--;
36039
36040 /* Tell the controller execute command */
36041- h->access.submit_command(h, c);
36042+ h->access->submit_command(h, c);
36043
36044 /* Put job onto the completed Q */
36045 addQ(&h->cmpQ, c);
36046@@ -3446,17 +3448,17 @@ startio:
36047
36048 static inline unsigned long get_next_completion(ctlr_info_t *h)
36049 {
36050- return h->access.command_completed(h);
36051+ return h->access->command_completed(h);
36052 }
36053
36054 static inline int interrupt_pending(ctlr_info_t *h)
36055 {
36056- return h->access.intr_pending(h);
36057+ return h->access->intr_pending(h);
36058 }
36059
36060 static inline long interrupt_not_for_us(ctlr_info_t *h)
36061 {
36062- return ((h->access.intr_pending(h) == 0) ||
36063+ return ((h->access->intr_pending(h) == 0) ||
36064 (h->interrupts_enabled == 0));
36065 }
36066
36067@@ -3489,7 +3491,7 @@ static inline u32 next_command(ctlr_info_t *h)
36068 u32 a;
36069
36070 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36071- return h->access.command_completed(h);
36072+ return h->access->command_completed(h);
36073
36074 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36075 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36076@@ -4046,7 +4048,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
36077 trans_support & CFGTBL_Trans_use_short_tags);
36078
36079 /* Change the access methods to the performant access methods */
36080- h->access = SA5_performant_access;
36081+ h->access = &SA5_performant_access;
36082 h->transMethod = CFGTBL_Trans_Performant;
36083
36084 return;
36085@@ -4319,7 +4321,7 @@ static int cciss_pci_init(ctlr_info_t *h)
36086 if (prod_index < 0)
36087 return -ENODEV;
36088 h->product_name = products[prod_index].product_name;
36089- h->access = *(products[prod_index].access);
36090+ h->access = products[prod_index].access;
36091
36092 if (cciss_board_disabled(h)) {
36093 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
36094@@ -5051,7 +5053,7 @@ reinit_after_soft_reset:
36095 }
36096
36097 /* make sure the board interrupts are off */
36098- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36099+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36100 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
36101 if (rc)
36102 goto clean2;
36103@@ -5101,7 +5103,7 @@ reinit_after_soft_reset:
36104 * fake ones to scoop up any residual completions.
36105 */
36106 spin_lock_irqsave(&h->lock, flags);
36107- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36108+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36109 spin_unlock_irqrestore(&h->lock, flags);
36110 free_irq(h->intr[h->intr_mode], h);
36111 rc = cciss_request_irq(h, cciss_msix_discard_completions,
36112@@ -5121,9 +5123,9 @@ reinit_after_soft_reset:
36113 dev_info(&h->pdev->dev, "Board READY.\n");
36114 dev_info(&h->pdev->dev,
36115 "Waiting for stale completions to drain.\n");
36116- h->access.set_intr_mask(h, CCISS_INTR_ON);
36117+ h->access->set_intr_mask(h, CCISS_INTR_ON);
36118 msleep(10000);
36119- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36120+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36121
36122 rc = controller_reset_failed(h->cfgtable);
36123 if (rc)
36124@@ -5146,7 +5148,7 @@ reinit_after_soft_reset:
36125 cciss_scsi_setup(h);
36126
36127 /* Turn the interrupts on so we can service requests */
36128- h->access.set_intr_mask(h, CCISS_INTR_ON);
36129+ h->access->set_intr_mask(h, CCISS_INTR_ON);
36130
36131 /* Get the firmware version */
36132 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
36133@@ -5218,7 +5220,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
36134 kfree(flush_buf);
36135 if (return_code != IO_OK)
36136 dev_warn(&h->pdev->dev, "Error flushing cache\n");
36137- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36138+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36139 free_irq(h->intr[h->intr_mode], h);
36140 }
36141
36142diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
36143index 7fda30e..eb5dfe0 100644
36144--- a/drivers/block/cciss.h
36145+++ b/drivers/block/cciss.h
36146@@ -101,7 +101,7 @@ struct ctlr_info
36147 /* information about each logical volume */
36148 drive_info_struct *drv[CISS_MAX_LUN];
36149
36150- struct access_method access;
36151+ struct access_method *access;
36152
36153 /* queue and queue Info */
36154 struct list_head reqQ;
36155diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
36156index 639d26b..fd6ad1f 100644
36157--- a/drivers/block/cpqarray.c
36158+++ b/drivers/block/cpqarray.c
36159@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
36160 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
36161 goto Enomem4;
36162 }
36163- hba[i]->access.set_intr_mask(hba[i], 0);
36164+ hba[i]->access->set_intr_mask(hba[i], 0);
36165 if (request_irq(hba[i]->intr, do_ida_intr,
36166 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
36167 {
36168@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
36169 add_timer(&hba[i]->timer);
36170
36171 /* Enable IRQ now that spinlock and rate limit timer are set up */
36172- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
36173+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
36174
36175 for(j=0; j<NWD; j++) {
36176 struct gendisk *disk = ida_gendisk[i][j];
36177@@ -694,7 +694,7 @@ DBGINFO(
36178 for(i=0; i<NR_PRODUCTS; i++) {
36179 if (board_id == products[i].board_id) {
36180 c->product_name = products[i].product_name;
36181- c->access = *(products[i].access);
36182+ c->access = products[i].access;
36183 break;
36184 }
36185 }
36186@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
36187 hba[ctlr]->intr = intr;
36188 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
36189 hba[ctlr]->product_name = products[j].product_name;
36190- hba[ctlr]->access = *(products[j].access);
36191+ hba[ctlr]->access = products[j].access;
36192 hba[ctlr]->ctlr = ctlr;
36193 hba[ctlr]->board_id = board_id;
36194 hba[ctlr]->pci_dev = NULL; /* not PCI */
36195@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
36196
36197 while((c = h->reqQ) != NULL) {
36198 /* Can't do anything if we're busy */
36199- if (h->access.fifo_full(h) == 0)
36200+ if (h->access->fifo_full(h) == 0)
36201 return;
36202
36203 /* Get the first entry from the request Q */
36204@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
36205 h->Qdepth--;
36206
36207 /* Tell the controller to do our bidding */
36208- h->access.submit_command(h, c);
36209+ h->access->submit_command(h, c);
36210
36211 /* Get onto the completion Q */
36212 addQ(&h->cmpQ, c);
36213@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
36214 unsigned long flags;
36215 __u32 a,a1;
36216
36217- istat = h->access.intr_pending(h);
36218+ istat = h->access->intr_pending(h);
36219 /* Is this interrupt for us? */
36220 if (istat == 0)
36221 return IRQ_NONE;
36222@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
36223 */
36224 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
36225 if (istat & FIFO_NOT_EMPTY) {
36226- while((a = h->access.command_completed(h))) {
36227+ while((a = h->access->command_completed(h))) {
36228 a1 = a; a &= ~3;
36229 if ((c = h->cmpQ) == NULL)
36230 {
36231@@ -1193,6 +1193,7 @@ out_passthru:
36232 ida_pci_info_struct pciinfo;
36233
36234 if (!arg) return -EINVAL;
36235+ memset(&pciinfo, 0, sizeof(pciinfo));
36236 pciinfo.bus = host->pci_dev->bus->number;
36237 pciinfo.dev_fn = host->pci_dev->devfn;
36238 pciinfo.board_id = host->board_id;
36239@@ -1447,11 +1448,11 @@ static int sendcmd(
36240 /*
36241 * Disable interrupt
36242 */
36243- info_p->access.set_intr_mask(info_p, 0);
36244+ info_p->access->set_intr_mask(info_p, 0);
36245 /* Make sure there is room in the command FIFO */
36246 /* Actually it should be completely empty at this time. */
36247 for (i = 200000; i > 0; i--) {
36248- temp = info_p->access.fifo_full(info_p);
36249+ temp = info_p->access->fifo_full(info_p);
36250 if (temp != 0) {
36251 break;
36252 }
36253@@ -1464,7 +1465,7 @@ DBG(
36254 /*
36255 * Send the cmd
36256 */
36257- info_p->access.submit_command(info_p, c);
36258+ info_p->access->submit_command(info_p, c);
36259 complete = pollcomplete(ctlr);
36260
36261 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
36262@@ -1547,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
36263 * we check the new geometry. Then turn interrupts back on when
36264 * we're done.
36265 */
36266- host->access.set_intr_mask(host, 0);
36267+ host->access->set_intr_mask(host, 0);
36268 getgeometry(ctlr);
36269- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
36270+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
36271
36272 for(i=0; i<NWD; i++) {
36273 struct gendisk *disk = ida_gendisk[ctlr][i];
36274@@ -1589,7 +1590,7 @@ static int pollcomplete(int ctlr)
36275 /* Wait (up to 2 seconds) for a command to complete */
36276
36277 for (i = 200000; i > 0; i--) {
36278- done = hba[ctlr]->access.command_completed(hba[ctlr]);
36279+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
36280 if (done == 0) {
36281 udelay(10); /* a short fixed delay */
36282 } else
36283diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
36284index be73e9d..7fbf140 100644
36285--- a/drivers/block/cpqarray.h
36286+++ b/drivers/block/cpqarray.h
36287@@ -99,7 +99,7 @@ struct ctlr_info {
36288 drv_info_t drv[NWD];
36289 struct proc_dir_entry *proc;
36290
36291- struct access_method access;
36292+ struct access_method *access;
36293
36294 cmdlist_t *reqQ;
36295 cmdlist_t *cmpQ;
36296diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
36297index f943aac..99bfd19 100644
36298--- a/drivers/block/drbd/drbd_int.h
36299+++ b/drivers/block/drbd/drbd_int.h
36300@@ -582,7 +582,7 @@ struct drbd_epoch {
36301 struct drbd_tconn *tconn;
36302 struct list_head list;
36303 unsigned int barrier_nr;
36304- atomic_t epoch_size; /* increased on every request added. */
36305+ atomic_unchecked_t epoch_size; /* increased on every request added. */
36306 atomic_t active; /* increased on every req. added, and dec on every finished. */
36307 unsigned long flags;
36308 };
36309@@ -1021,7 +1021,7 @@ struct drbd_conf {
36310 unsigned int al_tr_number;
36311 int al_tr_cycle;
36312 wait_queue_head_t seq_wait;
36313- atomic_t packet_seq;
36314+ atomic_unchecked_t packet_seq;
36315 unsigned int peer_seq;
36316 spinlock_t peer_seq_lock;
36317 unsigned int minor;
36318@@ -1562,7 +1562,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
36319 char __user *uoptval;
36320 int err;
36321
36322- uoptval = (char __user __force *)optval;
36323+ uoptval = (char __force_user *)optval;
36324
36325 set_fs(KERNEL_DS);
36326 if (level == SOL_SOCKET)
36327diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
36328index a5dca6a..bb27967 100644
36329--- a/drivers/block/drbd/drbd_main.c
36330+++ b/drivers/block/drbd/drbd_main.c
36331@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
36332 p->sector = sector;
36333 p->block_id = block_id;
36334 p->blksize = blksize;
36335- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
36336+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
36337 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
36338 }
36339
36340@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
36341 return -EIO;
36342 p->sector = cpu_to_be64(req->i.sector);
36343 p->block_id = (unsigned long)req;
36344- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
36345+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
36346 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
36347 if (mdev->state.conn >= C_SYNC_SOURCE &&
36348 mdev->state.conn <= C_PAUSED_SYNC_T)
36349@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
36350 {
36351 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
36352
36353- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
36354- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
36355+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
36356+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
36357 kfree(tconn->current_epoch);
36358
36359 idr_destroy(&tconn->volumes);
36360diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
36361index 4222aff..1f79506 100644
36362--- a/drivers/block/drbd/drbd_receiver.c
36363+++ b/drivers/block/drbd/drbd_receiver.c
36364@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_conf *mdev)
36365 {
36366 int err;
36367
36368- atomic_set(&mdev->packet_seq, 0);
36369+ atomic_set_unchecked(&mdev->packet_seq, 0);
36370 mdev->peer_seq = 0;
36371
36372 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
36373@@ -1193,7 +1193,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
36374 do {
36375 next_epoch = NULL;
36376
36377- epoch_size = atomic_read(&epoch->epoch_size);
36378+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
36379
36380 switch (ev & ~EV_CLEANUP) {
36381 case EV_PUT:
36382@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
36383 rv = FE_DESTROYED;
36384 } else {
36385 epoch->flags = 0;
36386- atomic_set(&epoch->epoch_size, 0);
36387+ atomic_set_unchecked(&epoch->epoch_size, 0);
36388 /* atomic_set(&epoch->active, 0); is already zero */
36389 if (rv == FE_STILL_LIVE)
36390 rv = FE_RECYCLED;
36391@@ -1451,7 +1451,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
36392 conn_wait_active_ee_empty(tconn);
36393 drbd_flush(tconn);
36394
36395- if (atomic_read(&tconn->current_epoch->epoch_size)) {
36396+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
36397 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
36398 if (epoch)
36399 break;
36400@@ -1464,11 +1464,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
36401 }
36402
36403 epoch->flags = 0;
36404- atomic_set(&epoch->epoch_size, 0);
36405+ atomic_set_unchecked(&epoch->epoch_size, 0);
36406 atomic_set(&epoch->active, 0);
36407
36408 spin_lock(&tconn->epoch_lock);
36409- if (atomic_read(&tconn->current_epoch->epoch_size)) {
36410+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
36411 list_add(&epoch->list, &tconn->current_epoch->list);
36412 tconn->current_epoch = epoch;
36413 tconn->epochs++;
36414@@ -2172,7 +2172,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
36415
36416 err = wait_for_and_update_peer_seq(mdev, peer_seq);
36417 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
36418- atomic_inc(&tconn->current_epoch->epoch_size);
36419+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
36420 err2 = drbd_drain_block(mdev, pi->size);
36421 if (!err)
36422 err = err2;
36423@@ -2206,7 +2206,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
36424
36425 spin_lock(&tconn->epoch_lock);
36426 peer_req->epoch = tconn->current_epoch;
36427- atomic_inc(&peer_req->epoch->epoch_size);
36428+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
36429 atomic_inc(&peer_req->epoch->active);
36430 spin_unlock(&tconn->epoch_lock);
36431
36432@@ -4347,7 +4347,7 @@ struct data_cmd {
36433 int expect_payload;
36434 size_t pkt_size;
36435 int (*fn)(struct drbd_tconn *, struct packet_info *);
36436-};
36437+} __do_const;
36438
36439 static struct data_cmd drbd_cmd_handler[] = {
36440 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
36441@@ -4467,7 +4467,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
36442 if (!list_empty(&tconn->current_epoch->list))
36443 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
36444 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
36445- atomic_set(&tconn->current_epoch->epoch_size, 0);
36446+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
36447 tconn->send.seen_any_write_yet = false;
36448
36449 conn_info(tconn, "Connection closed\n");
36450@@ -5223,7 +5223,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
36451 struct asender_cmd {
36452 size_t pkt_size;
36453 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
36454-};
36455+} __do_const;
36456
36457 static struct asender_cmd asender_tbl[] = {
36458 [P_PING] = { 0, got_Ping },
36459diff --git a/drivers/block/loop.c b/drivers/block/loop.c
36460index d92d50f..a7e9d97 100644
36461--- a/drivers/block/loop.c
36462+++ b/drivers/block/loop.c
36463@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
36464
36465 file_start_write(file);
36466 set_fs(get_ds());
36467- bw = file->f_op->write(file, buf, len, &pos);
36468+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
36469 set_fs(old_fs);
36470 file_end_write(file);
36471 if (likely(bw == len))
36472diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
36473index f5d0ea1..c62380a 100644
36474--- a/drivers/block/pktcdvd.c
36475+++ b/drivers/block/pktcdvd.c
36476@@ -84,7 +84,7 @@
36477 #define MAX_SPEED 0xffff
36478
36479 #define ZONE(sector, pd) (((sector) + (pd)->offset) & \
36480- ~(sector_t)((pd)->settings.size - 1))
36481+ ~(sector_t)((pd)->settings.size - 1UL))
36482
36483 static DEFINE_MUTEX(pktcdvd_mutex);
36484 static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
36485diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
36486index 8a3aff7..d7538c2 100644
36487--- a/drivers/cdrom/cdrom.c
36488+++ b/drivers/cdrom/cdrom.c
36489@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
36490 ENSURE(reset, CDC_RESET);
36491 ENSURE(generic_packet, CDC_GENERIC_PACKET);
36492 cdi->mc_flags = 0;
36493- cdo->n_minors = 0;
36494 cdi->options = CDO_USE_FFLAGS;
36495
36496 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
36497@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
36498 else
36499 cdi->cdda_method = CDDA_OLD;
36500
36501- if (!cdo->generic_packet)
36502- cdo->generic_packet = cdrom_dummy_generic_packet;
36503+ if (!cdo->generic_packet) {
36504+ pax_open_kernel();
36505+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
36506+ pax_close_kernel();
36507+ }
36508
36509 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
36510 mutex_lock(&cdrom_mutex);
36511@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
36512 if (cdi->exit)
36513 cdi->exit(cdi);
36514
36515- cdi->ops->n_minors--;
36516 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
36517 }
36518
36519@@ -2107,7 +2108,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
36520 */
36521 nr = nframes;
36522 do {
36523- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
36524+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
36525 if (cgc.buffer)
36526 break;
36527
36528@@ -3429,7 +3430,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
36529 struct cdrom_device_info *cdi;
36530 int ret;
36531
36532- ret = scnprintf(info + *pos, max_size - *pos, header);
36533+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
36534 if (!ret)
36535 return 1;
36536
36537diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
36538index 4afcb65..a68a32d 100644
36539--- a/drivers/cdrom/gdrom.c
36540+++ b/drivers/cdrom/gdrom.c
36541@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
36542 .audio_ioctl = gdrom_audio_ioctl,
36543 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
36544 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
36545- .n_minors = 1,
36546 };
36547
36548 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
36549diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
36550index 3bb6fa3..34013fb 100644
36551--- a/drivers/char/Kconfig
36552+++ b/drivers/char/Kconfig
36553@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
36554
36555 config DEVKMEM
36556 bool "/dev/kmem virtual device support"
36557- default y
36558+ default n
36559+ depends on !GRKERNSEC_KMEM
36560 help
36561 Say Y here if you want to support the /dev/kmem device. The
36562 /dev/kmem device is rarely used, but can be used for certain
36563@@ -582,6 +583,7 @@ config DEVPORT
36564 bool
36565 depends on !M68K
36566 depends on ISA || PCI
36567+ depends on !GRKERNSEC_KMEM
36568 default y
36569
36570 source "drivers/s390/char/Kconfig"
36571diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
36572index a48e05b..6bac831 100644
36573--- a/drivers/char/agp/compat_ioctl.c
36574+++ b/drivers/char/agp/compat_ioctl.c
36575@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
36576 return -ENOMEM;
36577 }
36578
36579- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
36580+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
36581 sizeof(*usegment) * ureserve.seg_count)) {
36582 kfree(usegment);
36583 kfree(ksegment);
36584diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
36585index 2e04433..771f2cc 100644
36586--- a/drivers/char/agp/frontend.c
36587+++ b/drivers/char/agp/frontend.c
36588@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
36589 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
36590 return -EFAULT;
36591
36592- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
36593+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
36594 return -EFAULT;
36595
36596 client = agp_find_client_by_pid(reserve.pid);
36597@@ -847,7 +847,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
36598 if (segment == NULL)
36599 return -ENOMEM;
36600
36601- if (copy_from_user(segment, (void __user *) reserve.seg_list,
36602+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
36603 sizeof(struct agp_segment) * reserve.seg_count)) {
36604 kfree(segment);
36605 return -EFAULT;
36606diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
36607index 4f94375..413694e 100644
36608--- a/drivers/char/genrtc.c
36609+++ b/drivers/char/genrtc.c
36610@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
36611 switch (cmd) {
36612
36613 case RTC_PLL_GET:
36614+ memset(&pll, 0, sizeof(pll));
36615 if (get_rtc_pll(&pll))
36616 return -EINVAL;
36617 else
36618diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
36619index d784650..e8bfd69 100644
36620--- a/drivers/char/hpet.c
36621+++ b/drivers/char/hpet.c
36622@@ -559,7 +559,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
36623 }
36624
36625 static int
36626-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
36627+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
36628 struct hpet_info *info)
36629 {
36630 struct hpet_timer __iomem *timer;
36631diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
36632index 86fe45c..c0ea948 100644
36633--- a/drivers/char/hw_random/intel-rng.c
36634+++ b/drivers/char/hw_random/intel-rng.c
36635@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
36636
36637 if (no_fwh_detect)
36638 return -ENODEV;
36639- printk(warning);
36640+ printk("%s", warning);
36641 return -EBUSY;
36642 }
36643
36644diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
36645index 4445fa1..7c6de37 100644
36646--- a/drivers/char/ipmi/ipmi_msghandler.c
36647+++ b/drivers/char/ipmi/ipmi_msghandler.c
36648@@ -420,7 +420,7 @@ struct ipmi_smi {
36649 struct proc_dir_entry *proc_dir;
36650 char proc_dir_name[10];
36651
36652- atomic_t stats[IPMI_NUM_STATS];
36653+ atomic_unchecked_t stats[IPMI_NUM_STATS];
36654
36655 /*
36656 * run_to_completion duplicate of smb_info, smi_info
36657@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
36658
36659
36660 #define ipmi_inc_stat(intf, stat) \
36661- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
36662+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
36663 #define ipmi_get_stat(intf, stat) \
36664- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
36665+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
36666
36667 static int is_lan_addr(struct ipmi_addr *addr)
36668 {
36669@@ -2883,7 +2883,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
36670 INIT_LIST_HEAD(&intf->cmd_rcvrs);
36671 init_waitqueue_head(&intf->waitq);
36672 for (i = 0; i < IPMI_NUM_STATS; i++)
36673- atomic_set(&intf->stats[i], 0);
36674+ atomic_set_unchecked(&intf->stats[i], 0);
36675
36676 intf->proc_dir = NULL;
36677
36678diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
36679index af4b23f..79806fc 100644
36680--- a/drivers/char/ipmi/ipmi_si_intf.c
36681+++ b/drivers/char/ipmi/ipmi_si_intf.c
36682@@ -275,7 +275,7 @@ struct smi_info {
36683 unsigned char slave_addr;
36684
36685 /* Counters and things for the proc filesystem. */
36686- atomic_t stats[SI_NUM_STATS];
36687+ atomic_unchecked_t stats[SI_NUM_STATS];
36688
36689 struct task_struct *thread;
36690
36691@@ -284,9 +284,9 @@ struct smi_info {
36692 };
36693
36694 #define smi_inc_stat(smi, stat) \
36695- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
36696+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
36697 #define smi_get_stat(smi, stat) \
36698- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
36699+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
36700
36701 #define SI_MAX_PARMS 4
36702
36703@@ -3258,7 +3258,7 @@ static int try_smi_init(struct smi_info *new_smi)
36704 atomic_set(&new_smi->req_events, 0);
36705 new_smi->run_to_completion = 0;
36706 for (i = 0; i < SI_NUM_STATS; i++)
36707- atomic_set(&new_smi->stats[i], 0);
36708+ atomic_set_unchecked(&new_smi->stats[i], 0);
36709
36710 new_smi->interrupt_disabled = 1;
36711 atomic_set(&new_smi->stop_operation, 0);
36712diff --git a/drivers/char/mem.c b/drivers/char/mem.c
36713index 1ccbe94..6ad651a 100644
36714--- a/drivers/char/mem.c
36715+++ b/drivers/char/mem.c
36716@@ -18,6 +18,7 @@
36717 #include <linux/raw.h>
36718 #include <linux/tty.h>
36719 #include <linux/capability.h>
36720+#include <linux/security.h>
36721 #include <linux/ptrace.h>
36722 #include <linux/device.h>
36723 #include <linux/highmem.h>
36724@@ -38,6 +39,10 @@
36725
36726 #define DEVPORT_MINOR 4
36727
36728+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
36729+extern const struct file_operations grsec_fops;
36730+#endif
36731+
36732 static inline unsigned long size_inside_page(unsigned long start,
36733 unsigned long size)
36734 {
36735@@ -69,9 +74,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
36736
36737 while (cursor < to) {
36738 if (!devmem_is_allowed(pfn)) {
36739+#ifdef CONFIG_GRKERNSEC_KMEM
36740+ gr_handle_mem_readwrite(from, to);
36741+#else
36742 printk(KERN_INFO
36743 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
36744 current->comm, from, to);
36745+#endif
36746 return 0;
36747 }
36748 cursor += PAGE_SIZE;
36749@@ -79,6 +88,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
36750 }
36751 return 1;
36752 }
36753+#elif defined(CONFIG_GRKERNSEC_KMEM)
36754+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
36755+{
36756+ return 0;
36757+}
36758 #else
36759 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
36760 {
36761@@ -121,6 +135,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
36762
36763 while (count > 0) {
36764 unsigned long remaining;
36765+ char *temp;
36766
36767 sz = size_inside_page(p, count);
36768
36769@@ -136,7 +151,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
36770 if (!ptr)
36771 return -EFAULT;
36772
36773- remaining = copy_to_user(buf, ptr, sz);
36774+#ifdef CONFIG_PAX_USERCOPY
36775+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
36776+ if (!temp) {
36777+ unxlate_dev_mem_ptr(p, ptr);
36778+ return -ENOMEM;
36779+ }
36780+ memcpy(temp, ptr, sz);
36781+#else
36782+ temp = ptr;
36783+#endif
36784+
36785+ remaining = copy_to_user(buf, temp, sz);
36786+
36787+#ifdef CONFIG_PAX_USERCOPY
36788+ kfree(temp);
36789+#endif
36790+
36791 unxlate_dev_mem_ptr(p, ptr);
36792 if (remaining)
36793 return -EFAULT;
36794@@ -379,7 +410,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf,
36795 else
36796 csize = count;
36797
36798- rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
36799+ rc = copy_oldmem_page(pfn, (char __force_kernel *)buf, csize, offset, 1);
36800 if (rc < 0)
36801 return rc;
36802 buf += csize;
36803@@ -399,9 +430,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
36804 size_t count, loff_t *ppos)
36805 {
36806 unsigned long p = *ppos;
36807- ssize_t low_count, read, sz;
36808+ ssize_t low_count, read, sz, err = 0;
36809 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
36810- int err = 0;
36811
36812 read = 0;
36813 if (p < (unsigned long) high_memory) {
36814@@ -423,6 +453,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
36815 }
36816 #endif
36817 while (low_count > 0) {
36818+ char *temp;
36819+
36820 sz = size_inside_page(p, low_count);
36821
36822 /*
36823@@ -432,7 +464,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
36824 */
36825 kbuf = xlate_dev_kmem_ptr((char *)p);
36826
36827- if (copy_to_user(buf, kbuf, sz))
36828+#ifdef CONFIG_PAX_USERCOPY
36829+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
36830+ if (!temp)
36831+ return -ENOMEM;
36832+ memcpy(temp, kbuf, sz);
36833+#else
36834+ temp = kbuf;
36835+#endif
36836+
36837+ err = copy_to_user(buf, temp, sz);
36838+
36839+#ifdef CONFIG_PAX_USERCOPY
36840+ kfree(temp);
36841+#endif
36842+
36843+ if (err)
36844 return -EFAULT;
36845 buf += sz;
36846 p += sz;
36847@@ -869,6 +916,9 @@ static const struct memdev {
36848 #ifdef CONFIG_CRASH_DUMP
36849 [12] = { "oldmem", 0, &oldmem_fops, NULL },
36850 #endif
36851+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
36852+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
36853+#endif
36854 };
36855
36856 static int memory_open(struct inode *inode, struct file *filp)
36857@@ -940,7 +990,7 @@ static int __init chr_dev_init(void)
36858 continue;
36859
36860 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
36861- NULL, devlist[minor].name);
36862+ NULL, "%s", devlist[minor].name);
36863 }
36864
36865 return tty_init();
36866diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
36867index c689697..04e6d6a2 100644
36868--- a/drivers/char/mwave/tp3780i.c
36869+++ b/drivers/char/mwave/tp3780i.c
36870@@ -479,6 +479,7 @@ int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities
36871 PRINTK_2(TRACE_TP3780I,
36872 "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData);
36873
36874+ memset(pAbilities, 0, sizeof(*pAbilities));
36875 /* fill out standard constant fields */
36876 pAbilities->instr_per_sec = pBDData->rDspSettings.uIps;
36877 pAbilities->data_size = pBDData->rDspSettings.uDStoreSize;
36878diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
36879index 9df78e2..01ba9ae 100644
36880--- a/drivers/char/nvram.c
36881+++ b/drivers/char/nvram.c
36882@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
36883
36884 spin_unlock_irq(&rtc_lock);
36885
36886- if (copy_to_user(buf, contents, tmp - contents))
36887+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
36888 return -EFAULT;
36889
36890 *ppos = i;
36891diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
36892index 5c5cc00..ac9edb7 100644
36893--- a/drivers/char/pcmcia/synclink_cs.c
36894+++ b/drivers/char/pcmcia/synclink_cs.c
36895@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
36896
36897 if (debug_level >= DEBUG_LEVEL_INFO)
36898 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
36899- __FILE__, __LINE__, info->device_name, port->count);
36900+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
36901
36902- WARN_ON(!port->count);
36903+ WARN_ON(!atomic_read(&port->count));
36904
36905 if (tty_port_close_start(port, tty, filp) == 0)
36906 goto cleanup;
36907@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
36908 cleanup:
36909 if (debug_level >= DEBUG_LEVEL_INFO)
36910 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
36911- tty->driver->name, port->count);
36912+ tty->driver->name, atomic_read(&port->count));
36913 }
36914
36915 /* Wait until the transmitter is empty.
36916@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
36917
36918 if (debug_level >= DEBUG_LEVEL_INFO)
36919 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
36920- __FILE__, __LINE__, tty->driver->name, port->count);
36921+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
36922
36923 /* If port is closing, signal caller to try again */
36924 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
36925@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
36926 goto cleanup;
36927 }
36928 spin_lock(&port->lock);
36929- port->count++;
36930+ atomic_inc(&port->count);
36931 spin_unlock(&port->lock);
36932 spin_unlock_irqrestore(&info->netlock, flags);
36933
36934- if (port->count == 1) {
36935+ if (atomic_read(&port->count) == 1) {
36936 /* 1st open on this device, init hardware */
36937 retval = startup(info, tty);
36938 if (retval < 0)
36939@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
36940 unsigned short new_crctype;
36941
36942 /* return error if TTY interface open */
36943- if (info->port.count)
36944+ if (atomic_read(&info->port.count))
36945 return -EBUSY;
36946
36947 switch (encoding)
36948@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
36949
36950 /* arbitrate between network and tty opens */
36951 spin_lock_irqsave(&info->netlock, flags);
36952- if (info->port.count != 0 || info->netcount != 0) {
36953+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
36954 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
36955 spin_unlock_irqrestore(&info->netlock, flags);
36956 return -EBUSY;
36957@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36958 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
36959
36960 /* return error if TTY interface open */
36961- if (info->port.count)
36962+ if (atomic_read(&info->port.count))
36963 return -EBUSY;
36964
36965 if (cmd != SIOCWANDEV)
36966diff --git a/drivers/char/random.c b/drivers/char/random.c
36967index 35487e8..dac8bd1 100644
36968--- a/drivers/char/random.c
36969+++ b/drivers/char/random.c
36970@@ -272,8 +272,13 @@
36971 /*
36972 * Configuration information
36973 */
36974+#ifdef CONFIG_GRKERNSEC_RANDNET
36975+#define INPUT_POOL_WORDS 512
36976+#define OUTPUT_POOL_WORDS 128
36977+#else
36978 #define INPUT_POOL_WORDS 128
36979 #define OUTPUT_POOL_WORDS 32
36980+#endif
36981 #define SEC_XFER_SIZE 512
36982 #define EXTRACT_SIZE 10
36983
36984@@ -313,10 +318,17 @@ static struct poolinfo {
36985 int poolwords;
36986 int tap1, tap2, tap3, tap4, tap5;
36987 } poolinfo_table[] = {
36988+#ifdef CONFIG_GRKERNSEC_RANDNET
36989+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
36990+ { 512, 411, 308, 208, 104, 1 },
36991+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
36992+ { 128, 103, 76, 51, 25, 1 },
36993+#else
36994 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
36995 { 128, 103, 76, 51, 25, 1 },
36996 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
36997 { 32, 26, 20, 14, 7, 1 },
36998+#endif
36999 #if 0
37000 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
37001 { 2048, 1638, 1231, 819, 411, 1 },
37002@@ -524,8 +536,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
37003 input_rotate += i ? 7 : 14;
37004 }
37005
37006- ACCESS_ONCE(r->input_rotate) = input_rotate;
37007- ACCESS_ONCE(r->add_ptr) = i;
37008+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
37009+ ACCESS_ONCE_RW(r->add_ptr) = i;
37010 smp_wmb();
37011
37012 if (out)
37013@@ -1032,7 +1044,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
37014
37015 extract_buf(r, tmp);
37016 i = min_t(int, nbytes, EXTRACT_SIZE);
37017- if (copy_to_user(buf, tmp, i)) {
37018+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
37019 ret = -EFAULT;
37020 break;
37021 }
37022@@ -1368,7 +1380,7 @@ EXPORT_SYMBOL(generate_random_uuid);
37023 #include <linux/sysctl.h>
37024
37025 static int min_read_thresh = 8, min_write_thresh;
37026-static int max_read_thresh = INPUT_POOL_WORDS * 32;
37027+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
37028 static int max_write_thresh = INPUT_POOL_WORDS * 32;
37029 static char sysctl_bootid[16];
37030
37031@@ -1384,7 +1396,7 @@ static char sysctl_bootid[16];
37032 static int proc_do_uuid(ctl_table *table, int write,
37033 void __user *buffer, size_t *lenp, loff_t *ppos)
37034 {
37035- ctl_table fake_table;
37036+ ctl_table_no_const fake_table;
37037 unsigned char buf[64], tmp_uuid[16], *uuid;
37038
37039 uuid = table->data;
37040diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
37041index bf2349db..5456d53 100644
37042--- a/drivers/char/sonypi.c
37043+++ b/drivers/char/sonypi.c
37044@@ -54,6 +54,7 @@
37045
37046 #include <asm/uaccess.h>
37047 #include <asm/io.h>
37048+#include <asm/local.h>
37049
37050 #include <linux/sonypi.h>
37051
37052@@ -490,7 +491,7 @@ static struct sonypi_device {
37053 spinlock_t fifo_lock;
37054 wait_queue_head_t fifo_proc_list;
37055 struct fasync_struct *fifo_async;
37056- int open_count;
37057+ local_t open_count;
37058 int model;
37059 struct input_dev *input_jog_dev;
37060 struct input_dev *input_key_dev;
37061@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
37062 static int sonypi_misc_release(struct inode *inode, struct file *file)
37063 {
37064 mutex_lock(&sonypi_device.lock);
37065- sonypi_device.open_count--;
37066+ local_dec(&sonypi_device.open_count);
37067 mutex_unlock(&sonypi_device.lock);
37068 return 0;
37069 }
37070@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
37071 {
37072 mutex_lock(&sonypi_device.lock);
37073 /* Flush input queue on first open */
37074- if (!sonypi_device.open_count)
37075+ if (!local_read(&sonypi_device.open_count))
37076 kfifo_reset(&sonypi_device.fifo);
37077- sonypi_device.open_count++;
37078+ local_inc(&sonypi_device.open_count);
37079 mutex_unlock(&sonypi_device.lock);
37080
37081 return 0;
37082diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
37083index 64420b3..5c40b56 100644
37084--- a/drivers/char/tpm/tpm_acpi.c
37085+++ b/drivers/char/tpm/tpm_acpi.c
37086@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
37087 virt = acpi_os_map_memory(start, len);
37088 if (!virt) {
37089 kfree(log->bios_event_log);
37090+ log->bios_event_log = NULL;
37091 printk("%s: ERROR - Unable to map memory\n", __func__);
37092 return -EIO;
37093 }
37094
37095- memcpy_fromio(log->bios_event_log, virt, len);
37096+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
37097
37098 acpi_os_unmap_memory(virt, len);
37099 return 0;
37100diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
37101index 84ddc55..1d32f1e 100644
37102--- a/drivers/char/tpm/tpm_eventlog.c
37103+++ b/drivers/char/tpm/tpm_eventlog.c
37104@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
37105 event = addr;
37106
37107 if ((event->event_type == 0 && event->event_size == 0) ||
37108- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
37109+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
37110 return NULL;
37111
37112 return addr;
37113@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
37114 return NULL;
37115
37116 if ((event->event_type == 0 && event->event_size == 0) ||
37117- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
37118+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
37119 return NULL;
37120
37121 (*pos)++;
37122@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
37123 int i;
37124
37125 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
37126- seq_putc(m, data[i]);
37127+ if (!seq_putc(m, data[i]))
37128+ return -EFAULT;
37129
37130 return 0;
37131 }
37132diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
37133index fc45567..fa2a590 100644
37134--- a/drivers/char/virtio_console.c
37135+++ b/drivers/char/virtio_console.c
37136@@ -682,7 +682,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
37137 if (to_user) {
37138 ssize_t ret;
37139
37140- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
37141+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
37142 if (ret)
37143 return -EFAULT;
37144 } else {
37145@@ -785,7 +785,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
37146 if (!port_has_data(port) && !port->host_connected)
37147 return 0;
37148
37149- return fill_readbuf(port, ubuf, count, true);
37150+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
37151 }
37152
37153 static int wait_port_writable(struct port *port, bool nonblock)
37154diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
37155index a33f46f..a720eed 100644
37156--- a/drivers/clk/clk-composite.c
37157+++ b/drivers/clk/clk-composite.c
37158@@ -122,7 +122,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
37159 struct clk *clk;
37160 struct clk_init_data init;
37161 struct clk_composite *composite;
37162- struct clk_ops *clk_composite_ops;
37163+ clk_ops_no_const *clk_composite_ops;
37164
37165 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
37166 if (!composite) {
37167diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
37168index bd11315..7f87098 100644
37169--- a/drivers/clk/socfpga/clk.c
37170+++ b/drivers/clk/socfpga/clk.c
37171@@ -22,6 +22,7 @@
37172 #include <linux/clk-provider.h>
37173 #include <linux/io.h>
37174 #include <linux/of.h>
37175+#include <asm/pgtable.h>
37176
37177 /* Clock Manager offsets */
37178 #define CLKMGR_CTRL 0x0
37179@@ -135,8 +136,10 @@ static __init struct clk *socfpga_clk_init(struct device_node *node,
37180 if (strcmp(clk_name, "main_pll") || strcmp(clk_name, "periph_pll") ||
37181 strcmp(clk_name, "sdram_pll")) {
37182 socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
37183- clk_pll_ops.enable = clk_gate_ops.enable;
37184- clk_pll_ops.disable = clk_gate_ops.disable;
37185+ pax_open_kernel();
37186+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
37187+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
37188+ pax_close_kernel();
37189 }
37190
37191 clk = clk_register(NULL, &socfpga_clk->hw.hw);
37192diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
37193index a2b2541..bc1e7ff 100644
37194--- a/drivers/clocksource/arm_arch_timer.c
37195+++ b/drivers/clocksource/arm_arch_timer.c
37196@@ -264,7 +264,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
37197 return NOTIFY_OK;
37198 }
37199
37200-static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
37201+static struct notifier_block arch_timer_cpu_nb = {
37202 .notifier_call = arch_timer_cpu_notify,
37203 };
37204
37205diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
37206index 350f493..489479e 100644
37207--- a/drivers/clocksource/bcm_kona_timer.c
37208+++ b/drivers/clocksource/bcm_kona_timer.c
37209@@ -199,7 +199,7 @@ static struct irqaction kona_timer_irq = {
37210 .handler = kona_timer_interrupt,
37211 };
37212
37213-static void __init kona_timer_init(void)
37214+static void __init kona_timer_init(struct device_node *np)
37215 {
37216 kona_timers_init();
37217 kona_timer_clockevents_init();
37218diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
37219index ade7513..069445f 100644
37220--- a/drivers/clocksource/metag_generic.c
37221+++ b/drivers/clocksource/metag_generic.c
37222@@ -169,7 +169,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
37223 return NOTIFY_OK;
37224 }
37225
37226-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
37227+static struct notifier_block arch_timer_cpu_nb = {
37228 .notifier_call = arch_timer_cpu_notify,
37229 };
37230
37231diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
37232index edc089e..bc7c0bc 100644
37233--- a/drivers/cpufreq/acpi-cpufreq.c
37234+++ b/drivers/cpufreq/acpi-cpufreq.c
37235@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
37236 return sprintf(buf, "%u\n", boost_enabled);
37237 }
37238
37239-static struct global_attr global_boost = __ATTR(boost, 0644,
37240+static global_attr_no_const global_boost = __ATTR(boost, 0644,
37241 show_global_boost,
37242 store_global_boost);
37243
37244@@ -705,8 +705,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
37245 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
37246 per_cpu(acfreq_data, cpu) = data;
37247
37248- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
37249- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
37250+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
37251+ pax_open_kernel();
37252+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
37253+ pax_close_kernel();
37254+ }
37255
37256 result = acpi_processor_register_performance(data->acpi_data, cpu);
37257 if (result)
37258@@ -832,7 +835,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
37259 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
37260 break;
37261 case ACPI_ADR_SPACE_FIXED_HARDWARE:
37262- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
37263+ pax_open_kernel();
37264+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
37265+ pax_close_kernel();
37266 policy->cur = get_cur_freq_on_cpu(cpu);
37267 break;
37268 default:
37269@@ -843,8 +848,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
37270 acpi_processor_notify_smm(THIS_MODULE);
37271
37272 /* Check for APERF/MPERF support in hardware */
37273- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
37274- acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
37275+ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
37276+ pax_open_kernel();
37277+ *(void **)&acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
37278+ pax_close_kernel();
37279+ }
37280
37281 pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
37282 for (i = 0; i < perf->state_count; i++)
37283diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
37284index 6485547..477033e 100644
37285--- a/drivers/cpufreq/cpufreq.c
37286+++ b/drivers/cpufreq/cpufreq.c
37287@@ -1854,7 +1854,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
37288 return NOTIFY_OK;
37289 }
37290
37291-static struct notifier_block __refdata cpufreq_cpu_notifier = {
37292+static struct notifier_block cpufreq_cpu_notifier = {
37293 .notifier_call = cpufreq_cpu_callback,
37294 };
37295
37296@@ -1886,8 +1886,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
37297
37298 pr_debug("trying to register driver %s\n", driver_data->name);
37299
37300- if (driver_data->setpolicy)
37301- driver_data->flags |= CPUFREQ_CONST_LOOPS;
37302+ if (driver_data->setpolicy) {
37303+ pax_open_kernel();
37304+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
37305+ pax_close_kernel();
37306+ }
37307
37308 write_lock_irqsave(&cpufreq_driver_lock, flags);
37309 if (cpufreq_driver) {
37310diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
37311index a86ff72..aad2b03 100644
37312--- a/drivers/cpufreq/cpufreq_governor.c
37313+++ b/drivers/cpufreq/cpufreq_governor.c
37314@@ -235,7 +235,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
37315 struct dbs_data *dbs_data;
37316 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
37317 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
37318- struct od_ops *od_ops = NULL;
37319+ const struct od_ops *od_ops = NULL;
37320 struct od_dbs_tuners *od_tuners = NULL;
37321 struct cs_dbs_tuners *cs_tuners = NULL;
37322 struct cpu_dbs_common_info *cpu_cdbs;
37323@@ -298,7 +298,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
37324
37325 if ((cdata->governor == GOV_CONSERVATIVE) &&
37326 (!policy->governor->initialized)) {
37327- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37328+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37329
37330 cpufreq_register_notifier(cs_ops->notifier_block,
37331 CPUFREQ_TRANSITION_NOTIFIER);
37332@@ -315,7 +315,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
37333
37334 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
37335 (policy->governor->initialized == 1)) {
37336- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37337+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37338
37339 cpufreq_unregister_notifier(cs_ops->notifier_block,
37340 CPUFREQ_TRANSITION_NOTIFIER);
37341diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
37342index 0d9e6be..461fd3b 100644
37343--- a/drivers/cpufreq/cpufreq_governor.h
37344+++ b/drivers/cpufreq/cpufreq_governor.h
37345@@ -204,7 +204,7 @@ struct common_dbs_data {
37346 void (*exit)(struct dbs_data *dbs_data);
37347
37348 /* Governor specific ops, see below */
37349- void *gov_ops;
37350+ const void *gov_ops;
37351 };
37352
37353 /* Governer Per policy data */
37354diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
37355index c087347..dad6268 100644
37356--- a/drivers/cpufreq/cpufreq_ondemand.c
37357+++ b/drivers/cpufreq/cpufreq_ondemand.c
37358@@ -615,14 +615,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
37359 (struct cpufreq_policy *, unsigned int, unsigned int),
37360 unsigned int powersave_bias)
37361 {
37362- od_ops.powersave_bias_target = f;
37363+ pax_open_kernel();
37364+ *(void **)&od_ops.powersave_bias_target = f;
37365+ pax_close_kernel();
37366 od_set_powersave_bias(powersave_bias);
37367 }
37368 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
37369
37370 void od_unregister_powersave_bias_handler(void)
37371 {
37372- od_ops.powersave_bias_target = generic_powersave_bias_target;
37373+ pax_open_kernel();
37374+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
37375+ pax_close_kernel();
37376 od_set_powersave_bias(0);
37377 }
37378 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
37379diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
37380index bfd6273..e39dd63 100644
37381--- a/drivers/cpufreq/cpufreq_stats.c
37382+++ b/drivers/cpufreq/cpufreq_stats.c
37383@@ -365,7 +365,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
37384 }
37385
37386 /* priority=1 so this will get called before cpufreq_remove_dev */
37387-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
37388+static struct notifier_block cpufreq_stat_cpu_notifier = {
37389 .notifier_call = cpufreq_stat_cpu_callback,
37390 .priority = 1,
37391 };
37392diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
37393index 421ef37..e708530c 100644
37394--- a/drivers/cpufreq/p4-clockmod.c
37395+++ b/drivers/cpufreq/p4-clockmod.c
37396@@ -160,10 +160,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
37397 case 0x0F: /* Core Duo */
37398 case 0x16: /* Celeron Core */
37399 case 0x1C: /* Atom */
37400- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37401+ pax_open_kernel();
37402+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37403+ pax_close_kernel();
37404 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
37405 case 0x0D: /* Pentium M (Dothan) */
37406- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37407+ pax_open_kernel();
37408+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37409+ pax_close_kernel();
37410 /* fall through */
37411 case 0x09: /* Pentium M (Banias) */
37412 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
37413@@ -175,7 +179,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
37414
37415 /* on P-4s, the TSC runs with constant frequency independent whether
37416 * throttling is active or not. */
37417- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37418+ pax_open_kernel();
37419+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37420+ pax_close_kernel();
37421
37422 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
37423 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
37424diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
37425index c71ee14..7c2e183 100644
37426--- a/drivers/cpufreq/sparc-us3-cpufreq.c
37427+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
37428@@ -18,14 +18,12 @@
37429 #include <asm/head.h>
37430 #include <asm/timer.h>
37431
37432-static struct cpufreq_driver *cpufreq_us3_driver;
37433-
37434 struct us3_freq_percpu_info {
37435 struct cpufreq_frequency_table table[4];
37436 };
37437
37438 /* Indexed by cpu number. */
37439-static struct us3_freq_percpu_info *us3_freq_table;
37440+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
37441
37442 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
37443 * in the Safari config register.
37444@@ -186,12 +184,25 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
37445
37446 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
37447 {
37448- if (cpufreq_us3_driver)
37449- us3_set_cpu_divider_index(policy, 0);
37450+ us3_set_cpu_divider_index(policy->cpu, 0);
37451
37452 return 0;
37453 }
37454
37455+static int __init us3_freq_init(void);
37456+static void __exit us3_freq_exit(void);
37457+
37458+static struct cpufreq_driver cpufreq_us3_driver = {
37459+ .init = us3_freq_cpu_init,
37460+ .verify = us3_freq_verify,
37461+ .target = us3_freq_target,
37462+ .get = us3_freq_get,
37463+ .exit = us3_freq_cpu_exit,
37464+ .owner = THIS_MODULE,
37465+ .name = "UltraSPARC-III",
37466+
37467+};
37468+
37469 static int __init us3_freq_init(void)
37470 {
37471 unsigned long manuf, impl, ver;
37472@@ -208,57 +219,15 @@ static int __init us3_freq_init(void)
37473 (impl == CHEETAH_IMPL ||
37474 impl == CHEETAH_PLUS_IMPL ||
37475 impl == JAGUAR_IMPL ||
37476- impl == PANTHER_IMPL)) {
37477- struct cpufreq_driver *driver;
37478-
37479- ret = -ENOMEM;
37480- driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
37481- if (!driver)
37482- goto err_out;
37483-
37484- us3_freq_table = kzalloc(
37485- (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
37486- GFP_KERNEL);
37487- if (!us3_freq_table)
37488- goto err_out;
37489-
37490- driver->init = us3_freq_cpu_init;
37491- driver->verify = us3_freq_verify;
37492- driver->target = us3_freq_target;
37493- driver->get = us3_freq_get;
37494- driver->exit = us3_freq_cpu_exit;
37495- driver->owner = THIS_MODULE,
37496- strcpy(driver->name, "UltraSPARC-III");
37497-
37498- cpufreq_us3_driver = driver;
37499- ret = cpufreq_register_driver(driver);
37500- if (ret)
37501- goto err_out;
37502-
37503- return 0;
37504-
37505-err_out:
37506- if (driver) {
37507- kfree(driver);
37508- cpufreq_us3_driver = NULL;
37509- }
37510- kfree(us3_freq_table);
37511- us3_freq_table = NULL;
37512- return ret;
37513- }
37514+ impl == PANTHER_IMPL))
37515+ return cpufreq_register_driver(&cpufreq_us3_driver);
37516
37517 return -ENODEV;
37518 }
37519
37520 static void __exit us3_freq_exit(void)
37521 {
37522- if (cpufreq_us3_driver) {
37523- cpufreq_unregister_driver(cpufreq_us3_driver);
37524- kfree(cpufreq_us3_driver);
37525- cpufreq_us3_driver = NULL;
37526- kfree(us3_freq_table);
37527- us3_freq_table = NULL;
37528- }
37529+ cpufreq_unregister_driver(&cpufreq_us3_driver);
37530 }
37531
37532 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
37533diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
37534index 618e6f4..e89d915 100644
37535--- a/drivers/cpufreq/speedstep-centrino.c
37536+++ b/drivers/cpufreq/speedstep-centrino.c
37537@@ -353,8 +353,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
37538 !cpu_has(cpu, X86_FEATURE_EST))
37539 return -ENODEV;
37540
37541- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
37542- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
37543+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
37544+ pax_open_kernel();
37545+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
37546+ pax_close_kernel();
37547+ }
37548
37549 if (policy->cpu != 0)
37550 return -ENODEV;
37551diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
37552index c3a93fe..e808f24 100644
37553--- a/drivers/cpuidle/cpuidle.c
37554+++ b/drivers/cpuidle/cpuidle.c
37555@@ -254,7 +254,7 @@ static int poll_idle(struct cpuidle_device *dev,
37556
37557 static void poll_idle_init(struct cpuidle_driver *drv)
37558 {
37559- struct cpuidle_state *state = &drv->states[0];
37560+ cpuidle_state_no_const *state = &drv->states[0];
37561
37562 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
37563 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
37564diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
37565index ea2f8e7..70ac501 100644
37566--- a/drivers/cpuidle/governor.c
37567+++ b/drivers/cpuidle/governor.c
37568@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
37569 mutex_lock(&cpuidle_lock);
37570 if (__cpuidle_find_governor(gov->name) == NULL) {
37571 ret = 0;
37572- list_add_tail(&gov->governor_list, &cpuidle_governors);
37573+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
37574 if (!cpuidle_curr_governor ||
37575 cpuidle_curr_governor->rating < gov->rating)
37576 cpuidle_switch_governor(gov);
37577@@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
37578 new_gov = cpuidle_replace_governor(gov->rating);
37579 cpuidle_switch_governor(new_gov);
37580 }
37581- list_del(&gov->governor_list);
37582+ pax_list_del((struct list_head *)&gov->governor_list);
37583 mutex_unlock(&cpuidle_lock);
37584 }
37585
37586diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
37587index 428754a..8bdf9cc 100644
37588--- a/drivers/cpuidle/sysfs.c
37589+++ b/drivers/cpuidle/sysfs.c
37590@@ -131,7 +131,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
37591 NULL
37592 };
37593
37594-static struct attribute_group cpuidle_attr_group = {
37595+static attribute_group_no_const cpuidle_attr_group = {
37596 .attrs = cpuidle_default_attrs,
37597 .name = "cpuidle",
37598 };
37599diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
37600index 3b36797..db0b0c0 100644
37601--- a/drivers/devfreq/devfreq.c
37602+++ b/drivers/devfreq/devfreq.c
37603@@ -477,7 +477,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
37604 GFP_KERNEL);
37605 devfreq->last_stat_updated = jiffies;
37606
37607- dev_set_name(&devfreq->dev, dev_name(dev));
37608+ dev_set_name(&devfreq->dev, "%s", dev_name(dev));
37609 err = device_register(&devfreq->dev);
37610 if (err) {
37611 put_device(&devfreq->dev);
37612@@ -588,7 +588,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
37613 goto err_out;
37614 }
37615
37616- list_add(&governor->node, &devfreq_governor_list);
37617+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
37618
37619 list_for_each_entry(devfreq, &devfreq_list, node) {
37620 int ret = 0;
37621@@ -676,7 +676,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
37622 }
37623 }
37624
37625- list_del(&governor->node);
37626+ pax_list_del((struct list_head *)&governor->node);
37627 err_out:
37628 mutex_unlock(&devfreq_list_lock);
37629
37630diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
37631index b70709b..1d8d02a 100644
37632--- a/drivers/dma/sh/shdma.c
37633+++ b/drivers/dma/sh/shdma.c
37634@@ -476,7 +476,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
37635 return ret;
37636 }
37637
37638-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
37639+static struct notifier_block sh_dmae_nmi_notifier = {
37640 .notifier_call = sh_dmae_nmi_handler,
37641
37642 /* Run before NMI debug handler and KGDB */
37643diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
37644index c4d700a..0b57abd 100644
37645--- a/drivers/edac/edac_mc_sysfs.c
37646+++ b/drivers/edac/edac_mc_sysfs.c
37647@@ -148,7 +148,7 @@ static const char * const edac_caps[] = {
37648 struct dev_ch_attribute {
37649 struct device_attribute attr;
37650 int channel;
37651-};
37652+} __do_const;
37653
37654 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
37655 struct dev_ch_attribute dev_attr_legacy_##_name = \
37656@@ -1005,14 +1005,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
37657 }
37658
37659 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
37660+ pax_open_kernel();
37661 if (mci->get_sdram_scrub_rate) {
37662- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
37663- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
37664+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
37665+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
37666 }
37667 if (mci->set_sdram_scrub_rate) {
37668- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
37669- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
37670+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
37671+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
37672 }
37673+ pax_close_kernel();
37674 err = device_create_file(&mci->dev,
37675 &dev_attr_sdram_scrub_rate);
37676 if (err) {
37677diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
37678index e8658e4..22746d6 100644
37679--- a/drivers/edac/edac_pci_sysfs.c
37680+++ b/drivers/edac/edac_pci_sysfs.c
37681@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
37682 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
37683 static int edac_pci_poll_msec = 1000; /* one second workq period */
37684
37685-static atomic_t pci_parity_count = ATOMIC_INIT(0);
37686-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
37687+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
37688+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
37689
37690 static struct kobject *edac_pci_top_main_kobj;
37691 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
37692@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
37693 void *value;
37694 ssize_t(*show) (void *, char *);
37695 ssize_t(*store) (void *, const char *, size_t);
37696-};
37697+} __do_const;
37698
37699 /* Set of show/store abstract level functions for PCI Parity object */
37700 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
37701@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
37702 edac_printk(KERN_CRIT, EDAC_PCI,
37703 "Signaled System Error on %s\n",
37704 pci_name(dev));
37705- atomic_inc(&pci_nonparity_count);
37706+ atomic_inc_unchecked(&pci_nonparity_count);
37707 }
37708
37709 if (status & (PCI_STATUS_PARITY)) {
37710@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
37711 "Master Data Parity Error on %s\n",
37712 pci_name(dev));
37713
37714- atomic_inc(&pci_parity_count);
37715+ atomic_inc_unchecked(&pci_parity_count);
37716 }
37717
37718 if (status & (PCI_STATUS_DETECTED_PARITY)) {
37719@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
37720 "Detected Parity Error on %s\n",
37721 pci_name(dev));
37722
37723- atomic_inc(&pci_parity_count);
37724+ atomic_inc_unchecked(&pci_parity_count);
37725 }
37726 }
37727
37728@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
37729 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
37730 "Signaled System Error on %s\n",
37731 pci_name(dev));
37732- atomic_inc(&pci_nonparity_count);
37733+ atomic_inc_unchecked(&pci_nonparity_count);
37734 }
37735
37736 if (status & (PCI_STATUS_PARITY)) {
37737@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
37738 "Master Data Parity Error on "
37739 "%s\n", pci_name(dev));
37740
37741- atomic_inc(&pci_parity_count);
37742+ atomic_inc_unchecked(&pci_parity_count);
37743 }
37744
37745 if (status & (PCI_STATUS_DETECTED_PARITY)) {
37746@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
37747 "Detected Parity Error on %s\n",
37748 pci_name(dev));
37749
37750- atomic_inc(&pci_parity_count);
37751+ atomic_inc_unchecked(&pci_parity_count);
37752 }
37753 }
37754 }
37755@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
37756 if (!check_pci_errors)
37757 return;
37758
37759- before_count = atomic_read(&pci_parity_count);
37760+ before_count = atomic_read_unchecked(&pci_parity_count);
37761
37762 /* scan all PCI devices looking for a Parity Error on devices and
37763 * bridges.
37764@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
37765 /* Only if operator has selected panic on PCI Error */
37766 if (edac_pci_get_panic_on_pe()) {
37767 /* If the count is different 'after' from 'before' */
37768- if (before_count != atomic_read(&pci_parity_count))
37769+ if (before_count != atomic_read_unchecked(&pci_parity_count))
37770 panic("EDAC: PCI Parity Error");
37771 }
37772 }
37773diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
37774index 51b7e3a..aa8a3e8 100644
37775--- a/drivers/edac/mce_amd.h
37776+++ b/drivers/edac/mce_amd.h
37777@@ -77,7 +77,7 @@ struct amd_decoder_ops {
37778 bool (*mc0_mce)(u16, u8);
37779 bool (*mc1_mce)(u16, u8);
37780 bool (*mc2_mce)(u16, u8);
37781-};
37782+} __no_const;
37783
37784 void amd_report_gart_errors(bool);
37785 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
37786diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
37787index 57ea7f4..789e3c3 100644
37788--- a/drivers/firewire/core-card.c
37789+++ b/drivers/firewire/core-card.c
37790@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
37791
37792 void fw_core_remove_card(struct fw_card *card)
37793 {
37794- struct fw_card_driver dummy_driver = dummy_driver_template;
37795+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
37796
37797 card->driver->update_phy_reg(card, 4,
37798 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
37799diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
37800index 664a6ff..af13580 100644
37801--- a/drivers/firewire/core-device.c
37802+++ b/drivers/firewire/core-device.c
37803@@ -232,7 +232,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
37804 struct config_rom_attribute {
37805 struct device_attribute attr;
37806 u32 key;
37807-};
37808+} __do_const;
37809
37810 static ssize_t show_immediate(struct device *dev,
37811 struct device_attribute *dattr, char *buf)
37812diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
37813index 28a94c7..58da63a 100644
37814--- a/drivers/firewire/core-transaction.c
37815+++ b/drivers/firewire/core-transaction.c
37816@@ -38,6 +38,7 @@
37817 #include <linux/timer.h>
37818 #include <linux/types.h>
37819 #include <linux/workqueue.h>
37820+#include <linux/sched.h>
37821
37822 #include <asm/byteorder.h>
37823
37824diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
37825index 515a42c..5ecf3ba 100644
37826--- a/drivers/firewire/core.h
37827+++ b/drivers/firewire/core.h
37828@@ -111,6 +111,7 @@ struct fw_card_driver {
37829
37830 int (*stop_iso)(struct fw_iso_context *ctx);
37831 };
37832+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
37833
37834 void fw_card_initialize(struct fw_card *card,
37835 const struct fw_card_driver *driver, struct device *device);
37836diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
37837index 94a58a0..f5eba42 100644
37838--- a/drivers/firmware/dmi-id.c
37839+++ b/drivers/firmware/dmi-id.c
37840@@ -16,7 +16,7 @@
37841 struct dmi_device_attribute{
37842 struct device_attribute dev_attr;
37843 int field;
37844-};
37845+} __do_const;
37846 #define to_dmi_dev_attr(_dev_attr) \
37847 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
37848
37849diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
37850index b95159b..841ae55 100644
37851--- a/drivers/firmware/dmi_scan.c
37852+++ b/drivers/firmware/dmi_scan.c
37853@@ -497,11 +497,6 @@ void __init dmi_scan_machine(void)
37854 }
37855 }
37856 else {
37857- /*
37858- * no iounmap() for that ioremap(); it would be a no-op, but
37859- * it's so early in setup that sucker gets confused into doing
37860- * what it shouldn't if we actually call it.
37861- */
37862 p = dmi_ioremap(0xF0000, 0x10000);
37863 if (p == NULL)
37864 goto error;
37865@@ -786,7 +781,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
37866 if (buf == NULL)
37867 return -1;
37868
37869- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
37870+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
37871
37872 iounmap(buf);
37873 return 0;
37874diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
37875index 5145fa3..0d3babd 100644
37876--- a/drivers/firmware/efi/efi.c
37877+++ b/drivers/firmware/efi/efi.c
37878@@ -65,14 +65,16 @@ static struct attribute_group efi_subsys_attr_group = {
37879 };
37880
37881 static struct efivars generic_efivars;
37882-static struct efivar_operations generic_ops;
37883+static efivar_operations_no_const generic_ops __read_only;
37884
37885 static int generic_ops_register(void)
37886 {
37887- generic_ops.get_variable = efi.get_variable;
37888- generic_ops.set_variable = efi.set_variable;
37889- generic_ops.get_next_variable = efi.get_next_variable;
37890- generic_ops.query_variable_store = efi_query_variable_store;
37891+ pax_open_kernel();
37892+ *(void **)&generic_ops.get_variable = efi.get_variable;
37893+ *(void **)&generic_ops.set_variable = efi.set_variable;
37894+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
37895+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
37896+ pax_close_kernel();
37897
37898 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
37899 }
37900diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
37901index 8bd1bb6..c48b0c6 100644
37902--- a/drivers/firmware/efi/efivars.c
37903+++ b/drivers/firmware/efi/efivars.c
37904@@ -452,7 +452,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
37905 static int
37906 create_efivars_bin_attributes(void)
37907 {
37908- struct bin_attribute *attr;
37909+ bin_attribute_no_const *attr;
37910 int error;
37911
37912 /* new_var */
37913diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
37914index 2a90ba6..07f3733 100644
37915--- a/drivers/firmware/google/memconsole.c
37916+++ b/drivers/firmware/google/memconsole.c
37917@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
37918 if (!found_memconsole())
37919 return -ENODEV;
37920
37921- memconsole_bin_attr.size = memconsole_length;
37922+ pax_open_kernel();
37923+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
37924+ pax_close_kernel();
37925
37926 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
37927
37928diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
37929index e16d932..f0206ef 100644
37930--- a/drivers/gpio/gpio-ich.c
37931+++ b/drivers/gpio/gpio-ich.c
37932@@ -69,7 +69,7 @@ struct ichx_desc {
37933 /* Some chipsets have quirks, let these use their own request/get */
37934 int (*request)(struct gpio_chip *chip, unsigned offset);
37935 int (*get)(struct gpio_chip *chip, unsigned offset);
37936-};
37937+} __do_const;
37938
37939 static struct {
37940 spinlock_t lock;
37941diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
37942index 9902732..64b62dd 100644
37943--- a/drivers/gpio/gpio-vr41xx.c
37944+++ b/drivers/gpio/gpio-vr41xx.c
37945@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
37946 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
37947 maskl, pendl, maskh, pendh);
37948
37949- atomic_inc(&irq_err_count);
37950+ atomic_inc_unchecked(&irq_err_count);
37951
37952 return -EINVAL;
37953 }
37954diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
37955index ed1334e..ee0dd42 100644
37956--- a/drivers/gpu/drm/drm_crtc_helper.c
37957+++ b/drivers/gpu/drm/drm_crtc_helper.c
37958@@ -321,7 +321,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
37959 struct drm_crtc *tmp;
37960 int crtc_mask = 1;
37961
37962- WARN(!crtc, "checking null crtc?\n");
37963+ BUG_ON(!crtc);
37964
37965 dev = crtc->dev;
37966
37967diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
37968index 9cc247f..36aa285 100644
37969--- a/drivers/gpu/drm/drm_drv.c
37970+++ b/drivers/gpu/drm/drm_drv.c
37971@@ -306,7 +306,7 @@ module_exit(drm_core_exit);
37972 /**
37973 * Copy and IOCTL return string to user space
37974 */
37975-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
37976+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
37977 {
37978 int len;
37979
37980@@ -376,7 +376,7 @@ long drm_ioctl(struct file *filp,
37981 struct drm_file *file_priv = filp->private_data;
37982 struct drm_device *dev;
37983 const struct drm_ioctl_desc *ioctl = NULL;
37984- drm_ioctl_t *func;
37985+ drm_ioctl_no_const_t func;
37986 unsigned int nr = DRM_IOCTL_NR(cmd);
37987 int retcode = -EINVAL;
37988 char stack_kdata[128];
37989@@ -389,7 +389,7 @@ long drm_ioctl(struct file *filp,
37990 return -ENODEV;
37991
37992 atomic_inc(&dev->ioctl_count);
37993- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
37994+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
37995 ++file_priv->ioctl_count;
37996
37997 if ((nr >= DRM_CORE_IOCTL_COUNT) &&
37998diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
37999index 429e07d..e681a2c 100644
38000--- a/drivers/gpu/drm/drm_fops.c
38001+++ b/drivers/gpu/drm/drm_fops.c
38002@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
38003 }
38004
38005 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
38006- atomic_set(&dev->counts[i], 0);
38007+ atomic_set_unchecked(&dev->counts[i], 0);
38008
38009 dev->sigdata.lock = NULL;
38010
38011@@ -135,7 +135,7 @@ int drm_open(struct inode *inode, struct file *filp)
38012 if (drm_device_is_unplugged(dev))
38013 return -ENODEV;
38014
38015- if (!dev->open_count++)
38016+ if (local_inc_return(&dev->open_count) == 1)
38017 need_setup = 1;
38018 mutex_lock(&dev->struct_mutex);
38019 old_imapping = inode->i_mapping;
38020@@ -151,7 +151,7 @@ int drm_open(struct inode *inode, struct file *filp)
38021 retcode = drm_open_helper(inode, filp, dev);
38022 if (retcode)
38023 goto err_undo;
38024- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
38025+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
38026 if (need_setup) {
38027 retcode = drm_setup(dev);
38028 if (retcode)
38029@@ -166,7 +166,7 @@ err_undo:
38030 iput(container_of(dev->dev_mapping, struct inode, i_data));
38031 dev->dev_mapping = old_mapping;
38032 mutex_unlock(&dev->struct_mutex);
38033- dev->open_count--;
38034+ local_dec(&dev->open_count);
38035 return retcode;
38036 }
38037 EXPORT_SYMBOL(drm_open);
38038@@ -441,7 +441,7 @@ int drm_release(struct inode *inode, struct file *filp)
38039
38040 mutex_lock(&drm_global_mutex);
38041
38042- DRM_DEBUG("open_count = %d\n", dev->open_count);
38043+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
38044
38045 if (dev->driver->preclose)
38046 dev->driver->preclose(dev, file_priv);
38047@@ -450,10 +450,10 @@ int drm_release(struct inode *inode, struct file *filp)
38048 * Begin inline drm_release
38049 */
38050
38051- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
38052+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
38053 task_pid_nr(current),
38054 (long)old_encode_dev(file_priv->minor->device),
38055- dev->open_count);
38056+ local_read(&dev->open_count));
38057
38058 /* Release any auth tokens that might point to this file_priv,
38059 (do that under the drm_global_mutex) */
38060@@ -550,8 +550,8 @@ int drm_release(struct inode *inode, struct file *filp)
38061 * End inline drm_release
38062 */
38063
38064- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
38065- if (!--dev->open_count) {
38066+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
38067+ if (local_dec_and_test(&dev->open_count)) {
38068 if (atomic_read(&dev->ioctl_count)) {
38069 DRM_ERROR("Device busy: %d\n",
38070 atomic_read(&dev->ioctl_count));
38071diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
38072index f731116..629842c 100644
38073--- a/drivers/gpu/drm/drm_global.c
38074+++ b/drivers/gpu/drm/drm_global.c
38075@@ -36,7 +36,7 @@
38076 struct drm_global_item {
38077 struct mutex mutex;
38078 void *object;
38079- int refcount;
38080+ atomic_t refcount;
38081 };
38082
38083 static struct drm_global_item glob[DRM_GLOBAL_NUM];
38084@@ -49,7 +49,7 @@ void drm_global_init(void)
38085 struct drm_global_item *item = &glob[i];
38086 mutex_init(&item->mutex);
38087 item->object = NULL;
38088- item->refcount = 0;
38089+ atomic_set(&item->refcount, 0);
38090 }
38091 }
38092
38093@@ -59,7 +59,7 @@ void drm_global_release(void)
38094 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
38095 struct drm_global_item *item = &glob[i];
38096 BUG_ON(item->object != NULL);
38097- BUG_ON(item->refcount != 0);
38098+ BUG_ON(atomic_read(&item->refcount) != 0);
38099 }
38100 }
38101
38102@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
38103 void *object;
38104
38105 mutex_lock(&item->mutex);
38106- if (item->refcount == 0) {
38107+ if (atomic_read(&item->refcount) == 0) {
38108 item->object = kzalloc(ref->size, GFP_KERNEL);
38109 if (unlikely(item->object == NULL)) {
38110 ret = -ENOMEM;
38111@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
38112 goto out_err;
38113
38114 }
38115- ++item->refcount;
38116+ atomic_inc(&item->refcount);
38117 ref->object = item->object;
38118 object = item->object;
38119 mutex_unlock(&item->mutex);
38120@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
38121 struct drm_global_item *item = &glob[ref->global_type];
38122
38123 mutex_lock(&item->mutex);
38124- BUG_ON(item->refcount == 0);
38125+ BUG_ON(atomic_read(&item->refcount) == 0);
38126 BUG_ON(ref->object != item->object);
38127- if (--item->refcount == 0) {
38128+ if (atomic_dec_and_test(&item->refcount)) {
38129 ref->release(ref);
38130 item->object = NULL;
38131 }
38132diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
38133index d4b20ce..77a8d41 100644
38134--- a/drivers/gpu/drm/drm_info.c
38135+++ b/drivers/gpu/drm/drm_info.c
38136@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
38137 struct drm_local_map *map;
38138 struct drm_map_list *r_list;
38139
38140- /* Hardcoded from _DRM_FRAME_BUFFER,
38141- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
38142- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
38143- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
38144+ static const char * const types[] = {
38145+ [_DRM_FRAME_BUFFER] = "FB",
38146+ [_DRM_REGISTERS] = "REG",
38147+ [_DRM_SHM] = "SHM",
38148+ [_DRM_AGP] = "AGP",
38149+ [_DRM_SCATTER_GATHER] = "SG",
38150+ [_DRM_CONSISTENT] = "PCI",
38151+ [_DRM_GEM] = "GEM" };
38152 const char *type;
38153 int i;
38154
38155@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
38156 map = r_list->map;
38157 if (!map)
38158 continue;
38159- if (map->type < 0 || map->type > 5)
38160+ if (map->type >= ARRAY_SIZE(types))
38161 type = "??";
38162 else
38163 type = types[map->type];
38164@@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
38165 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
38166 vma->vm_flags & VM_LOCKED ? 'l' : '-',
38167 vma->vm_flags & VM_IO ? 'i' : '-',
38168+#ifdef CONFIG_GRKERNSEC_HIDESYM
38169+ 0);
38170+#else
38171 vma->vm_pgoff);
38172+#endif
38173
38174 #if defined(__i386__)
38175 pgprot = pgprot_val(vma->vm_page_prot);
38176diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
38177index 2f4c434..dd12cd2 100644
38178--- a/drivers/gpu/drm/drm_ioc32.c
38179+++ b/drivers/gpu/drm/drm_ioc32.c
38180@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
38181 request = compat_alloc_user_space(nbytes);
38182 if (!access_ok(VERIFY_WRITE, request, nbytes))
38183 return -EFAULT;
38184- list = (struct drm_buf_desc *) (request + 1);
38185+ list = (struct drm_buf_desc __user *) (request + 1);
38186
38187 if (__put_user(count, &request->count)
38188 || __put_user(list, &request->list))
38189@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
38190 request = compat_alloc_user_space(nbytes);
38191 if (!access_ok(VERIFY_WRITE, request, nbytes))
38192 return -EFAULT;
38193- list = (struct drm_buf_pub *) (request + 1);
38194+ list = (struct drm_buf_pub __user *) (request + 1);
38195
38196 if (__put_user(count, &request->count)
38197 || __put_user(list, &request->list))
38198@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
38199 return 0;
38200 }
38201
38202-drm_ioctl_compat_t *drm_compat_ioctls[] = {
38203+drm_ioctl_compat_t drm_compat_ioctls[] = {
38204 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
38205 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
38206 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
38207@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
38208 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38209 {
38210 unsigned int nr = DRM_IOCTL_NR(cmd);
38211- drm_ioctl_compat_t *fn;
38212 int ret;
38213
38214 /* Assume that ioctls without an explicit compat routine will just
38215@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38216 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
38217 return drm_ioctl(filp, cmd, arg);
38218
38219- fn = drm_compat_ioctls[nr];
38220-
38221- if (fn != NULL)
38222- ret = (*fn) (filp, cmd, arg);
38223+ if (drm_compat_ioctls[nr] != NULL)
38224+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
38225 else
38226 ret = drm_ioctl(filp, cmd, arg);
38227
38228diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
38229index e77bd8b..1571b85 100644
38230--- a/drivers/gpu/drm/drm_ioctl.c
38231+++ b/drivers/gpu/drm/drm_ioctl.c
38232@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
38233 stats->data[i].value =
38234 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
38235 else
38236- stats->data[i].value = atomic_read(&dev->counts[i]);
38237+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
38238 stats->data[i].type = dev->types[i];
38239 }
38240
38241diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
38242index d752c96..fe08455 100644
38243--- a/drivers/gpu/drm/drm_lock.c
38244+++ b/drivers/gpu/drm/drm_lock.c
38245@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
38246 if (drm_lock_take(&master->lock, lock->context)) {
38247 master->lock.file_priv = file_priv;
38248 master->lock.lock_time = jiffies;
38249- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
38250+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
38251 break; /* Got lock */
38252 }
38253
38254@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
38255 return -EINVAL;
38256 }
38257
38258- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
38259+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
38260
38261 if (drm_lock_free(&master->lock, lock->context)) {
38262 /* FIXME: Should really bail out here. */
38263diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
38264index 16f3ec5..b28f9ca 100644
38265--- a/drivers/gpu/drm/drm_stub.c
38266+++ b/drivers/gpu/drm/drm_stub.c
38267@@ -501,7 +501,7 @@ void drm_unplug_dev(struct drm_device *dev)
38268
38269 drm_device_set_unplugged(dev);
38270
38271- if (dev->open_count == 0) {
38272+ if (local_read(&dev->open_count) == 0) {
38273 drm_put_dev(dev);
38274 }
38275 mutex_unlock(&drm_global_mutex);
38276diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
38277index 0229665..f61329c 100644
38278--- a/drivers/gpu/drm/drm_sysfs.c
38279+++ b/drivers/gpu/drm/drm_sysfs.c
38280@@ -499,7 +499,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
38281 int drm_sysfs_device_add(struct drm_minor *minor)
38282 {
38283 int err;
38284- char *minor_str;
38285+ const char *minor_str;
38286
38287 minor->kdev.parent = minor->dev->dev;
38288
38289diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
38290index 004ecdf..db1f6e0 100644
38291--- a/drivers/gpu/drm/i810/i810_dma.c
38292+++ b/drivers/gpu/drm/i810/i810_dma.c
38293@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
38294 dma->buflist[vertex->idx],
38295 vertex->discard, vertex->used);
38296
38297- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
38298- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
38299+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
38300+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
38301 sarea_priv->last_enqueue = dev_priv->counter - 1;
38302 sarea_priv->last_dispatch = (int)hw_status[5];
38303
38304@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
38305 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
38306 mc->last_render);
38307
38308- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
38309- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
38310+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
38311+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
38312 sarea_priv->last_enqueue = dev_priv->counter - 1;
38313 sarea_priv->last_dispatch = (int)hw_status[5];
38314
38315diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
38316index 6e0acad..93c8289 100644
38317--- a/drivers/gpu/drm/i810/i810_drv.h
38318+++ b/drivers/gpu/drm/i810/i810_drv.h
38319@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
38320 int page_flipping;
38321
38322 wait_queue_head_t irq_queue;
38323- atomic_t irq_received;
38324- atomic_t irq_emitted;
38325+ atomic_unchecked_t irq_received;
38326+ atomic_unchecked_t irq_emitted;
38327
38328 int front_offset;
38329 } drm_i810_private_t;
38330diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
38331index e913d32..4d9b351 100644
38332--- a/drivers/gpu/drm/i915/i915_debugfs.c
38333+++ b/drivers/gpu/drm/i915/i915_debugfs.c
38334@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
38335 I915_READ(GTIMR));
38336 }
38337 seq_printf(m, "Interrupts received: %d\n",
38338- atomic_read(&dev_priv->irq_received));
38339+ atomic_read_unchecked(&dev_priv->irq_received));
38340 for_each_ring(ring, dev_priv, i) {
38341 if (IS_GEN6(dev) || IS_GEN7(dev)) {
38342 seq_printf(m,
38343diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
38344index 17d9b0b..860e6d9 100644
38345--- a/drivers/gpu/drm/i915/i915_dma.c
38346+++ b/drivers/gpu/drm/i915/i915_dma.c
38347@@ -1259,7 +1259,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
38348 bool can_switch;
38349
38350 spin_lock(&dev->count_lock);
38351- can_switch = (dev->open_count == 0);
38352+ can_switch = (local_read(&dev->open_count) == 0);
38353 spin_unlock(&dev->count_lock);
38354 return can_switch;
38355 }
38356diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
38357index 47d8b68..52f5d8d 100644
38358--- a/drivers/gpu/drm/i915/i915_drv.h
38359+++ b/drivers/gpu/drm/i915/i915_drv.h
38360@@ -916,7 +916,7 @@ typedef struct drm_i915_private {
38361 drm_dma_handle_t *status_page_dmah;
38362 struct resource mch_res;
38363
38364- atomic_t irq_received;
38365+ atomic_unchecked_t irq_received;
38366
38367 /* protects the irq masks */
38368 spinlock_t irq_lock;
38369@@ -1813,7 +1813,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
38370 struct drm_i915_private *dev_priv, unsigned port);
38371 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
38372 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
38373-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
38374+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
38375 {
38376 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
38377 }
38378diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
38379index 117ce38..eefd237 100644
38380--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
38381+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
38382@@ -727,9 +727,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
38383
38384 static int
38385 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
38386- int count)
38387+ unsigned int count)
38388 {
38389- int i;
38390+ unsigned int i;
38391 int relocs_total = 0;
38392 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
38393
38394diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
38395index 3c59584..500f2e9 100644
38396--- a/drivers/gpu/drm/i915/i915_ioc32.c
38397+++ b/drivers/gpu/drm/i915/i915_ioc32.c
38398@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
38399 (unsigned long)request);
38400 }
38401
38402-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
38403+static drm_ioctl_compat_t i915_compat_ioctls[] = {
38404 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
38405 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
38406 [DRM_I915_GETPARAM] = compat_i915_getparam,
38407@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
38408 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38409 {
38410 unsigned int nr = DRM_IOCTL_NR(cmd);
38411- drm_ioctl_compat_t *fn = NULL;
38412 int ret;
38413
38414 if (nr < DRM_COMMAND_BASE)
38415 return drm_compat_ioctl(filp, cmd, arg);
38416
38417- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
38418- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
38419-
38420- if (fn != NULL)
38421+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
38422+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
38423 ret = (*fn) (filp, cmd, arg);
38424- else
38425+ } else
38426 ret = drm_ioctl(filp, cmd, arg);
38427
38428 return ret;
38429diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
38430index e5e32869..1678f36 100644
38431--- a/drivers/gpu/drm/i915/i915_irq.c
38432+++ b/drivers/gpu/drm/i915/i915_irq.c
38433@@ -670,7 +670,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
38434 int pipe;
38435 u32 pipe_stats[I915_MAX_PIPES];
38436
38437- atomic_inc(&dev_priv->irq_received);
38438+ atomic_inc_unchecked(&dev_priv->irq_received);
38439
38440 while (true) {
38441 iir = I915_READ(VLV_IIR);
38442@@ -835,7 +835,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
38443 irqreturn_t ret = IRQ_NONE;
38444 int i;
38445
38446- atomic_inc(&dev_priv->irq_received);
38447+ atomic_inc_unchecked(&dev_priv->irq_received);
38448
38449 /* disable master interrupt before clearing iir */
38450 de_ier = I915_READ(DEIER);
38451@@ -925,7 +925,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
38452 int ret = IRQ_NONE;
38453 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
38454
38455- atomic_inc(&dev_priv->irq_received);
38456+ atomic_inc_unchecked(&dev_priv->irq_received);
38457
38458 /* disable master interrupt before clearing iir */
38459 de_ier = I915_READ(DEIER);
38460@@ -2089,7 +2089,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
38461 {
38462 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
38463
38464- atomic_set(&dev_priv->irq_received, 0);
38465+ atomic_set_unchecked(&dev_priv->irq_received, 0);
38466
38467 I915_WRITE(HWSTAM, 0xeffe);
38468
38469@@ -2124,7 +2124,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
38470 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
38471 int pipe;
38472
38473- atomic_set(&dev_priv->irq_received, 0);
38474+ atomic_set_unchecked(&dev_priv->irq_received, 0);
38475
38476 /* VLV magic */
38477 I915_WRITE(VLV_IMR, 0);
38478@@ -2411,7 +2411,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
38479 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
38480 int pipe;
38481
38482- atomic_set(&dev_priv->irq_received, 0);
38483+ atomic_set_unchecked(&dev_priv->irq_received, 0);
38484
38485 for_each_pipe(pipe)
38486 I915_WRITE(PIPESTAT(pipe), 0);
38487@@ -2490,7 +2490,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
38488 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
38489 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
38490
38491- atomic_inc(&dev_priv->irq_received);
38492+ atomic_inc_unchecked(&dev_priv->irq_received);
38493
38494 iir = I915_READ16(IIR);
38495 if (iir == 0)
38496@@ -2565,7 +2565,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
38497 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
38498 int pipe;
38499
38500- atomic_set(&dev_priv->irq_received, 0);
38501+ atomic_set_unchecked(&dev_priv->irq_received, 0);
38502
38503 if (I915_HAS_HOTPLUG(dev)) {
38504 I915_WRITE(PORT_HOTPLUG_EN, 0);
38505@@ -2664,7 +2664,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
38506 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
38507 int pipe, ret = IRQ_NONE;
38508
38509- atomic_inc(&dev_priv->irq_received);
38510+ atomic_inc_unchecked(&dev_priv->irq_received);
38511
38512 iir = I915_READ(IIR);
38513 do {
38514@@ -2791,7 +2791,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
38515 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
38516 int pipe;
38517
38518- atomic_set(&dev_priv->irq_received, 0);
38519+ atomic_set_unchecked(&dev_priv->irq_received, 0);
38520
38521 I915_WRITE(PORT_HOTPLUG_EN, 0);
38522 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
38523@@ -2898,7 +2898,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
38524 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
38525 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
38526
38527- atomic_inc(&dev_priv->irq_received);
38528+ atomic_inc_unchecked(&dev_priv->irq_received);
38529
38530 iir = I915_READ(IIR);
38531
38532diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
38533index eea5982..eeef407 100644
38534--- a/drivers/gpu/drm/i915/intel_display.c
38535+++ b/drivers/gpu/drm/i915/intel_display.c
38536@@ -8935,13 +8935,13 @@ struct intel_quirk {
38537 int subsystem_vendor;
38538 int subsystem_device;
38539 void (*hook)(struct drm_device *dev);
38540-};
38541+} __do_const;
38542
38543 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
38544 struct intel_dmi_quirk {
38545 void (*hook)(struct drm_device *dev);
38546 const struct dmi_system_id (*dmi_id_list)[];
38547-};
38548+} __do_const;
38549
38550 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
38551 {
38552@@ -8949,18 +8949,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
38553 return 1;
38554 }
38555
38556-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
38557+static const struct dmi_system_id intel_dmi_quirks_table[] = {
38558 {
38559- .dmi_id_list = &(const struct dmi_system_id[]) {
38560- {
38561- .callback = intel_dmi_reverse_brightness,
38562- .ident = "NCR Corporation",
38563- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
38564- DMI_MATCH(DMI_PRODUCT_NAME, ""),
38565- },
38566- },
38567- { } /* terminating entry */
38568+ .callback = intel_dmi_reverse_brightness,
38569+ .ident = "NCR Corporation",
38570+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
38571+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
38572 },
38573+ },
38574+ { } /* terminating entry */
38575+};
38576+
38577+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
38578+ {
38579+ .dmi_id_list = &intel_dmi_quirks_table,
38580 .hook = quirk_invert_brightness,
38581 },
38582 };
38583diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
38584index 54558a0..2d97005 100644
38585--- a/drivers/gpu/drm/mga/mga_drv.h
38586+++ b/drivers/gpu/drm/mga/mga_drv.h
38587@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
38588 u32 clear_cmd;
38589 u32 maccess;
38590
38591- atomic_t vbl_received; /**< Number of vblanks received. */
38592+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
38593 wait_queue_head_t fence_queue;
38594- atomic_t last_fence_retired;
38595+ atomic_unchecked_t last_fence_retired;
38596 u32 next_fence_to_post;
38597
38598 unsigned int fb_cpp;
38599diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
38600index 709e90d..89a1c0d 100644
38601--- a/drivers/gpu/drm/mga/mga_ioc32.c
38602+++ b/drivers/gpu/drm/mga/mga_ioc32.c
38603@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
38604 return 0;
38605 }
38606
38607-drm_ioctl_compat_t *mga_compat_ioctls[] = {
38608+drm_ioctl_compat_t mga_compat_ioctls[] = {
38609 [DRM_MGA_INIT] = compat_mga_init,
38610 [DRM_MGA_GETPARAM] = compat_mga_getparam,
38611 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
38612@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
38613 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38614 {
38615 unsigned int nr = DRM_IOCTL_NR(cmd);
38616- drm_ioctl_compat_t *fn = NULL;
38617 int ret;
38618
38619 if (nr < DRM_COMMAND_BASE)
38620 return drm_compat_ioctl(filp, cmd, arg);
38621
38622- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
38623- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
38624-
38625- if (fn != NULL)
38626+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
38627+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
38628 ret = (*fn) (filp, cmd, arg);
38629- else
38630+ } else
38631 ret = drm_ioctl(filp, cmd, arg);
38632
38633 return ret;
38634diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
38635index 598c281..60d590e 100644
38636--- a/drivers/gpu/drm/mga/mga_irq.c
38637+++ b/drivers/gpu/drm/mga/mga_irq.c
38638@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
38639 if (crtc != 0)
38640 return 0;
38641
38642- return atomic_read(&dev_priv->vbl_received);
38643+ return atomic_read_unchecked(&dev_priv->vbl_received);
38644 }
38645
38646
38647@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
38648 /* VBLANK interrupt */
38649 if (status & MGA_VLINEPEN) {
38650 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
38651- atomic_inc(&dev_priv->vbl_received);
38652+ atomic_inc_unchecked(&dev_priv->vbl_received);
38653 drm_handle_vblank(dev, 0);
38654 handled = 1;
38655 }
38656@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
38657 if ((prim_start & ~0x03) != (prim_end & ~0x03))
38658 MGA_WRITE(MGA_PRIMEND, prim_end);
38659
38660- atomic_inc(&dev_priv->last_fence_retired);
38661+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
38662 DRM_WAKEUP(&dev_priv->fence_queue);
38663 handled = 1;
38664 }
38665@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
38666 * using fences.
38667 */
38668 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
38669- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
38670+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
38671 - *sequence) <= (1 << 23)));
38672
38673 *sequence = cur_fence;
38674diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
38675index 6aa2137..fe8dc55 100644
38676--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
38677+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
38678@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
38679 struct bit_table {
38680 const char id;
38681 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
38682-};
38683+} __no_const;
38684
38685 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
38686
38687diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
38688index f2b30f8..d0f9a95 100644
38689--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
38690+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
38691@@ -92,7 +92,7 @@ struct nouveau_drm {
38692 struct drm_global_reference mem_global_ref;
38693 struct ttm_bo_global_ref bo_global_ref;
38694 struct ttm_bo_device bdev;
38695- atomic_t validate_sequence;
38696+ atomic_unchecked_t validate_sequence;
38697 int (*move)(struct nouveau_channel *,
38698 struct ttm_buffer_object *,
38699 struct ttm_mem_reg *, struct ttm_mem_reg *);
38700diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
38701index b4b4d0c..b7edc15 100644
38702--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
38703+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
38704@@ -322,7 +322,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
38705 int ret, i;
38706 struct nouveau_bo *res_bo = NULL;
38707
38708- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
38709+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
38710 retry:
38711 if (++trycnt > 100000) {
38712 NV_ERROR(cli, "%s failed and gave up.\n", __func__);
38713@@ -359,7 +359,7 @@ retry:
38714 if (ret) {
38715 validate_fini(op, NULL);
38716 if (unlikely(ret == -EAGAIN)) {
38717- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
38718+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
38719 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
38720 sequence);
38721 if (!ret)
38722diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
38723index 08214bc..9208577 100644
38724--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
38725+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
38726@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
38727 unsigned long arg)
38728 {
38729 unsigned int nr = DRM_IOCTL_NR(cmd);
38730- drm_ioctl_compat_t *fn = NULL;
38731+ drm_ioctl_compat_t fn = NULL;
38732 int ret;
38733
38734 if (nr < DRM_COMMAND_BASE)
38735diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
38736index 25d3495..d81aaf6 100644
38737--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
38738+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
38739@@ -62,7 +62,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
38740 bool can_switch;
38741
38742 spin_lock(&dev->count_lock);
38743- can_switch = (dev->open_count == 0);
38744+ can_switch = (local_read(&dev->open_count) == 0);
38745 spin_unlock(&dev->count_lock);
38746 return can_switch;
38747 }
38748diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
38749index 489cb8c..0b8d0d3 100644
38750--- a/drivers/gpu/drm/qxl/qxl_ttm.c
38751+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
38752@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
38753 }
38754 }
38755
38756-static struct vm_operations_struct qxl_ttm_vm_ops;
38757+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
38758 static const struct vm_operations_struct *ttm_vm_ops;
38759
38760 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38761@@ -147,8 +147,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
38762 return r;
38763 if (unlikely(ttm_vm_ops == NULL)) {
38764 ttm_vm_ops = vma->vm_ops;
38765+ pax_open_kernel();
38766 qxl_ttm_vm_ops = *ttm_vm_ops;
38767 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
38768+ pax_close_kernel();
38769 }
38770 vma->vm_ops = &qxl_ttm_vm_ops;
38771 return 0;
38772@@ -556,25 +558,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
38773 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
38774 {
38775 #if defined(CONFIG_DEBUG_FS)
38776- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
38777- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
38778- unsigned i;
38779+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
38780+ {
38781+ .name = "qxl_mem_mm",
38782+ .show = &qxl_mm_dump_table,
38783+ },
38784+ {
38785+ .name = "qxl_surf_mm",
38786+ .show = &qxl_mm_dump_table,
38787+ }
38788+ };
38789
38790- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
38791- if (i == 0)
38792- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
38793- else
38794- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
38795- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
38796- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
38797- qxl_mem_types_list[i].driver_features = 0;
38798- if (i == 0)
38799- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
38800- else
38801- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
38802+ pax_open_kernel();
38803+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
38804+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
38805+ pax_close_kernel();
38806
38807- }
38808- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
38809+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
38810 #else
38811 return 0;
38812 #endif
38813diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
38814index d4660cf..70dbe65 100644
38815--- a/drivers/gpu/drm/r128/r128_cce.c
38816+++ b/drivers/gpu/drm/r128/r128_cce.c
38817@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
38818
38819 /* GH: Simple idle check.
38820 */
38821- atomic_set(&dev_priv->idle_count, 0);
38822+ atomic_set_unchecked(&dev_priv->idle_count, 0);
38823
38824 /* We don't support anything other than bus-mastering ring mode,
38825 * but the ring can be in either AGP or PCI space for the ring
38826diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
38827index 930c71b..499aded 100644
38828--- a/drivers/gpu/drm/r128/r128_drv.h
38829+++ b/drivers/gpu/drm/r128/r128_drv.h
38830@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
38831 int is_pci;
38832 unsigned long cce_buffers_offset;
38833
38834- atomic_t idle_count;
38835+ atomic_unchecked_t idle_count;
38836
38837 int page_flipping;
38838 int current_page;
38839 u32 crtc_offset;
38840 u32 crtc_offset_cntl;
38841
38842- atomic_t vbl_received;
38843+ atomic_unchecked_t vbl_received;
38844
38845 u32 color_fmt;
38846 unsigned int front_offset;
38847diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
38848index a954c54..9cc595c 100644
38849--- a/drivers/gpu/drm/r128/r128_ioc32.c
38850+++ b/drivers/gpu/drm/r128/r128_ioc32.c
38851@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
38852 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
38853 }
38854
38855-drm_ioctl_compat_t *r128_compat_ioctls[] = {
38856+drm_ioctl_compat_t r128_compat_ioctls[] = {
38857 [DRM_R128_INIT] = compat_r128_init,
38858 [DRM_R128_DEPTH] = compat_r128_depth,
38859 [DRM_R128_STIPPLE] = compat_r128_stipple,
38860@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
38861 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38862 {
38863 unsigned int nr = DRM_IOCTL_NR(cmd);
38864- drm_ioctl_compat_t *fn = NULL;
38865 int ret;
38866
38867 if (nr < DRM_COMMAND_BASE)
38868 return drm_compat_ioctl(filp, cmd, arg);
38869
38870- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
38871- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
38872-
38873- if (fn != NULL)
38874+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
38875+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
38876 ret = (*fn) (filp, cmd, arg);
38877- else
38878+ } else
38879 ret = drm_ioctl(filp, cmd, arg);
38880
38881 return ret;
38882diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
38883index 2ea4f09..d391371 100644
38884--- a/drivers/gpu/drm/r128/r128_irq.c
38885+++ b/drivers/gpu/drm/r128/r128_irq.c
38886@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
38887 if (crtc != 0)
38888 return 0;
38889
38890- return atomic_read(&dev_priv->vbl_received);
38891+ return atomic_read_unchecked(&dev_priv->vbl_received);
38892 }
38893
38894 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
38895@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
38896 /* VBLANK interrupt */
38897 if (status & R128_CRTC_VBLANK_INT) {
38898 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
38899- atomic_inc(&dev_priv->vbl_received);
38900+ atomic_inc_unchecked(&dev_priv->vbl_received);
38901 drm_handle_vblank(dev, 0);
38902 return IRQ_HANDLED;
38903 }
38904diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
38905index 19bb7e6..de7e2a2 100644
38906--- a/drivers/gpu/drm/r128/r128_state.c
38907+++ b/drivers/gpu/drm/r128/r128_state.c
38908@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
38909
38910 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
38911 {
38912- if (atomic_read(&dev_priv->idle_count) == 0)
38913+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
38914 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
38915 else
38916- atomic_set(&dev_priv->idle_count, 0);
38917+ atomic_set_unchecked(&dev_priv->idle_count, 0);
38918 }
38919
38920 #endif
38921diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
38922index 5a82b6b..9e69c73 100644
38923--- a/drivers/gpu/drm/radeon/mkregtable.c
38924+++ b/drivers/gpu/drm/radeon/mkregtable.c
38925@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
38926 regex_t mask_rex;
38927 regmatch_t match[4];
38928 char buf[1024];
38929- size_t end;
38930+ long end;
38931 int len;
38932 int done = 0;
38933 int r;
38934 unsigned o;
38935 struct offset *offset;
38936 char last_reg_s[10];
38937- int last_reg;
38938+ unsigned long last_reg;
38939
38940 if (regcomp
38941 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
38942diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
38943index b0dc0b6..a9bfe9c 100644
38944--- a/drivers/gpu/drm/radeon/radeon_device.c
38945+++ b/drivers/gpu/drm/radeon/radeon_device.c
38946@@ -1014,7 +1014,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
38947 bool can_switch;
38948
38949 spin_lock(&dev->count_lock);
38950- can_switch = (dev->open_count == 0);
38951+ can_switch = (local_read(&dev->open_count) == 0);
38952 spin_unlock(&dev->count_lock);
38953 return can_switch;
38954 }
38955diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
38956index b369d42..8dd04eb 100644
38957--- a/drivers/gpu/drm/radeon/radeon_drv.h
38958+++ b/drivers/gpu/drm/radeon/radeon_drv.h
38959@@ -258,7 +258,7 @@ typedef struct drm_radeon_private {
38960
38961 /* SW interrupt */
38962 wait_queue_head_t swi_queue;
38963- atomic_t swi_emitted;
38964+ atomic_unchecked_t swi_emitted;
38965 int vblank_crtc;
38966 uint32_t irq_enable_reg;
38967 uint32_t r500_disp_irq_reg;
38968diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
38969index c180df8..5fd8186 100644
38970--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
38971+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
38972@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
38973 request = compat_alloc_user_space(sizeof(*request));
38974 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
38975 || __put_user(req32.param, &request->param)
38976- || __put_user((void __user *)(unsigned long)req32.value,
38977+ || __put_user((unsigned long)req32.value,
38978 &request->value))
38979 return -EFAULT;
38980
38981@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
38982 #define compat_radeon_cp_setparam NULL
38983 #endif /* X86_64 || IA64 */
38984
38985-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
38986+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
38987 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
38988 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
38989 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
38990@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
38991 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38992 {
38993 unsigned int nr = DRM_IOCTL_NR(cmd);
38994- drm_ioctl_compat_t *fn = NULL;
38995 int ret;
38996
38997 if (nr < DRM_COMMAND_BASE)
38998 return drm_compat_ioctl(filp, cmd, arg);
38999
39000- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
39001- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
39002-
39003- if (fn != NULL)
39004+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
39005+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
39006 ret = (*fn) (filp, cmd, arg);
39007- else
39008+ } else
39009 ret = drm_ioctl(filp, cmd, arg);
39010
39011 return ret;
39012diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
39013index 8d68e97..9dcfed8 100644
39014--- a/drivers/gpu/drm/radeon/radeon_irq.c
39015+++ b/drivers/gpu/drm/radeon/radeon_irq.c
39016@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
39017 unsigned int ret;
39018 RING_LOCALS;
39019
39020- atomic_inc(&dev_priv->swi_emitted);
39021- ret = atomic_read(&dev_priv->swi_emitted);
39022+ atomic_inc_unchecked(&dev_priv->swi_emitted);
39023+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
39024
39025 BEGIN_RING(4);
39026 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
39027@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
39028 drm_radeon_private_t *dev_priv =
39029 (drm_radeon_private_t *) dev->dev_private;
39030
39031- atomic_set(&dev_priv->swi_emitted, 0);
39032+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
39033 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
39034
39035 dev->max_vblank_count = 0x001fffff;
39036diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
39037index 4d20910..6726b6d 100644
39038--- a/drivers/gpu/drm/radeon/radeon_state.c
39039+++ b/drivers/gpu/drm/radeon/radeon_state.c
39040@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
39041 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
39042 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
39043
39044- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
39045+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
39046 sarea_priv->nbox * sizeof(depth_boxes[0])))
39047 return -EFAULT;
39048
39049@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
39050 {
39051 drm_radeon_private_t *dev_priv = dev->dev_private;
39052 drm_radeon_getparam_t *param = data;
39053- int value;
39054+ int value = 0;
39055
39056 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
39057
39058diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
39059index 6c0ce89..57a2529 100644
39060--- a/drivers/gpu/drm/radeon/radeon_ttm.c
39061+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
39062@@ -782,7 +782,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
39063 man->size = size >> PAGE_SHIFT;
39064 }
39065
39066-static struct vm_operations_struct radeon_ttm_vm_ops;
39067+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
39068 static const struct vm_operations_struct *ttm_vm_ops = NULL;
39069
39070 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
39071@@ -823,8 +823,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
39072 }
39073 if (unlikely(ttm_vm_ops == NULL)) {
39074 ttm_vm_ops = vma->vm_ops;
39075+ pax_open_kernel();
39076 radeon_ttm_vm_ops = *ttm_vm_ops;
39077 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
39078+ pax_close_kernel();
39079 }
39080 vma->vm_ops = &radeon_ttm_vm_ops;
39081 return 0;
39082@@ -853,38 +855,33 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
39083 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
39084 {
39085 #if defined(CONFIG_DEBUG_FS)
39086- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
39087- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
39088+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2] = {
39089+ {
39090+ .name = "radeon_vram_mm",
39091+ .show = &radeon_mm_dump_table,
39092+ },
39093+ {
39094+ .name = "radeon_gtt_mm",
39095+ .show = &radeon_mm_dump_table,
39096+ },
39097+ {
39098+ .name = "ttm_page_pool",
39099+ .show = &ttm_page_alloc_debugfs,
39100+ },
39101+ {
39102+ .name = "ttm_dma_page_pool",
39103+ .show = &ttm_dma_page_alloc_debugfs,
39104+ },
39105+ };
39106 unsigned i;
39107
39108- for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
39109- if (i == 0)
39110- sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
39111- else
39112- sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
39113- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
39114- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
39115- radeon_mem_types_list[i].driver_features = 0;
39116- if (i == 0)
39117- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
39118- else
39119- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
39120-
39121- }
39122- /* Add ttm page pool to debugfs */
39123- sprintf(radeon_mem_types_names[i], "ttm_page_pool");
39124- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
39125- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
39126- radeon_mem_types_list[i].driver_features = 0;
39127- radeon_mem_types_list[i++].data = NULL;
39128+ pax_open_kernel();
39129+ *(void **)&radeon_mem_types_list[0].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
39130+ *(void **)&radeon_mem_types_list[1].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
39131+ pax_close_kernel();
39132 #ifdef CONFIG_SWIOTLB
39133- if (swiotlb_nr_tbl()) {
39134- sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
39135- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
39136- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
39137- radeon_mem_types_list[i].driver_features = 0;
39138- radeon_mem_types_list[i++].data = NULL;
39139- }
39140+ if (swiotlb_nr_tbl())
39141+ i++;
39142 #endif
39143 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
39144
39145diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
39146index 55880d5..9e95342 100644
39147--- a/drivers/gpu/drm/radeon/rs690.c
39148+++ b/drivers/gpu/drm/radeon/rs690.c
39149@@ -327,9 +327,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
39150 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
39151 rdev->pm.sideport_bandwidth.full)
39152 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
39153- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
39154+ read_delay_latency.full = dfixed_const(800 * 1000);
39155 read_delay_latency.full = dfixed_div(read_delay_latency,
39156 rdev->pm.igp_sideport_mclk);
39157+ a.full = dfixed_const(370);
39158+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
39159 } else {
39160 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
39161 rdev->pm.k8_bandwidth.full)
39162diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
39163index dbc2def..0a9f710 100644
39164--- a/drivers/gpu/drm/ttm/ttm_memory.c
39165+++ b/drivers/gpu/drm/ttm/ttm_memory.c
39166@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
39167 zone->glob = glob;
39168 glob->zone_kernel = zone;
39169 ret = kobject_init_and_add(
39170- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
39171+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
39172 if (unlikely(ret != 0)) {
39173 kobject_put(&zone->kobj);
39174 return ret;
39175@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
39176 zone->glob = glob;
39177 glob->zone_dma32 = zone;
39178 ret = kobject_init_and_add(
39179- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
39180+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
39181 if (unlikely(ret != 0)) {
39182 kobject_put(&zone->kobj);
39183 return ret;
39184diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
39185index bd2a3b4..122d9ad 100644
39186--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
39187+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
39188@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
39189 static int ttm_pool_mm_shrink(struct shrinker *shrink,
39190 struct shrink_control *sc)
39191 {
39192- static atomic_t start_pool = ATOMIC_INIT(0);
39193+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
39194 unsigned i;
39195- unsigned pool_offset = atomic_add_return(1, &start_pool);
39196+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
39197 struct ttm_page_pool *pool;
39198 int shrink_pages = sc->nr_to_scan;
39199
39200diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
39201index dc0c065..58a0782 100644
39202--- a/drivers/gpu/drm/udl/udl_fb.c
39203+++ b/drivers/gpu/drm/udl/udl_fb.c
39204@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
39205 fb_deferred_io_cleanup(info);
39206 kfree(info->fbdefio);
39207 info->fbdefio = NULL;
39208- info->fbops->fb_mmap = udl_fb_mmap;
39209 }
39210
39211 pr_warn("released /dev/fb%d user=%d count=%d\n",
39212diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
39213index 893a650..6190d3b 100644
39214--- a/drivers/gpu/drm/via/via_drv.h
39215+++ b/drivers/gpu/drm/via/via_drv.h
39216@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
39217 typedef uint32_t maskarray_t[5];
39218
39219 typedef struct drm_via_irq {
39220- atomic_t irq_received;
39221+ atomic_unchecked_t irq_received;
39222 uint32_t pending_mask;
39223 uint32_t enable_mask;
39224 wait_queue_head_t irq_queue;
39225@@ -75,7 +75,7 @@ typedef struct drm_via_private {
39226 struct timeval last_vblank;
39227 int last_vblank_valid;
39228 unsigned usec_per_vblank;
39229- atomic_t vbl_received;
39230+ atomic_unchecked_t vbl_received;
39231 drm_via_state_t hc_state;
39232 char pci_buf[VIA_PCI_BUF_SIZE];
39233 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
39234diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
39235index ac98964..5dbf512 100644
39236--- a/drivers/gpu/drm/via/via_irq.c
39237+++ b/drivers/gpu/drm/via/via_irq.c
39238@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
39239 if (crtc != 0)
39240 return 0;
39241
39242- return atomic_read(&dev_priv->vbl_received);
39243+ return atomic_read_unchecked(&dev_priv->vbl_received);
39244 }
39245
39246 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39247@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39248
39249 status = VIA_READ(VIA_REG_INTERRUPT);
39250 if (status & VIA_IRQ_VBLANK_PENDING) {
39251- atomic_inc(&dev_priv->vbl_received);
39252- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
39253+ atomic_inc_unchecked(&dev_priv->vbl_received);
39254+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
39255 do_gettimeofday(&cur_vblank);
39256 if (dev_priv->last_vblank_valid) {
39257 dev_priv->usec_per_vblank =
39258@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39259 dev_priv->last_vblank = cur_vblank;
39260 dev_priv->last_vblank_valid = 1;
39261 }
39262- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
39263+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
39264 DRM_DEBUG("US per vblank is: %u\n",
39265 dev_priv->usec_per_vblank);
39266 }
39267@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39268
39269 for (i = 0; i < dev_priv->num_irqs; ++i) {
39270 if (status & cur_irq->pending_mask) {
39271- atomic_inc(&cur_irq->irq_received);
39272+ atomic_inc_unchecked(&cur_irq->irq_received);
39273 DRM_WAKEUP(&cur_irq->irq_queue);
39274 handled = 1;
39275 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
39276@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
39277 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
39278 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
39279 masks[irq][4]));
39280- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
39281+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
39282 } else {
39283 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
39284 (((cur_irq_sequence =
39285- atomic_read(&cur_irq->irq_received)) -
39286+ atomic_read_unchecked(&cur_irq->irq_received)) -
39287 *sequence) <= (1 << 23)));
39288 }
39289 *sequence = cur_irq_sequence;
39290@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
39291 }
39292
39293 for (i = 0; i < dev_priv->num_irqs; ++i) {
39294- atomic_set(&cur_irq->irq_received, 0);
39295+ atomic_set_unchecked(&cur_irq->irq_received, 0);
39296 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
39297 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
39298 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
39299@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
39300 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
39301 case VIA_IRQ_RELATIVE:
39302 irqwait->request.sequence +=
39303- atomic_read(&cur_irq->irq_received);
39304+ atomic_read_unchecked(&cur_irq->irq_received);
39305 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
39306 case VIA_IRQ_ABSOLUTE:
39307 break;
39308diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
39309index 13aeda7..4a952d1 100644
39310--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
39311+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
39312@@ -290,7 +290,7 @@ struct vmw_private {
39313 * Fencing and IRQs.
39314 */
39315
39316- atomic_t marker_seq;
39317+ atomic_unchecked_t marker_seq;
39318 wait_queue_head_t fence_queue;
39319 wait_queue_head_t fifo_queue;
39320 int fence_queue_waiters; /* Protected by hw_mutex */
39321diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
39322index 3eb1486..0a47ee9 100644
39323--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
39324+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
39325@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
39326 (unsigned int) min,
39327 (unsigned int) fifo->capabilities);
39328
39329- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
39330+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
39331 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
39332 vmw_marker_queue_init(&fifo->marker_queue);
39333 return vmw_fifo_send_fence(dev_priv, &dummy);
39334@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
39335 if (reserveable)
39336 iowrite32(bytes, fifo_mem +
39337 SVGA_FIFO_RESERVED);
39338- return fifo_mem + (next_cmd >> 2);
39339+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
39340 } else {
39341 need_bounce = true;
39342 }
39343@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
39344
39345 fm = vmw_fifo_reserve(dev_priv, bytes);
39346 if (unlikely(fm == NULL)) {
39347- *seqno = atomic_read(&dev_priv->marker_seq);
39348+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
39349 ret = -ENOMEM;
39350 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
39351 false, 3*HZ);
39352@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
39353 }
39354
39355 do {
39356- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
39357+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
39358 } while (*seqno == 0);
39359
39360 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
39361diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
39362index c509d40..3b640c3 100644
39363--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
39364+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
39365@@ -138,7 +138,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
39366 int ret;
39367
39368 num_clips = arg->num_clips;
39369- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
39370+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
39371
39372 if (unlikely(num_clips == 0))
39373 return 0;
39374@@ -222,7 +222,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
39375 int ret;
39376
39377 num_clips = arg->num_clips;
39378- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
39379+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
39380
39381 if (unlikely(num_clips == 0))
39382 return 0;
39383diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
39384index 4640adb..e1384ed 100644
39385--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
39386+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
39387@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
39388 * emitted. Then the fence is stale and signaled.
39389 */
39390
39391- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
39392+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
39393 > VMW_FENCE_WRAP);
39394
39395 return ret;
39396@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
39397
39398 if (fifo_idle)
39399 down_read(&fifo_state->rwsem);
39400- signal_seq = atomic_read(&dev_priv->marker_seq);
39401+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
39402 ret = 0;
39403
39404 for (;;) {
39405diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
39406index 8a8725c2..afed796 100644
39407--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
39408+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
39409@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
39410 while (!vmw_lag_lt(queue, us)) {
39411 spin_lock(&queue->lock);
39412 if (list_empty(&queue->head))
39413- seqno = atomic_read(&dev_priv->marker_seq);
39414+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
39415 else {
39416 marker = list_first_entry(&queue->head,
39417 struct vmw_marker, head);
39418diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/host1x/drm/dc.c
39419index 8c04943..4370ed9 100644
39420--- a/drivers/gpu/host1x/drm/dc.c
39421+++ b/drivers/gpu/host1x/drm/dc.c
39422@@ -999,7 +999,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
39423 }
39424
39425 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
39426- dc->debugfs_files[i].data = dc;
39427+ *(void **)&dc->debugfs_files[i].data = dc;
39428
39429 err = drm_debugfs_create_files(dc->debugfs_files,
39430 ARRAY_SIZE(debugfs_files),
39431diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
39432index 402f486..5340852 100644
39433--- a/drivers/hid/hid-core.c
39434+++ b/drivers/hid/hid-core.c
39435@@ -63,6 +63,8 @@ struct hid_report *hid_register_report(struct hid_device *device, unsigned type,
39436 struct hid_report_enum *report_enum = device->report_enum + type;
39437 struct hid_report *report;
39438
39439+ if (id >= HID_MAX_IDS)
39440+ return NULL;
39441 if (report_enum->report_id_hash[id])
39442 return report_enum->report_id_hash[id];
39443
39444@@ -404,8 +406,10 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
39445
39446 case HID_GLOBAL_ITEM_TAG_REPORT_ID:
39447 parser->global.report_id = item_udata(item);
39448- if (parser->global.report_id == 0) {
39449- hid_err(parser->device, "report_id 0 is invalid\n");
39450+ if (parser->global.report_id == 0 ||
39451+ parser->global.report_id >= HID_MAX_IDS) {
39452+ hid_err(parser->device, "report_id %u is invalid\n",
39453+ parser->global.report_id);
39454 return -1;
39455 }
39456 return 0;
39457@@ -575,7 +579,7 @@ static void hid_close_report(struct hid_device *device)
39458 for (i = 0; i < HID_REPORT_TYPES; i++) {
39459 struct hid_report_enum *report_enum = device->report_enum + i;
39460
39461- for (j = 0; j < 256; j++) {
39462+ for (j = 0; j < HID_MAX_IDS; j++) {
39463 struct hid_report *report = report_enum->report_id_hash[j];
39464 if (report)
39465 hid_free_report(report);
39466@@ -755,6 +759,56 @@ int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size)
39467 }
39468 EXPORT_SYMBOL_GPL(hid_parse_report);
39469
39470+static const char * const hid_report_names[] = {
39471+ "HID_INPUT_REPORT",
39472+ "HID_OUTPUT_REPORT",
39473+ "HID_FEATURE_REPORT",
39474+};
39475+/**
39476+ * hid_validate_report - validate existing device report
39477+ *
39478+ * @device: hid device
39479+ * @type: which report type to examine
39480+ * @id: which report ID to examine (0 for first)
39481+ * @fields: expected number of fields
39482+ * @report_counts: expected number of values per field
39483+ *
39484+ * Validate the report details after parsing.
39485+ */
39486+struct hid_report *hid_validate_report(struct hid_device *hid,
39487+ unsigned int type, unsigned int id,
39488+ unsigned int fields,
39489+ unsigned int report_counts)
39490+{
39491+ struct hid_report *report;
39492+ unsigned int i;
39493+
39494+ if (type > HID_FEATURE_REPORT) {
39495+ hid_err(hid, "invalid HID report %u\n", type);
39496+ return NULL;
39497+ }
39498+
39499+ report = hid->report_enum[type].report_id_hash[id];
39500+ if (!report) {
39501+ hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
39502+ return NULL;
39503+ }
39504+ if (report->maxfield < fields) {
39505+ hid_err(hid, "not enough fields in %s %u\n",
39506+ hid_report_names[type], id);
39507+ return NULL;
39508+ }
39509+ for (i = 0; i < fields; i++) {
39510+ if (report->field[i]->report_count < report_counts) {
39511+ hid_err(hid, "not enough values in %s %u fields\n",
39512+ hid_report_names[type], id);
39513+ return NULL;
39514+ }
39515+ }
39516+ return report;
39517+}
39518+EXPORT_SYMBOL_GPL(hid_validate_report);
39519+
39520 /**
39521 * hid_open_report - open a driver-specific device report
39522 *
39523@@ -1152,7 +1206,12 @@ EXPORT_SYMBOL_GPL(hid_output_report);
39524
39525 int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
39526 {
39527- unsigned size = field->report_size;
39528+ unsigned size;
39529+
39530+ if (!field)
39531+ return -1;
39532+
39533+ size = field->report_size;
39534
39535 hid_dump_input(field->report->device, field->usage + offset, value);
39536
39537@@ -2275,7 +2334,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
39538
39539 int hid_add_device(struct hid_device *hdev)
39540 {
39541- static atomic_t id = ATOMIC_INIT(0);
39542+ static atomic_unchecked_t id = ATOMIC_INIT(0);
39543 int ret;
39544
39545 if (WARN_ON(hdev->status & HID_STAT_ADDED))
39546@@ -2309,7 +2368,7 @@ int hid_add_device(struct hid_device *hdev)
39547 /* XXX hack, any other cleaner solution after the driver core
39548 * is converted to allow more than 20 bytes as the device name? */
39549 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
39550- hdev->vendor, hdev->product, atomic_inc_return(&id));
39551+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
39552
39553 hid_debug_register(hdev, dev_name(&hdev->dev));
39554 ret = device_add(&hdev->dev);
39555diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
39556index 07837f5..b697ada 100644
39557--- a/drivers/hid/hid-lenovo-tpkbd.c
39558+++ b/drivers/hid/hid-lenovo-tpkbd.c
39559@@ -341,6 +341,11 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
39560 char *name_mute, *name_micmute;
39561 int ret;
39562
39563+ /* Validate required reports. */
39564+ if (!hid_validate_report(hdev, HID_OUTPUT_REPORT, 4, 4, 1) ||
39565+ !hid_validate_report(hdev, HID_OUTPUT_REPORT, 3, 1, 2))
39566+ return -ENODEV;
39567+
39568 if (sysfs_create_group(&hdev->dev.kobj,
39569 &tpkbd_attr_group_pointer)) {
39570 hid_warn(hdev, "Could not create sysfs group\n");
39571diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
39572index b3cd150..9805197 100644
39573--- a/drivers/hid/hid-lg2ff.c
39574+++ b/drivers/hid/hid-lg2ff.c
39575@@ -64,26 +64,13 @@ int lg2ff_init(struct hid_device *hid)
39576 struct hid_report *report;
39577 struct hid_input *hidinput = list_entry(hid->inputs.next,
39578 struct hid_input, list);
39579- struct list_head *report_list =
39580- &hid->report_enum[HID_OUTPUT_REPORT].report_list;
39581 struct input_dev *dev = hidinput->input;
39582 int error;
39583
39584- if (list_empty(report_list)) {
39585- hid_err(hid, "no output report found\n");
39586+ /* Check that the report looks ok */
39587+ report = hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 1, 7);
39588+ if (!report)
39589 return -ENODEV;
39590- }
39591-
39592- report = list_entry(report_list->next, struct hid_report, list);
39593-
39594- if (report->maxfield < 1) {
39595- hid_err(hid, "output report is empty\n");
39596- return -ENODEV;
39597- }
39598- if (report->field[0]->report_count < 7) {
39599- hid_err(hid, "not enough values in the field\n");
39600- return -ENODEV;
39601- }
39602
39603 lg2ff = kmalloc(sizeof(struct lg2ff_device), GFP_KERNEL);
39604 if (!lg2ff)
39605diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c
39606index e52f181..53ac79b 100644
39607--- a/drivers/hid/hid-lg3ff.c
39608+++ b/drivers/hid/hid-lg3ff.c
39609@@ -66,10 +66,11 @@ static int hid_lg3ff_play(struct input_dev *dev, void *data,
39610 int x, y;
39611
39612 /*
39613- * Maxusage should always be 63 (maximum fields)
39614- * likely a better way to ensure this data is clean
39615+ * Available values in the field should always be 63, but we only use up to
39616+ * 35. Instead, clear the entire area, however big it is.
39617 */
39618- memset(report->field[0]->value, 0, sizeof(__s32)*report->field[0]->maxusage);
39619+ memset(report->field[0]->value, 0,
39620+ sizeof(__s32) * report->field[0]->report_count);
39621
39622 switch (effect->type) {
39623 case FF_CONSTANT:
39624@@ -129,32 +130,14 @@ static const signed short ff3_joystick_ac[] = {
39625 int lg3ff_init(struct hid_device *hid)
39626 {
39627 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
39628- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
39629 struct input_dev *dev = hidinput->input;
39630- struct hid_report *report;
39631- struct hid_field *field;
39632 const signed short *ff_bits = ff3_joystick_ac;
39633 int error;
39634 int i;
39635
39636- /* Find the report to use */
39637- if (list_empty(report_list)) {
39638- hid_err(hid, "No output report found\n");
39639- return -1;
39640- }
39641-
39642 /* Check that the report looks ok */
39643- report = list_entry(report_list->next, struct hid_report, list);
39644- if (!report) {
39645- hid_err(hid, "NULL output report\n");
39646- return -1;
39647- }
39648-
39649- field = report->field[0];
39650- if (!field) {
39651- hid_err(hid, "NULL field\n");
39652- return -1;
39653- }
39654+ if (!hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 1, 35))
39655+ return -ENODEV;
39656
39657 /* Assume single fixed device G940 */
39658 for (i = 0; ff_bits[i] >= 0; i++)
39659diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
39660index 0ddae2a..8b89f0f 100644
39661--- a/drivers/hid/hid-lg4ff.c
39662+++ b/drivers/hid/hid-lg4ff.c
39663@@ -484,34 +484,16 @@ static enum led_brightness lg4ff_led_get_brightness(struct led_classdev *led_cde
39664 int lg4ff_init(struct hid_device *hid)
39665 {
39666 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
39667- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
39668 struct input_dev *dev = hidinput->input;
39669- struct hid_report *report;
39670- struct hid_field *field;
39671 struct lg4ff_device_entry *entry;
39672 struct lg_drv_data *drv_data;
39673 struct usb_device_descriptor *udesc;
39674 int error, i, j;
39675 __u16 bcdDevice, rev_maj, rev_min;
39676
39677- /* Find the report to use */
39678- if (list_empty(report_list)) {
39679- hid_err(hid, "No output report found\n");
39680- return -1;
39681- }
39682-
39683 /* Check that the report looks ok */
39684- report = list_entry(report_list->next, struct hid_report, list);
39685- if (!report) {
39686- hid_err(hid, "NULL output report\n");
39687+ if (!hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 1, 7))
39688 return -1;
39689- }
39690-
39691- field = report->field[0];
39692- if (!field) {
39693- hid_err(hid, "NULL field\n");
39694- return -1;
39695- }
39696
39697 /* Check what wheel has been connected */
39698 for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
39699diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
39700index d7ea8c8..a84fb40 100644
39701--- a/drivers/hid/hid-lgff.c
39702+++ b/drivers/hid/hid-lgff.c
39703@@ -128,27 +128,14 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
39704 int lgff_init(struct hid_device* hid)
39705 {
39706 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
39707- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
39708 struct input_dev *dev = hidinput->input;
39709- struct hid_report *report;
39710- struct hid_field *field;
39711 const signed short *ff_bits = ff_joystick;
39712 int error;
39713 int i;
39714
39715- /* Find the report to use */
39716- if (list_empty(report_list)) {
39717- hid_err(hid, "No output report found\n");
39718- return -1;
39719- }
39720-
39721 /* Check that the report looks ok */
39722- report = list_entry(report_list->next, struct hid_report, list);
39723- field = report->field[0];
39724- if (!field) {
39725- hid_err(hid, "NULL field\n");
39726- return -1;
39727- }
39728+ if (!hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 1, 7))
39729+ return -ENODEV;
39730
39731 for (i = 0; i < ARRAY_SIZE(devices); i++) {
39732 if (dev->id.vendor == devices[i].idVendor &&
39733diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
39734index 5207591a..6c9197f 100644
39735--- a/drivers/hid/hid-logitech-dj.c
39736+++ b/drivers/hid/hid-logitech-dj.c
39737@@ -421,7 +421,7 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
39738 struct hid_report *report;
39739 struct hid_report_enum *output_report_enum;
39740 u8 *data = (u8 *)(&dj_report->device_index);
39741- int i;
39742+ unsigned int i, length;
39743
39744 output_report_enum = &hdev->report_enum[HID_OUTPUT_REPORT];
39745 report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
39746@@ -431,7 +431,9 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
39747 return -ENODEV;
39748 }
39749
39750- for (i = 0; i < report->field[0]->report_count; i++)
39751+ length = min_t(size_t, sizeof(*dj_report) - 1,
39752+ report->field[0]->report_count);
39753+ for (i = 0; i < length; i++)
39754 report->field[0]->value[i] = data[i];
39755
39756 hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
39757@@ -738,6 +740,12 @@ static int logi_dj_probe(struct hid_device *hdev,
39758 goto hid_parse_fail;
39759 }
39760
39761+ if (!hid_validate_report(hdev, HID_OUTPUT_REPORT, REPORT_ID_DJ_SHORT,
39762+ 1, 3)) {
39763+ retval = -ENODEV;
39764+ goto hid_parse_fail;
39765+ }
39766+
39767 /* Starts the usb device and connects to upper interfaces hiddev and
39768 * hidraw */
39769 retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
39770diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
39771index d39a5ce..4892dfc 100644
39772--- a/drivers/hid/hid-multitouch.c
39773+++ b/drivers/hid/hid-multitouch.c
39774@@ -330,9 +330,18 @@ static void mt_feature_mapping(struct hid_device *hdev,
39775 break;
39776 }
39777 }
39778+ /* Ignore if value index is out of bounds. */
39779+ if (td->inputmode_index < 0 ||
39780+ td->inputmode_index >= field->report_count) {
39781+ dev_err(&hdev->dev, "HID_DG_INPUTMODE out of range\n");
39782+ td->inputmode = -1;
39783+ }
39784
39785 break;
39786 case HID_DG_CONTACTMAX:
39787+ /* Ignore if value count is out of bounds. */
39788+ if (field->report_count < 1)
39789+ break;
39790 td->maxcontact_report_id = field->report->id;
39791 td->maxcontacts = field->value[0];
39792 if (!td->maxcontacts &&
39793@@ -743,15 +752,21 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
39794 unsigned count;
39795 int r, n;
39796
39797+ if (report->maxfield == 0)
39798+ return;
39799+
39800 /*
39801 * Includes multi-packet support where subsequent
39802 * packets are sent with zero contactcount.
39803 */
39804- if (td->cc_index >= 0) {
39805- struct hid_field *field = report->field[td->cc_index];
39806- int value = field->value[td->cc_value_index];
39807- if (value)
39808- td->num_expected = value;
39809+ if (td->cc_index >= 0 && td->cc_index < report->maxfield) {
39810+ field = report->field[td->cc_index];
39811+ if (td->cc_value_index >= 0 &&
39812+ td->cc_value_index < field->report_count) {
39813+ int value = field->value[td->cc_value_index];
39814+ if (value)
39815+ td->num_expected = value;
39816+ }
39817 }
39818
39819 for (r = 0; r < report->maxfield; r++) {
39820diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
39821index ef95102..5482156 100644
39822--- a/drivers/hid/hid-ntrig.c
39823+++ b/drivers/hid/hid-ntrig.c
39824@@ -115,7 +115,8 @@ static inline int ntrig_get_mode(struct hid_device *hdev)
39825 struct hid_report *report = hdev->report_enum[HID_FEATURE_REPORT].
39826 report_id_hash[0x0d];
39827
39828- if (!report)
39829+ if (!report || report->maxfield < 1 ||
39830+ report->field[0]->report_count < 1)
39831 return -EINVAL;
39832
39833 hid_hw_request(hdev, report, HID_REQ_GET_REPORT);
39834diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
39835index b48092d..72bba1e 100644
39836--- a/drivers/hid/hid-picolcd_core.c
39837+++ b/drivers/hid/hid-picolcd_core.c
39838@@ -290,7 +290,7 @@ static ssize_t picolcd_operation_mode_store(struct device *dev,
39839 buf += 10;
39840 cnt -= 10;
39841 }
39842- if (!report)
39843+ if (!report || report->maxfield < 1)
39844 return -EINVAL;
39845
39846 while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
39847diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
39848index d29112f..2dcd7d9 100644
39849--- a/drivers/hid/hid-pl.c
39850+++ b/drivers/hid/hid-pl.c
39851@@ -132,8 +132,14 @@ static int plff_init(struct hid_device *hid)
39852 strong = &report->field[0]->value[2];
39853 weak = &report->field[0]->value[3];
39854 debug("detected single-field device");
39855- } else if (report->maxfield >= 4 && report->field[0]->maxusage == 1 &&
39856- report->field[0]->usage[0].hid == (HID_UP_LED | 0x43)) {
39857+ } else if (report->field[0]->maxusage == 1 &&
39858+ report->field[0]->usage[0].hid ==
39859+ (HID_UP_LED | 0x43) &&
39860+ report->maxfield >= 4 &&
39861+ report->field[0]->report_count >= 1 &&
39862+ report->field[1]->report_count >= 1 &&
39863+ report->field[2]->report_count >= 1 &&
39864+ report->field[3]->report_count >= 1) {
39865 report->field[0]->value[0] = 0x00;
39866 report->field[1]->value[0] = 0x00;
39867 strong = &report->field[2]->value[0];
39868diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
39869index ca749810..aa34755 100644
39870--- a/drivers/hid/hid-sensor-hub.c
39871+++ b/drivers/hid/hid-sensor-hub.c
39872@@ -221,7 +221,8 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
39873
39874 mutex_lock(&data->mutex);
39875 report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
39876- if (!report || (field_index >= report->maxfield)) {
39877+ if (!report || (field_index >= report->maxfield) ||
39878+ report->field[field_index]->report_count < 1) {
39879 ret = -EINVAL;
39880 goto done_proc;
39881 }
39882diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
39883index d164911..ef42e86 100644
39884--- a/drivers/hid/hid-steelseries.c
39885+++ b/drivers/hid/hid-steelseries.c
39886@@ -249,6 +249,11 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
39887 goto err_free;
39888 }
39889
39890+ if (!hid_validate_report(hdev, HID_OUTPUT_REPORT, 0, 1, 16)) {
39891+ ret = -ENODEV;
39892+ goto err_free;
39893+ }
39894+
39895 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
39896 if (ret) {
39897 hid_err(hdev, "hw start failed\n");
39898diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
39899index 90124ff..3761764 100644
39900--- a/drivers/hid/hid-wiimote-debug.c
39901+++ b/drivers/hid/hid-wiimote-debug.c
39902@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
39903 else if (size == 0)
39904 return -EIO;
39905
39906- if (copy_to_user(u, buf, size))
39907+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
39908 return -EFAULT;
39909
39910 *off += size;
39911diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
39912index 6ec28a3..b124991 100644
39913--- a/drivers/hid/hid-zpff.c
39914+++ b/drivers/hid/hid-zpff.c
39915@@ -68,22 +68,12 @@ static int zpff_init(struct hid_device *hid)
39916 struct hid_report *report;
39917 struct hid_input *hidinput = list_entry(hid->inputs.next,
39918 struct hid_input, list);
39919- struct list_head *report_list =
39920- &hid->report_enum[HID_OUTPUT_REPORT].report_list;
39921 struct input_dev *dev = hidinput->input;
39922 int error;
39923
39924- if (list_empty(report_list)) {
39925- hid_err(hid, "no output report found\n");
39926+ report = hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 4, 1);
39927+ if (!report)
39928 return -ENODEV;
39929- }
39930-
39931- report = list_entry(report_list->next, struct hid_report, list);
39932-
39933- if (report->maxfield < 4) {
39934- hid_err(hid, "not enough fields in report\n");
39935- return -ENODEV;
39936- }
39937
39938 zpff = kzalloc(sizeof(struct zpff_device), GFP_KERNEL);
39939 if (!zpff)
39940diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
39941index fc307e0..2b255e8 100644
39942--- a/drivers/hid/uhid.c
39943+++ b/drivers/hid/uhid.c
39944@@ -47,7 +47,7 @@ struct uhid_device {
39945 struct mutex report_lock;
39946 wait_queue_head_t report_wait;
39947 atomic_t report_done;
39948- atomic_t report_id;
39949+ atomic_unchecked_t report_id;
39950 struct uhid_event report_buf;
39951 };
39952
39953@@ -187,7 +187,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
39954
39955 spin_lock_irqsave(&uhid->qlock, flags);
39956 ev->type = UHID_FEATURE;
39957- ev->u.feature.id = atomic_inc_return(&uhid->report_id);
39958+ ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
39959 ev->u.feature.rnum = rnum;
39960 ev->u.feature.rtype = report_type;
39961
39962@@ -471,7 +471,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid,
39963 spin_lock_irqsave(&uhid->qlock, flags);
39964
39965 /* id for old report; drop it silently */
39966- if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
39967+ if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
39968 goto unlock;
39969 if (atomic_read(&uhid->report_done))
39970 goto unlock;
39971diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
39972index 0b122f8..b1d8160 100644
39973--- a/drivers/hv/channel.c
39974+++ b/drivers/hv/channel.c
39975@@ -394,8 +394,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
39976 int ret = 0;
39977 int t;
39978
39979- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
39980- atomic_inc(&vmbus_connection.next_gpadl_handle);
39981+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
39982+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
39983
39984 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
39985 if (ret)
39986diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
39987index ae49237..380d4c9 100644
39988--- a/drivers/hv/hv.c
39989+++ b/drivers/hv/hv.c
39990@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
39991 u64 output_address = (output) ? virt_to_phys(output) : 0;
39992 u32 output_address_hi = output_address >> 32;
39993 u32 output_address_lo = output_address & 0xFFFFFFFF;
39994- void *hypercall_page = hv_context.hypercall_page;
39995+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
39996
39997 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
39998 "=a"(hv_status_lo) : "d" (control_hi),
39999diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
40000index 12f2f9e..679603c 100644
40001--- a/drivers/hv/hyperv_vmbus.h
40002+++ b/drivers/hv/hyperv_vmbus.h
40003@@ -591,7 +591,7 @@ enum vmbus_connect_state {
40004 struct vmbus_connection {
40005 enum vmbus_connect_state conn_state;
40006
40007- atomic_t next_gpadl_handle;
40008+ atomic_unchecked_t next_gpadl_handle;
40009
40010 /*
40011 * Represents channel interrupts. Each bit position represents a
40012diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
40013index 4004e54..c2de226 100644
40014--- a/drivers/hv/vmbus_drv.c
40015+++ b/drivers/hv/vmbus_drv.c
40016@@ -668,10 +668,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
40017 {
40018 int ret = 0;
40019
40020- static atomic_t device_num = ATOMIC_INIT(0);
40021+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
40022
40023 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
40024- atomic_inc_return(&device_num));
40025+ atomic_inc_return_unchecked(&device_num));
40026
40027 child_device_obj->device.bus = &hv_bus;
40028 child_device_obj->device.parent = &hv_acpi_dev->dev;
40029diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
40030index 6351aba..dc4aaf4 100644
40031--- a/drivers/hwmon/acpi_power_meter.c
40032+++ b/drivers/hwmon/acpi_power_meter.c
40033@@ -117,7 +117,7 @@ struct sensor_template {
40034 struct device_attribute *devattr,
40035 const char *buf, size_t count);
40036 int index;
40037-};
40038+} __do_const;
40039
40040 /* Averaging interval */
40041 static int update_avg_interval(struct acpi_power_meter_resource *resource)
40042@@ -629,7 +629,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
40043 struct sensor_template *attrs)
40044 {
40045 struct device *dev = &resource->acpi_dev->dev;
40046- struct sensor_device_attribute *sensors =
40047+ sensor_device_attribute_no_const *sensors =
40048 &resource->sensors[resource->num_sensors];
40049 int res = 0;
40050
40051diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
40052index 62c2e32..8f2859a 100644
40053--- a/drivers/hwmon/applesmc.c
40054+++ b/drivers/hwmon/applesmc.c
40055@@ -1084,7 +1084,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
40056 {
40057 struct applesmc_node_group *grp;
40058 struct applesmc_dev_attr *node;
40059- struct attribute *attr;
40060+ attribute_no_const *attr;
40061 int ret, i;
40062
40063 for (grp = groups; grp->format; grp++) {
40064diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
40065index b25c643..a13460d 100644
40066--- a/drivers/hwmon/asus_atk0110.c
40067+++ b/drivers/hwmon/asus_atk0110.c
40068@@ -152,10 +152,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
40069 struct atk_sensor_data {
40070 struct list_head list;
40071 struct atk_data *data;
40072- struct device_attribute label_attr;
40073- struct device_attribute input_attr;
40074- struct device_attribute limit1_attr;
40075- struct device_attribute limit2_attr;
40076+ device_attribute_no_const label_attr;
40077+ device_attribute_no_const input_attr;
40078+ device_attribute_no_const limit1_attr;
40079+ device_attribute_no_const limit2_attr;
40080 char label_attr_name[ATTR_NAME_SIZE];
40081 char input_attr_name[ATTR_NAME_SIZE];
40082 char limit1_attr_name[ATTR_NAME_SIZE];
40083@@ -275,7 +275,7 @@ static ssize_t atk_name_show(struct device *dev,
40084 static struct device_attribute atk_name_attr =
40085 __ATTR(name, 0444, atk_name_show, NULL);
40086
40087-static void atk_init_attribute(struct device_attribute *attr, char *name,
40088+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
40089 sysfs_show_func show)
40090 {
40091 sysfs_attr_init(&attr->attr);
40092diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
40093index 658ce3a..0d0c2f3 100644
40094--- a/drivers/hwmon/coretemp.c
40095+++ b/drivers/hwmon/coretemp.c
40096@@ -790,7 +790,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
40097 return NOTIFY_OK;
40098 }
40099
40100-static struct notifier_block coretemp_cpu_notifier __refdata = {
40101+static struct notifier_block coretemp_cpu_notifier = {
40102 .notifier_call = coretemp_cpu_callback,
40103 };
40104
40105diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
40106index 1429f6e..ee03d59 100644
40107--- a/drivers/hwmon/ibmaem.c
40108+++ b/drivers/hwmon/ibmaem.c
40109@@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data,
40110 struct aem_rw_sensor_template *rw)
40111 {
40112 struct device *dev = &data->pdev->dev;
40113- struct sensor_device_attribute *sensors = data->sensors;
40114+ sensor_device_attribute_no_const *sensors = data->sensors;
40115 int err;
40116
40117 /* Set up read-only sensors */
40118diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
40119index 52b77af..aed1ddf 100644
40120--- a/drivers/hwmon/iio_hwmon.c
40121+++ b/drivers/hwmon/iio_hwmon.c
40122@@ -73,7 +73,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
40123 {
40124 struct device *dev = &pdev->dev;
40125 struct iio_hwmon_state *st;
40126- struct sensor_device_attribute *a;
40127+ sensor_device_attribute_no_const *a;
40128 int ret, i;
40129 int in_i = 1, temp_i = 1, curr_i = 1;
40130 enum iio_chan_type type;
40131diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
40132index 9add6092..ee7ba3f 100644
40133--- a/drivers/hwmon/pmbus/pmbus_core.c
40134+++ b/drivers/hwmon/pmbus/pmbus_core.c
40135@@ -781,7 +781,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
40136 return 0;
40137 }
40138
40139-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
40140+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
40141 const char *name,
40142 umode_t mode,
40143 ssize_t (*show)(struct device *dev,
40144@@ -798,7 +798,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
40145 dev_attr->store = store;
40146 }
40147
40148-static void pmbus_attr_init(struct sensor_device_attribute *a,
40149+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
40150 const char *name,
40151 umode_t mode,
40152 ssize_t (*show)(struct device *dev,
40153@@ -820,7 +820,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
40154 u16 reg, u8 mask)
40155 {
40156 struct pmbus_boolean *boolean;
40157- struct sensor_device_attribute *a;
40158+ sensor_device_attribute_no_const *a;
40159
40160 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
40161 if (!boolean)
40162@@ -845,7 +845,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
40163 bool update, bool readonly)
40164 {
40165 struct pmbus_sensor *sensor;
40166- struct device_attribute *a;
40167+ device_attribute_no_const *a;
40168
40169 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
40170 if (!sensor)
40171@@ -876,7 +876,7 @@ static int pmbus_add_label(struct pmbus_data *data,
40172 const char *lstring, int index)
40173 {
40174 struct pmbus_label *label;
40175- struct device_attribute *a;
40176+ device_attribute_no_const *a;
40177
40178 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
40179 if (!label)
40180diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
40181index 2507f90..1645765 100644
40182--- a/drivers/hwmon/sht15.c
40183+++ b/drivers/hwmon/sht15.c
40184@@ -169,7 +169,7 @@ struct sht15_data {
40185 int supply_uv;
40186 bool supply_uv_valid;
40187 struct work_struct update_supply_work;
40188- atomic_t interrupt_handled;
40189+ atomic_unchecked_t interrupt_handled;
40190 };
40191
40192 /**
40193@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
40194 ret = gpio_direction_input(data->pdata->gpio_data);
40195 if (ret)
40196 return ret;
40197- atomic_set(&data->interrupt_handled, 0);
40198+ atomic_set_unchecked(&data->interrupt_handled, 0);
40199
40200 enable_irq(gpio_to_irq(data->pdata->gpio_data));
40201 if (gpio_get_value(data->pdata->gpio_data) == 0) {
40202 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
40203 /* Only relevant if the interrupt hasn't occurred. */
40204- if (!atomic_read(&data->interrupt_handled))
40205+ if (!atomic_read_unchecked(&data->interrupt_handled))
40206 schedule_work(&data->read_work);
40207 }
40208 ret = wait_event_timeout(data->wait_queue,
40209@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
40210
40211 /* First disable the interrupt */
40212 disable_irq_nosync(irq);
40213- atomic_inc(&data->interrupt_handled);
40214+ atomic_inc_unchecked(&data->interrupt_handled);
40215 /* Then schedule a reading work struct */
40216 if (data->state != SHT15_READING_NOTHING)
40217 schedule_work(&data->read_work);
40218@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
40219 * If not, then start the interrupt again - care here as could
40220 * have gone low in meantime so verify it hasn't!
40221 */
40222- atomic_set(&data->interrupt_handled, 0);
40223+ atomic_set_unchecked(&data->interrupt_handled, 0);
40224 enable_irq(gpio_to_irq(data->pdata->gpio_data));
40225 /* If still not occurred or another handler was scheduled */
40226 if (gpio_get_value(data->pdata->gpio_data)
40227- || atomic_read(&data->interrupt_handled))
40228+ || atomic_read_unchecked(&data->interrupt_handled))
40229 return;
40230 }
40231
40232diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
40233index 76f157b..9c0db1b 100644
40234--- a/drivers/hwmon/via-cputemp.c
40235+++ b/drivers/hwmon/via-cputemp.c
40236@@ -296,7 +296,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
40237 return NOTIFY_OK;
40238 }
40239
40240-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
40241+static struct notifier_block via_cputemp_cpu_notifier = {
40242 .notifier_call = via_cputemp_cpu_callback,
40243 };
40244
40245diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
40246index 07f01ac..d79ad3d 100644
40247--- a/drivers/i2c/busses/i2c-amd756-s4882.c
40248+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
40249@@ -43,7 +43,7 @@
40250 extern struct i2c_adapter amd756_smbus;
40251
40252 static struct i2c_adapter *s4882_adapter;
40253-static struct i2c_algorithm *s4882_algo;
40254+static i2c_algorithm_no_const *s4882_algo;
40255
40256 /* Wrapper access functions for multiplexed SMBus */
40257 static DEFINE_MUTEX(amd756_lock);
40258diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
40259index 2ca268d..c6acbdf 100644
40260--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
40261+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
40262@@ -41,7 +41,7 @@
40263 extern struct i2c_adapter *nforce2_smbus;
40264
40265 static struct i2c_adapter *s4985_adapter;
40266-static struct i2c_algorithm *s4985_algo;
40267+static i2c_algorithm_no_const *s4985_algo;
40268
40269 /* Wrapper access functions for multiplexed SMBus */
40270 static DEFINE_MUTEX(nforce2_lock);
40271diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
40272index c3ccdea..5b3dc1a 100644
40273--- a/drivers/i2c/i2c-dev.c
40274+++ b/drivers/i2c/i2c-dev.c
40275@@ -271,7 +271,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
40276 break;
40277 }
40278
40279- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
40280+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
40281 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
40282 if (IS_ERR(rdwr_pa[i].buf)) {
40283 res = PTR_ERR(rdwr_pa[i].buf);
40284diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
40285index 2ff6204..218c16e 100644
40286--- a/drivers/ide/ide-cd.c
40287+++ b/drivers/ide/ide-cd.c
40288@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
40289 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
40290 if ((unsigned long)buf & alignment
40291 || blk_rq_bytes(rq) & q->dma_pad_mask
40292- || object_is_on_stack(buf))
40293+ || object_starts_on_stack(buf))
40294 drive->dma = 0;
40295 }
40296 }
40297diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
40298index e145931..08bfc59 100644
40299--- a/drivers/iio/industrialio-core.c
40300+++ b/drivers/iio/industrialio-core.c
40301@@ -506,7 +506,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
40302 }
40303
40304 static
40305-int __iio_device_attr_init(struct device_attribute *dev_attr,
40306+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
40307 const char *postfix,
40308 struct iio_chan_spec const *chan,
40309 ssize_t (*readfunc)(struct device *dev,
40310diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
40311index 784b97c..c9ceadf 100644
40312--- a/drivers/infiniband/core/cm.c
40313+++ b/drivers/infiniband/core/cm.c
40314@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
40315
40316 struct cm_counter_group {
40317 struct kobject obj;
40318- atomic_long_t counter[CM_ATTR_COUNT];
40319+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
40320 };
40321
40322 struct cm_counter_attribute {
40323@@ -1395,7 +1395,7 @@ static void cm_dup_req_handler(struct cm_work *work,
40324 struct ib_mad_send_buf *msg = NULL;
40325 int ret;
40326
40327- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40328+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40329 counter[CM_REQ_COUNTER]);
40330
40331 /* Quick state check to discard duplicate REQs. */
40332@@ -1779,7 +1779,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
40333 if (!cm_id_priv)
40334 return;
40335
40336- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40337+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40338 counter[CM_REP_COUNTER]);
40339 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
40340 if (ret)
40341@@ -1946,7 +1946,7 @@ static int cm_rtu_handler(struct cm_work *work)
40342 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
40343 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
40344 spin_unlock_irq(&cm_id_priv->lock);
40345- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40346+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40347 counter[CM_RTU_COUNTER]);
40348 goto out;
40349 }
40350@@ -2129,7 +2129,7 @@ static int cm_dreq_handler(struct cm_work *work)
40351 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
40352 dreq_msg->local_comm_id);
40353 if (!cm_id_priv) {
40354- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40355+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40356 counter[CM_DREQ_COUNTER]);
40357 cm_issue_drep(work->port, work->mad_recv_wc);
40358 return -EINVAL;
40359@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_work *work)
40360 case IB_CM_MRA_REP_RCVD:
40361 break;
40362 case IB_CM_TIMEWAIT:
40363- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40364+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40365 counter[CM_DREQ_COUNTER]);
40366 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
40367 goto unlock;
40368@@ -2168,7 +2168,7 @@ static int cm_dreq_handler(struct cm_work *work)
40369 cm_free_msg(msg);
40370 goto deref;
40371 case IB_CM_DREQ_RCVD:
40372- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40373+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40374 counter[CM_DREQ_COUNTER]);
40375 goto unlock;
40376 default:
40377@@ -2535,7 +2535,7 @@ static int cm_mra_handler(struct cm_work *work)
40378 ib_modify_mad(cm_id_priv->av.port->mad_agent,
40379 cm_id_priv->msg, timeout)) {
40380 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
40381- atomic_long_inc(&work->port->
40382+ atomic_long_inc_unchecked(&work->port->
40383 counter_group[CM_RECV_DUPLICATES].
40384 counter[CM_MRA_COUNTER]);
40385 goto out;
40386@@ -2544,7 +2544,7 @@ static int cm_mra_handler(struct cm_work *work)
40387 break;
40388 case IB_CM_MRA_REQ_RCVD:
40389 case IB_CM_MRA_REP_RCVD:
40390- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40391+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40392 counter[CM_MRA_COUNTER]);
40393 /* fall through */
40394 default:
40395@@ -2706,7 +2706,7 @@ static int cm_lap_handler(struct cm_work *work)
40396 case IB_CM_LAP_IDLE:
40397 break;
40398 case IB_CM_MRA_LAP_SENT:
40399- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40400+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40401 counter[CM_LAP_COUNTER]);
40402 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
40403 goto unlock;
40404@@ -2722,7 +2722,7 @@ static int cm_lap_handler(struct cm_work *work)
40405 cm_free_msg(msg);
40406 goto deref;
40407 case IB_CM_LAP_RCVD:
40408- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40409+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40410 counter[CM_LAP_COUNTER]);
40411 goto unlock;
40412 default:
40413@@ -3006,7 +3006,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
40414 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
40415 if (cur_cm_id_priv) {
40416 spin_unlock_irq(&cm.lock);
40417- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40418+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40419 counter[CM_SIDR_REQ_COUNTER]);
40420 goto out; /* Duplicate message. */
40421 }
40422@@ -3218,10 +3218,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
40423 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
40424 msg->retries = 1;
40425
40426- atomic_long_add(1 + msg->retries,
40427+ atomic_long_add_unchecked(1 + msg->retries,
40428 &port->counter_group[CM_XMIT].counter[attr_index]);
40429 if (msg->retries)
40430- atomic_long_add(msg->retries,
40431+ atomic_long_add_unchecked(msg->retries,
40432 &port->counter_group[CM_XMIT_RETRIES].
40433 counter[attr_index]);
40434
40435@@ -3431,7 +3431,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
40436 }
40437
40438 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
40439- atomic_long_inc(&port->counter_group[CM_RECV].
40440+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
40441 counter[attr_id - CM_ATTR_ID_OFFSET]);
40442
40443 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
40444@@ -3636,7 +3636,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
40445 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
40446
40447 return sprintf(buf, "%ld\n",
40448- atomic_long_read(&group->counter[cm_attr->index]));
40449+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
40450 }
40451
40452 static const struct sysfs_ops cm_counter_ops = {
40453diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
40454index 9f5ad7c..588cd84 100644
40455--- a/drivers/infiniband/core/fmr_pool.c
40456+++ b/drivers/infiniband/core/fmr_pool.c
40457@@ -98,8 +98,8 @@ struct ib_fmr_pool {
40458
40459 struct task_struct *thread;
40460
40461- atomic_t req_ser;
40462- atomic_t flush_ser;
40463+ atomic_unchecked_t req_ser;
40464+ atomic_unchecked_t flush_ser;
40465
40466 wait_queue_head_t force_wait;
40467 };
40468@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
40469 struct ib_fmr_pool *pool = pool_ptr;
40470
40471 do {
40472- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
40473+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
40474 ib_fmr_batch_release(pool);
40475
40476- atomic_inc(&pool->flush_ser);
40477+ atomic_inc_unchecked(&pool->flush_ser);
40478 wake_up_interruptible(&pool->force_wait);
40479
40480 if (pool->flush_function)
40481@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
40482 }
40483
40484 set_current_state(TASK_INTERRUPTIBLE);
40485- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
40486+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
40487 !kthread_should_stop())
40488 schedule();
40489 __set_current_state(TASK_RUNNING);
40490@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
40491 pool->dirty_watermark = params->dirty_watermark;
40492 pool->dirty_len = 0;
40493 spin_lock_init(&pool->pool_lock);
40494- atomic_set(&pool->req_ser, 0);
40495- atomic_set(&pool->flush_ser, 0);
40496+ atomic_set_unchecked(&pool->req_ser, 0);
40497+ atomic_set_unchecked(&pool->flush_ser, 0);
40498 init_waitqueue_head(&pool->force_wait);
40499
40500 pool->thread = kthread_run(ib_fmr_cleanup_thread,
40501@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
40502 }
40503 spin_unlock_irq(&pool->pool_lock);
40504
40505- serial = atomic_inc_return(&pool->req_ser);
40506+ serial = atomic_inc_return_unchecked(&pool->req_ser);
40507 wake_up_process(pool->thread);
40508
40509 if (wait_event_interruptible(pool->force_wait,
40510- atomic_read(&pool->flush_ser) - serial >= 0))
40511+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
40512 return -EINTR;
40513
40514 return 0;
40515@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
40516 } else {
40517 list_add_tail(&fmr->list, &pool->dirty_list);
40518 if (++pool->dirty_len >= pool->dirty_watermark) {
40519- atomic_inc(&pool->req_ser);
40520+ atomic_inc_unchecked(&pool->req_ser);
40521 wake_up_process(pool->thread);
40522 }
40523 }
40524diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
40525index 4cb8eb2..146bf60 100644
40526--- a/drivers/infiniband/hw/cxgb4/mem.c
40527+++ b/drivers/infiniband/hw/cxgb4/mem.c
40528@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
40529 int err;
40530 struct fw_ri_tpte tpt;
40531 u32 stag_idx;
40532- static atomic_t key;
40533+ static atomic_unchecked_t key;
40534
40535 if (c4iw_fatal_error(rdev))
40536 return -EIO;
40537@@ -266,7 +266,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
40538 if (rdev->stats.stag.cur > rdev->stats.stag.max)
40539 rdev->stats.stag.max = rdev->stats.stag.cur;
40540 mutex_unlock(&rdev->stats.lock);
40541- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
40542+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
40543 }
40544 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
40545 __func__, stag_state, type, pdid, stag_idx);
40546diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
40547index 79b3dbc..96e5fcc 100644
40548--- a/drivers/infiniband/hw/ipath/ipath_rc.c
40549+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
40550@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
40551 struct ib_atomic_eth *ateth;
40552 struct ipath_ack_entry *e;
40553 u64 vaddr;
40554- atomic64_t *maddr;
40555+ atomic64_unchecked_t *maddr;
40556 u64 sdata;
40557 u32 rkey;
40558 u8 next;
40559@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
40560 IB_ACCESS_REMOTE_ATOMIC)))
40561 goto nack_acc_unlck;
40562 /* Perform atomic OP and save result. */
40563- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
40564+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
40565 sdata = be64_to_cpu(ateth->swap_data);
40566 e = &qp->s_ack_queue[qp->r_head_ack_queue];
40567 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
40568- (u64) atomic64_add_return(sdata, maddr) - sdata :
40569+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
40570 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
40571 be64_to_cpu(ateth->compare_data),
40572 sdata);
40573diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
40574index 1f95bba..9530f87 100644
40575--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
40576+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
40577@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
40578 unsigned long flags;
40579 struct ib_wc wc;
40580 u64 sdata;
40581- atomic64_t *maddr;
40582+ atomic64_unchecked_t *maddr;
40583 enum ib_wc_status send_status;
40584
40585 /*
40586@@ -382,11 +382,11 @@ again:
40587 IB_ACCESS_REMOTE_ATOMIC)))
40588 goto acc_err;
40589 /* Perform atomic OP and save result. */
40590- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
40591+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
40592 sdata = wqe->wr.wr.atomic.compare_add;
40593 *(u64 *) sqp->s_sge.sge.vaddr =
40594 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
40595- (u64) atomic64_add_return(sdata, maddr) - sdata :
40596+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
40597 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
40598 sdata, wqe->wr.wr.atomic.swap);
40599 goto send_comp;
40600diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
40601index 9d3e5c1..d9afe4a 100644
40602--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
40603+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
40604@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
40605 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
40606 }
40607
40608-int mthca_QUERY_FW(struct mthca_dev *dev)
40609+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
40610 {
40611 struct mthca_mailbox *mailbox;
40612 u32 *outbox;
40613diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
40614index ed9a989..e0c5871 100644
40615--- a/drivers/infiniband/hw/mthca/mthca_mr.c
40616+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
40617@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
40618 return key;
40619 }
40620
40621-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
40622+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
40623 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
40624 {
40625 struct mthca_mailbox *mailbox;
40626diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
40627index 4291410..d2ab1fb 100644
40628--- a/drivers/infiniband/hw/nes/nes.c
40629+++ b/drivers/infiniband/hw/nes/nes.c
40630@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
40631 LIST_HEAD(nes_adapter_list);
40632 static LIST_HEAD(nes_dev_list);
40633
40634-atomic_t qps_destroyed;
40635+atomic_unchecked_t qps_destroyed;
40636
40637 static unsigned int ee_flsh_adapter;
40638 static unsigned int sysfs_nonidx_addr;
40639@@ -269,7 +269,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
40640 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
40641 struct nes_adapter *nesadapter = nesdev->nesadapter;
40642
40643- atomic_inc(&qps_destroyed);
40644+ atomic_inc_unchecked(&qps_destroyed);
40645
40646 /* Free the control structures */
40647
40648diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
40649index 33cc589..3bd6538 100644
40650--- a/drivers/infiniband/hw/nes/nes.h
40651+++ b/drivers/infiniband/hw/nes/nes.h
40652@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
40653 extern unsigned int wqm_quanta;
40654 extern struct list_head nes_adapter_list;
40655
40656-extern atomic_t cm_connects;
40657-extern atomic_t cm_accepts;
40658-extern atomic_t cm_disconnects;
40659-extern atomic_t cm_closes;
40660-extern atomic_t cm_connecteds;
40661-extern atomic_t cm_connect_reqs;
40662-extern atomic_t cm_rejects;
40663-extern atomic_t mod_qp_timouts;
40664-extern atomic_t qps_created;
40665-extern atomic_t qps_destroyed;
40666-extern atomic_t sw_qps_destroyed;
40667+extern atomic_unchecked_t cm_connects;
40668+extern atomic_unchecked_t cm_accepts;
40669+extern atomic_unchecked_t cm_disconnects;
40670+extern atomic_unchecked_t cm_closes;
40671+extern atomic_unchecked_t cm_connecteds;
40672+extern atomic_unchecked_t cm_connect_reqs;
40673+extern atomic_unchecked_t cm_rejects;
40674+extern atomic_unchecked_t mod_qp_timouts;
40675+extern atomic_unchecked_t qps_created;
40676+extern atomic_unchecked_t qps_destroyed;
40677+extern atomic_unchecked_t sw_qps_destroyed;
40678 extern u32 mh_detected;
40679 extern u32 mh_pauses_sent;
40680 extern u32 cm_packets_sent;
40681@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
40682 extern u32 cm_packets_received;
40683 extern u32 cm_packets_dropped;
40684 extern u32 cm_packets_retrans;
40685-extern atomic_t cm_listens_created;
40686-extern atomic_t cm_listens_destroyed;
40687+extern atomic_unchecked_t cm_listens_created;
40688+extern atomic_unchecked_t cm_listens_destroyed;
40689 extern u32 cm_backlog_drops;
40690-extern atomic_t cm_loopbacks;
40691-extern atomic_t cm_nodes_created;
40692-extern atomic_t cm_nodes_destroyed;
40693-extern atomic_t cm_accel_dropped_pkts;
40694-extern atomic_t cm_resets_recvd;
40695-extern atomic_t pau_qps_created;
40696-extern atomic_t pau_qps_destroyed;
40697+extern atomic_unchecked_t cm_loopbacks;
40698+extern atomic_unchecked_t cm_nodes_created;
40699+extern atomic_unchecked_t cm_nodes_destroyed;
40700+extern atomic_unchecked_t cm_accel_dropped_pkts;
40701+extern atomic_unchecked_t cm_resets_recvd;
40702+extern atomic_unchecked_t pau_qps_created;
40703+extern atomic_unchecked_t pau_qps_destroyed;
40704
40705 extern u32 int_mod_timer_init;
40706 extern u32 int_mod_cq_depth_256;
40707diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
40708index 24b9f1a..00fd004 100644
40709--- a/drivers/infiniband/hw/nes/nes_cm.c
40710+++ b/drivers/infiniband/hw/nes/nes_cm.c
40711@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
40712 u32 cm_packets_retrans;
40713 u32 cm_packets_created;
40714 u32 cm_packets_received;
40715-atomic_t cm_listens_created;
40716-atomic_t cm_listens_destroyed;
40717+atomic_unchecked_t cm_listens_created;
40718+atomic_unchecked_t cm_listens_destroyed;
40719 u32 cm_backlog_drops;
40720-atomic_t cm_loopbacks;
40721-atomic_t cm_nodes_created;
40722-atomic_t cm_nodes_destroyed;
40723-atomic_t cm_accel_dropped_pkts;
40724-atomic_t cm_resets_recvd;
40725+atomic_unchecked_t cm_loopbacks;
40726+atomic_unchecked_t cm_nodes_created;
40727+atomic_unchecked_t cm_nodes_destroyed;
40728+atomic_unchecked_t cm_accel_dropped_pkts;
40729+atomic_unchecked_t cm_resets_recvd;
40730
40731 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
40732 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
40733@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
40734
40735 static struct nes_cm_core *g_cm_core;
40736
40737-atomic_t cm_connects;
40738-atomic_t cm_accepts;
40739-atomic_t cm_disconnects;
40740-atomic_t cm_closes;
40741-atomic_t cm_connecteds;
40742-atomic_t cm_connect_reqs;
40743-atomic_t cm_rejects;
40744+atomic_unchecked_t cm_connects;
40745+atomic_unchecked_t cm_accepts;
40746+atomic_unchecked_t cm_disconnects;
40747+atomic_unchecked_t cm_closes;
40748+atomic_unchecked_t cm_connecteds;
40749+atomic_unchecked_t cm_connect_reqs;
40750+atomic_unchecked_t cm_rejects;
40751
40752 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
40753 {
40754@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
40755 kfree(listener);
40756 listener = NULL;
40757 ret = 0;
40758- atomic_inc(&cm_listens_destroyed);
40759+ atomic_inc_unchecked(&cm_listens_destroyed);
40760 } else {
40761 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
40762 }
40763@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
40764 cm_node->rem_mac);
40765
40766 add_hte_node(cm_core, cm_node);
40767- atomic_inc(&cm_nodes_created);
40768+ atomic_inc_unchecked(&cm_nodes_created);
40769
40770 return cm_node;
40771 }
40772@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
40773 }
40774
40775 atomic_dec(&cm_core->node_cnt);
40776- atomic_inc(&cm_nodes_destroyed);
40777+ atomic_inc_unchecked(&cm_nodes_destroyed);
40778 nesqp = cm_node->nesqp;
40779 if (nesqp) {
40780 nesqp->cm_node = NULL;
40781@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
40782
40783 static void drop_packet(struct sk_buff *skb)
40784 {
40785- atomic_inc(&cm_accel_dropped_pkts);
40786+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
40787 dev_kfree_skb_any(skb);
40788 }
40789
40790@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
40791 {
40792
40793 int reset = 0; /* whether to send reset in case of err.. */
40794- atomic_inc(&cm_resets_recvd);
40795+ atomic_inc_unchecked(&cm_resets_recvd);
40796 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
40797 " refcnt=%d\n", cm_node, cm_node->state,
40798 atomic_read(&cm_node->ref_count));
40799@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
40800 rem_ref_cm_node(cm_node->cm_core, cm_node);
40801 return NULL;
40802 }
40803- atomic_inc(&cm_loopbacks);
40804+ atomic_inc_unchecked(&cm_loopbacks);
40805 loopbackremotenode->loopbackpartner = cm_node;
40806 loopbackremotenode->tcp_cntxt.rcv_wscale =
40807 NES_CM_DEFAULT_RCV_WND_SCALE;
40808@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
40809 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
40810 else {
40811 rem_ref_cm_node(cm_core, cm_node);
40812- atomic_inc(&cm_accel_dropped_pkts);
40813+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
40814 dev_kfree_skb_any(skb);
40815 }
40816 break;
40817@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
40818
40819 if ((cm_id) && (cm_id->event_handler)) {
40820 if (issue_disconn) {
40821- atomic_inc(&cm_disconnects);
40822+ atomic_inc_unchecked(&cm_disconnects);
40823 cm_event.event = IW_CM_EVENT_DISCONNECT;
40824 cm_event.status = disconn_status;
40825 cm_event.local_addr = cm_id->local_addr;
40826@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
40827 }
40828
40829 if (issue_close) {
40830- atomic_inc(&cm_closes);
40831+ atomic_inc_unchecked(&cm_closes);
40832 nes_disconnect(nesqp, 1);
40833
40834 cm_id->provider_data = nesqp;
40835@@ -3033,7 +3033,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
40836
40837 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
40838 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
40839- atomic_inc(&cm_accepts);
40840+ atomic_inc_unchecked(&cm_accepts);
40841
40842 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
40843 netdev_refcnt_read(nesvnic->netdev));
40844@@ -3228,7 +3228,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
40845 struct nes_cm_core *cm_core;
40846 u8 *start_buff;
40847
40848- atomic_inc(&cm_rejects);
40849+ atomic_inc_unchecked(&cm_rejects);
40850 cm_node = (struct nes_cm_node *)cm_id->provider_data;
40851 loopback = cm_node->loopbackpartner;
40852 cm_core = cm_node->cm_core;
40853@@ -3288,7 +3288,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
40854 ntohl(cm_id->local_addr.sin_addr.s_addr),
40855 ntohs(cm_id->local_addr.sin_port));
40856
40857- atomic_inc(&cm_connects);
40858+ atomic_inc_unchecked(&cm_connects);
40859 nesqp->active_conn = 1;
40860
40861 /* cache the cm_id in the qp */
40862@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
40863 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
40864 return err;
40865 }
40866- atomic_inc(&cm_listens_created);
40867+ atomic_inc_unchecked(&cm_listens_created);
40868 }
40869
40870 cm_id->add_ref(cm_id);
40871@@ -3499,7 +3499,7 @@ static void cm_event_connected(struct nes_cm_event *event)
40872
40873 if (nesqp->destroyed)
40874 return;
40875- atomic_inc(&cm_connecteds);
40876+ atomic_inc_unchecked(&cm_connecteds);
40877 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
40878 " local port 0x%04X. jiffies = %lu.\n",
40879 nesqp->hwqp.qp_id,
40880@@ -3679,7 +3679,7 @@ static void cm_event_reset(struct nes_cm_event *event)
40881
40882 cm_id->add_ref(cm_id);
40883 ret = cm_id->event_handler(cm_id, &cm_event);
40884- atomic_inc(&cm_closes);
40885+ atomic_inc_unchecked(&cm_closes);
40886 cm_event.event = IW_CM_EVENT_CLOSE;
40887 cm_event.status = 0;
40888 cm_event.provider_data = cm_id->provider_data;
40889@@ -3715,7 +3715,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
40890 return;
40891 cm_id = cm_node->cm_id;
40892
40893- atomic_inc(&cm_connect_reqs);
40894+ atomic_inc_unchecked(&cm_connect_reqs);
40895 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
40896 cm_node, cm_id, jiffies);
40897
40898@@ -3755,7 +3755,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
40899 return;
40900 cm_id = cm_node->cm_id;
40901
40902- atomic_inc(&cm_connect_reqs);
40903+ atomic_inc_unchecked(&cm_connect_reqs);
40904 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
40905 cm_node, cm_id, jiffies);
40906
40907diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
40908index 4166452..fc952c3 100644
40909--- a/drivers/infiniband/hw/nes/nes_mgt.c
40910+++ b/drivers/infiniband/hw/nes/nes_mgt.c
40911@@ -40,8 +40,8 @@
40912 #include "nes.h"
40913 #include "nes_mgt.h"
40914
40915-atomic_t pau_qps_created;
40916-atomic_t pau_qps_destroyed;
40917+atomic_unchecked_t pau_qps_created;
40918+atomic_unchecked_t pau_qps_destroyed;
40919
40920 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
40921 {
40922@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
40923 {
40924 struct sk_buff *skb;
40925 unsigned long flags;
40926- atomic_inc(&pau_qps_destroyed);
40927+ atomic_inc_unchecked(&pau_qps_destroyed);
40928
40929 /* Free packets that have not yet been forwarded */
40930 /* Lock is acquired by skb_dequeue when removing the skb */
40931@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
40932 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
40933 skb_queue_head_init(&nesqp->pau_list);
40934 spin_lock_init(&nesqp->pau_lock);
40935- atomic_inc(&pau_qps_created);
40936+ atomic_inc_unchecked(&pau_qps_created);
40937 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
40938 }
40939
40940diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
40941index 49eb511..a774366 100644
40942--- a/drivers/infiniband/hw/nes/nes_nic.c
40943+++ b/drivers/infiniband/hw/nes/nes_nic.c
40944@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
40945 target_stat_values[++index] = mh_detected;
40946 target_stat_values[++index] = mh_pauses_sent;
40947 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
40948- target_stat_values[++index] = atomic_read(&cm_connects);
40949- target_stat_values[++index] = atomic_read(&cm_accepts);
40950- target_stat_values[++index] = atomic_read(&cm_disconnects);
40951- target_stat_values[++index] = atomic_read(&cm_connecteds);
40952- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
40953- target_stat_values[++index] = atomic_read(&cm_rejects);
40954- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
40955- target_stat_values[++index] = atomic_read(&qps_created);
40956- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
40957- target_stat_values[++index] = atomic_read(&qps_destroyed);
40958- target_stat_values[++index] = atomic_read(&cm_closes);
40959+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
40960+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
40961+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
40962+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
40963+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
40964+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
40965+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
40966+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
40967+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
40968+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
40969+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
40970 target_stat_values[++index] = cm_packets_sent;
40971 target_stat_values[++index] = cm_packets_bounced;
40972 target_stat_values[++index] = cm_packets_created;
40973 target_stat_values[++index] = cm_packets_received;
40974 target_stat_values[++index] = cm_packets_dropped;
40975 target_stat_values[++index] = cm_packets_retrans;
40976- target_stat_values[++index] = atomic_read(&cm_listens_created);
40977- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
40978+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
40979+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
40980 target_stat_values[++index] = cm_backlog_drops;
40981- target_stat_values[++index] = atomic_read(&cm_loopbacks);
40982- target_stat_values[++index] = atomic_read(&cm_nodes_created);
40983- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
40984- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
40985- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
40986+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
40987+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
40988+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
40989+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
40990+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
40991 target_stat_values[++index] = nesadapter->free_4kpbl;
40992 target_stat_values[++index] = nesadapter->free_256pbl;
40993 target_stat_values[++index] = int_mod_timer_init;
40994 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
40995 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
40996 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
40997- target_stat_values[++index] = atomic_read(&pau_qps_created);
40998- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
40999+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
41000+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
41001 }
41002
41003 /**
41004diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
41005index 8f67fe2..8960859 100644
41006--- a/drivers/infiniband/hw/nes/nes_verbs.c
41007+++ b/drivers/infiniband/hw/nes/nes_verbs.c
41008@@ -46,9 +46,9 @@
41009
41010 #include <rdma/ib_umem.h>
41011
41012-atomic_t mod_qp_timouts;
41013-atomic_t qps_created;
41014-atomic_t sw_qps_destroyed;
41015+atomic_unchecked_t mod_qp_timouts;
41016+atomic_unchecked_t qps_created;
41017+atomic_unchecked_t sw_qps_destroyed;
41018
41019 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
41020
41021@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
41022 if (init_attr->create_flags)
41023 return ERR_PTR(-EINVAL);
41024
41025- atomic_inc(&qps_created);
41026+ atomic_inc_unchecked(&qps_created);
41027 switch (init_attr->qp_type) {
41028 case IB_QPT_RC:
41029 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
41030@@ -1465,7 +1465,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
41031 struct iw_cm_event cm_event;
41032 int ret = 0;
41033
41034- atomic_inc(&sw_qps_destroyed);
41035+ atomic_inc_unchecked(&sw_qps_destroyed);
41036 nesqp->destroyed = 1;
41037
41038 /* Blow away the connection if it exists. */
41039diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
41040index 4d11575..3e890e5 100644
41041--- a/drivers/infiniband/hw/qib/qib.h
41042+++ b/drivers/infiniband/hw/qib/qib.h
41043@@ -51,6 +51,7 @@
41044 #include <linux/completion.h>
41045 #include <linux/kref.h>
41046 #include <linux/sched.h>
41047+#include <linux/slab.h>
41048
41049 #include "qib_common.h"
41050 #include "qib_verbs.h"
41051diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
41052index da739d9..da1c7f4 100644
41053--- a/drivers/input/gameport/gameport.c
41054+++ b/drivers/input/gameport/gameport.c
41055@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
41056 */
41057 static void gameport_init_port(struct gameport *gameport)
41058 {
41059- static atomic_t gameport_no = ATOMIC_INIT(0);
41060+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
41061
41062 __module_get(THIS_MODULE);
41063
41064 mutex_init(&gameport->drv_mutex);
41065 device_initialize(&gameport->dev);
41066 dev_set_name(&gameport->dev, "gameport%lu",
41067- (unsigned long)atomic_inc_return(&gameport_no) - 1);
41068+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
41069 gameport->dev.bus = &gameport_bus;
41070 gameport->dev.release = gameport_release_port;
41071 if (gameport->parent)
41072diff --git a/drivers/input/input.c b/drivers/input/input.c
41073index c044699..174d71a 100644
41074--- a/drivers/input/input.c
41075+++ b/drivers/input/input.c
41076@@ -2019,7 +2019,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
41077 */
41078 int input_register_device(struct input_dev *dev)
41079 {
41080- static atomic_t input_no = ATOMIC_INIT(0);
41081+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
41082 struct input_devres *devres = NULL;
41083 struct input_handler *handler;
41084 unsigned int packet_size;
41085@@ -2074,7 +2074,7 @@ int input_register_device(struct input_dev *dev)
41086 dev->setkeycode = input_default_setkeycode;
41087
41088 dev_set_name(&dev->dev, "input%ld",
41089- (unsigned long) atomic_inc_return(&input_no) - 1);
41090+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
41091
41092 error = device_add(&dev->dev);
41093 if (error)
41094diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
41095index 04c69af..5f92d00 100644
41096--- a/drivers/input/joystick/sidewinder.c
41097+++ b/drivers/input/joystick/sidewinder.c
41098@@ -30,6 +30,7 @@
41099 #include <linux/kernel.h>
41100 #include <linux/module.h>
41101 #include <linux/slab.h>
41102+#include <linux/sched.h>
41103 #include <linux/init.h>
41104 #include <linux/input.h>
41105 #include <linux/gameport.h>
41106diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
41107index fa061d4..4a6957c 100644
41108--- a/drivers/input/joystick/xpad.c
41109+++ b/drivers/input/joystick/xpad.c
41110@@ -735,7 +735,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
41111
41112 static int xpad_led_probe(struct usb_xpad *xpad)
41113 {
41114- static atomic_t led_seq = ATOMIC_INIT(0);
41115+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
41116 long led_no;
41117 struct xpad_led *led;
41118 struct led_classdev *led_cdev;
41119@@ -748,7 +748,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
41120 if (!led)
41121 return -ENOMEM;
41122
41123- led_no = (long)atomic_inc_return(&led_seq) - 1;
41124+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
41125
41126 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
41127 led->xpad = xpad;
41128diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
41129index 2f0b39d..7370f13 100644
41130--- a/drivers/input/mouse/psmouse.h
41131+++ b/drivers/input/mouse/psmouse.h
41132@@ -116,7 +116,7 @@ struct psmouse_attribute {
41133 ssize_t (*set)(struct psmouse *psmouse, void *data,
41134 const char *buf, size_t count);
41135 bool protect;
41136-};
41137+} __do_const;
41138 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
41139
41140 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
41141diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
41142index 4c842c3..590b0bf 100644
41143--- a/drivers/input/mousedev.c
41144+++ b/drivers/input/mousedev.c
41145@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
41146
41147 spin_unlock_irq(&client->packet_lock);
41148
41149- if (copy_to_user(buffer, data, count))
41150+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
41151 return -EFAULT;
41152
41153 return count;
41154diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
41155index 25fc597..558bf3b3 100644
41156--- a/drivers/input/serio/serio.c
41157+++ b/drivers/input/serio/serio.c
41158@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
41159 */
41160 static void serio_init_port(struct serio *serio)
41161 {
41162- static atomic_t serio_no = ATOMIC_INIT(0);
41163+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
41164
41165 __module_get(THIS_MODULE);
41166
41167@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
41168 mutex_init(&serio->drv_mutex);
41169 device_initialize(&serio->dev);
41170 dev_set_name(&serio->dev, "serio%ld",
41171- (long)atomic_inc_return(&serio_no) - 1);
41172+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
41173 serio->dev.bus = &serio_bus;
41174 serio->dev.release = serio_release_port;
41175 serio->dev.groups = serio_device_attr_groups;
41176diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
41177index d8f98b1..f62a640 100644
41178--- a/drivers/iommu/iommu.c
41179+++ b/drivers/iommu/iommu.c
41180@@ -583,7 +583,7 @@ static struct notifier_block iommu_bus_nb = {
41181 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
41182 {
41183 bus_register_notifier(bus, &iommu_bus_nb);
41184- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
41185+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
41186 }
41187
41188 /**
41189diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
41190index dcfea4e..f4226b2 100644
41191--- a/drivers/iommu/irq_remapping.c
41192+++ b/drivers/iommu/irq_remapping.c
41193@@ -354,7 +354,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
41194 void panic_if_irq_remap(const char *msg)
41195 {
41196 if (irq_remapping_enabled)
41197- panic(msg);
41198+ panic("%s", msg);
41199 }
41200
41201 static void ir_ack_apic_edge(struct irq_data *data)
41202@@ -375,10 +375,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
41203
41204 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
41205 {
41206- chip->irq_print_chip = ir_print_prefix;
41207- chip->irq_ack = ir_ack_apic_edge;
41208- chip->irq_eoi = ir_ack_apic_level;
41209- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
41210+ pax_open_kernel();
41211+ *(void **)&chip->irq_print_chip = ir_print_prefix;
41212+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
41213+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
41214+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
41215+ pax_close_kernel();
41216 }
41217
41218 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
41219diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
41220index 19ceaa6..3625818 100644
41221--- a/drivers/irqchip/irq-gic.c
41222+++ b/drivers/irqchip/irq-gic.c
41223@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
41224 * Supported arch specific GIC irq extension.
41225 * Default make them NULL.
41226 */
41227-struct irq_chip gic_arch_extn = {
41228+irq_chip_no_const gic_arch_extn = {
41229 .irq_eoi = NULL,
41230 .irq_mask = NULL,
41231 .irq_unmask = NULL,
41232@@ -333,7 +333,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
41233 chained_irq_exit(chip, desc);
41234 }
41235
41236-static struct irq_chip gic_chip = {
41237+static irq_chip_no_const gic_chip __read_only = {
41238 .name = "GIC",
41239 .irq_mask = gic_mask_irq,
41240 .irq_unmask = gic_unmask_irq,
41241diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
41242index ac6f72b..81150f2 100644
41243--- a/drivers/isdn/capi/capi.c
41244+++ b/drivers/isdn/capi/capi.c
41245@@ -81,8 +81,8 @@ struct capiminor {
41246
41247 struct capi20_appl *ap;
41248 u32 ncci;
41249- atomic_t datahandle;
41250- atomic_t msgid;
41251+ atomic_unchecked_t datahandle;
41252+ atomic_unchecked_t msgid;
41253
41254 struct tty_port port;
41255 int ttyinstop;
41256@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
41257 capimsg_setu16(s, 2, mp->ap->applid);
41258 capimsg_setu8 (s, 4, CAPI_DATA_B3);
41259 capimsg_setu8 (s, 5, CAPI_RESP);
41260- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
41261+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
41262 capimsg_setu32(s, 8, mp->ncci);
41263 capimsg_setu16(s, 12, datahandle);
41264 }
41265@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
41266 mp->outbytes -= len;
41267 spin_unlock_bh(&mp->outlock);
41268
41269- datahandle = atomic_inc_return(&mp->datahandle);
41270+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
41271 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
41272 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
41273 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
41274 capimsg_setu16(skb->data, 2, mp->ap->applid);
41275 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
41276 capimsg_setu8 (skb->data, 5, CAPI_REQ);
41277- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
41278+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
41279 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
41280 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
41281 capimsg_setu16(skb->data, 16, len); /* Data length */
41282diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
41283index 600c79b..3752bab 100644
41284--- a/drivers/isdn/gigaset/interface.c
41285+++ b/drivers/isdn/gigaset/interface.c
41286@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
41287 }
41288 tty->driver_data = cs;
41289
41290- ++cs->port.count;
41291+ atomic_inc(&cs->port.count);
41292
41293- if (cs->port.count == 1) {
41294+ if (atomic_read(&cs->port.count) == 1) {
41295 tty_port_tty_set(&cs->port, tty);
41296 cs->port.low_latency = 1;
41297 }
41298@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
41299
41300 if (!cs->connected)
41301 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
41302- else if (!cs->port.count)
41303+ else if (!atomic_read(&cs->port.count))
41304 dev_warn(cs->dev, "%s: device not opened\n", __func__);
41305- else if (!--cs->port.count)
41306+ else if (!atomic_dec_return(&cs->port.count))
41307 tty_port_tty_set(&cs->port, NULL);
41308
41309 mutex_unlock(&cs->mutex);
41310diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
41311index d0a41cb..f0cdb8c 100644
41312--- a/drivers/isdn/gigaset/usb-gigaset.c
41313+++ b/drivers/isdn/gigaset/usb-gigaset.c
41314@@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
41315 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
41316 memcpy(cs->hw.usb->bchars, buf, 6);
41317 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
41318- 0, 0, &buf, 6, 2000);
41319+ 0, 0, buf, 6, 2000);
41320 }
41321
41322 static void gigaset_freebcshw(struct bc_state *bcs)
41323diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
41324index 4d9b195..455075c 100644
41325--- a/drivers/isdn/hardware/avm/b1.c
41326+++ b/drivers/isdn/hardware/avm/b1.c
41327@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
41328 }
41329 if (left) {
41330 if (t4file->user) {
41331- if (copy_from_user(buf, dp, left))
41332+ if (left > sizeof buf || copy_from_user(buf, dp, left))
41333 return -EFAULT;
41334 } else {
41335 memcpy(buf, dp, left);
41336@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
41337 }
41338 if (left) {
41339 if (config->user) {
41340- if (copy_from_user(buf, dp, left))
41341+ if (left > sizeof buf || copy_from_user(buf, dp, left))
41342 return -EFAULT;
41343 } else {
41344 memcpy(buf, dp, left);
41345diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
41346index 9bb12ba..d4262f7 100644
41347--- a/drivers/isdn/i4l/isdn_common.c
41348+++ b/drivers/isdn/i4l/isdn_common.c
41349@@ -1651,6 +1651,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
41350 } else
41351 return -EINVAL;
41352 case IIOCDBGVAR:
41353+ if (!capable(CAP_SYS_RAWIO))
41354+ return -EPERM;
41355 if (arg) {
41356 if (copy_to_user(argp, &dev, sizeof(ulong)))
41357 return -EFAULT;
41358diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
41359index 3c5f249..5fac4d0 100644
41360--- a/drivers/isdn/i4l/isdn_tty.c
41361+++ b/drivers/isdn/i4l/isdn_tty.c
41362@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
41363
41364 #ifdef ISDN_DEBUG_MODEM_OPEN
41365 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
41366- port->count);
41367+ atomic_read(&port->count));
41368 #endif
41369- port->count++;
41370+ atomic_inc(&port->count);
41371 port->tty = tty;
41372 /*
41373 * Start up serial port
41374@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
41375 #endif
41376 return;
41377 }
41378- if ((tty->count == 1) && (port->count != 1)) {
41379+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
41380 /*
41381 * Uh, oh. tty->count is 1, which means that the tty
41382 * structure will be freed. Info->count should always
41383@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
41384 * serial port won't be shutdown.
41385 */
41386 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
41387- "info->count is %d\n", port->count);
41388- port->count = 1;
41389+ "info->count is %d\n", atomic_read(&port->count));
41390+ atomic_set(&port->count, 1);
41391 }
41392- if (--port->count < 0) {
41393+ if (atomic_dec_return(&port->count) < 0) {
41394 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
41395- info->line, port->count);
41396- port->count = 0;
41397+ info->line, atomic_read(&port->count));
41398+ atomic_set(&port->count, 0);
41399 }
41400- if (port->count) {
41401+ if (atomic_read(&port->count)) {
41402 #ifdef ISDN_DEBUG_MODEM_OPEN
41403 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
41404 #endif
41405@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
41406 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
41407 return;
41408 isdn_tty_shutdown(info);
41409- port->count = 0;
41410+ atomic_set(&port->count, 0);
41411 port->flags &= ~ASYNC_NORMAL_ACTIVE;
41412 port->tty = NULL;
41413 wake_up_interruptible(&port->open_wait);
41414@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
41415 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
41416 modem_info *info = &dev->mdm.info[i];
41417
41418- if (info->port.count == 0)
41419+ if (atomic_read(&info->port.count) == 0)
41420 continue;
41421 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
41422 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
41423diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
41424index e74df7c..03a03ba 100644
41425--- a/drivers/isdn/icn/icn.c
41426+++ b/drivers/isdn/icn/icn.c
41427@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
41428 if (count > len)
41429 count = len;
41430 if (user) {
41431- if (copy_from_user(msg, buf, count))
41432+ if (count > sizeof msg || copy_from_user(msg, buf, count))
41433 return -EFAULT;
41434 } else
41435 memcpy(msg, buf, count);
41436diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
41437index 6a8405d..0bd1c7e 100644
41438--- a/drivers/leds/leds-clevo-mail.c
41439+++ b/drivers/leds/leds-clevo-mail.c
41440@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
41441 * detected as working, but in reality it is not) as low as
41442 * possible.
41443 */
41444-static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = {
41445+static const struct dmi_system_id __initconst clevo_mail_led_dmi_table[] = {
41446 {
41447 .callback = clevo_mail_led_dmi_callback,
41448 .ident = "Clevo D410J",
41449diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
41450index 64e204e..c6bf189 100644
41451--- a/drivers/leds/leds-ss4200.c
41452+++ b/drivers/leds/leds-ss4200.c
41453@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
41454 * detected as working, but in reality it is not) as low as
41455 * possible.
41456 */
41457-static struct dmi_system_id __initdata nas_led_whitelist[] = {
41458+static const struct dmi_system_id __initconst nas_led_whitelist[] = {
41459 {
41460 .callback = ss4200_led_dmi_callback,
41461 .ident = "Intel SS4200-E",
41462diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
41463index 0bf1e4e..b4bf44e 100644
41464--- a/drivers/lguest/core.c
41465+++ b/drivers/lguest/core.c
41466@@ -97,9 +97,17 @@ static __init int map_switcher(void)
41467 * The end address needs +1 because __get_vm_area allocates an
41468 * extra guard page, so we need space for that.
41469 */
41470+
41471+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
41472+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
41473+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
41474+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
41475+#else
41476 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
41477 VM_ALLOC, switcher_addr, switcher_addr
41478 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
41479+#endif
41480+
41481 if (!switcher_vma) {
41482 err = -ENOMEM;
41483 printk("lguest: could not map switcher pages high\n");
41484@@ -124,7 +132,7 @@ static __init int map_switcher(void)
41485 * Now the Switcher is mapped at the right address, we can't fail!
41486 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
41487 */
41488- memcpy(switcher_vma->addr, start_switcher_text,
41489+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
41490 end_switcher_text - start_switcher_text);
41491
41492 printk(KERN_INFO "lguest: mapped switcher at %p\n",
41493diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
41494index 5b9ac32..2ef4f26 100644
41495--- a/drivers/lguest/page_tables.c
41496+++ b/drivers/lguest/page_tables.c
41497@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
41498 /*:*/
41499
41500 #ifdef CONFIG_X86_PAE
41501-static void release_pmd(pmd_t *spmd)
41502+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
41503 {
41504 /* If the entry's not present, there's nothing to release. */
41505 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
41506diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
41507index f0a3347..f6608b2 100644
41508--- a/drivers/lguest/x86/core.c
41509+++ b/drivers/lguest/x86/core.c
41510@@ -59,7 +59,7 @@ static struct {
41511 /* Offset from where switcher.S was compiled to where we've copied it */
41512 static unsigned long switcher_offset(void)
41513 {
41514- return switcher_addr - (unsigned long)start_switcher_text;
41515+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
41516 }
41517
41518 /* This cpu's struct lguest_pages (after the Switcher text page) */
41519@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
41520 * These copies are pretty cheap, so we do them unconditionally: */
41521 /* Save the current Host top-level page directory.
41522 */
41523+
41524+#ifdef CONFIG_PAX_PER_CPU_PGD
41525+ pages->state.host_cr3 = read_cr3();
41526+#else
41527 pages->state.host_cr3 = __pa(current->mm->pgd);
41528+#endif
41529+
41530 /*
41531 * Set up the Guest's page tables to see this CPU's pages (and no
41532 * other CPU's pages).
41533@@ -475,7 +481,7 @@ void __init lguest_arch_host_init(void)
41534 * compiled-in switcher code and the high-mapped copy we just made.
41535 */
41536 for (i = 0; i < IDT_ENTRIES; i++)
41537- default_idt_entries[i] += switcher_offset();
41538+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
41539
41540 /*
41541 * Set up the Switcher's per-cpu areas.
41542@@ -558,7 +564,7 @@ void __init lguest_arch_host_init(void)
41543 * it will be undisturbed when we switch. To change %cs and jump we
41544 * need this structure to feed to Intel's "lcall" instruction.
41545 */
41546- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
41547+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
41548 lguest_entry.segment = LGUEST_CS;
41549
41550 /*
41551diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
41552index 40634b0..4f5855e 100644
41553--- a/drivers/lguest/x86/switcher_32.S
41554+++ b/drivers/lguest/x86/switcher_32.S
41555@@ -87,6 +87,7 @@
41556 #include <asm/page.h>
41557 #include <asm/segment.h>
41558 #include <asm/lguest.h>
41559+#include <asm/processor-flags.h>
41560
41561 // We mark the start of the code to copy
41562 // It's placed in .text tho it's never run here
41563@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
41564 // Changes type when we load it: damn Intel!
41565 // For after we switch over our page tables
41566 // That entry will be read-only: we'd crash.
41567+
41568+#ifdef CONFIG_PAX_KERNEXEC
41569+ mov %cr0, %edx
41570+ xor $X86_CR0_WP, %edx
41571+ mov %edx, %cr0
41572+#endif
41573+
41574 movl $(GDT_ENTRY_TSS*8), %edx
41575 ltr %dx
41576
41577@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
41578 // Let's clear it again for our return.
41579 // The GDT descriptor of the Host
41580 // Points to the table after two "size" bytes
41581- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
41582+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
41583 // Clear "used" from type field (byte 5, bit 2)
41584- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
41585+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
41586+
41587+#ifdef CONFIG_PAX_KERNEXEC
41588+ mov %cr0, %eax
41589+ xor $X86_CR0_WP, %eax
41590+ mov %eax, %cr0
41591+#endif
41592
41593 // Once our page table's switched, the Guest is live!
41594 // The Host fades as we run this final step.
41595@@ -295,13 +309,12 @@ deliver_to_host:
41596 // I consulted gcc, and it gave
41597 // These instructions, which I gladly credit:
41598 leal (%edx,%ebx,8), %eax
41599- movzwl (%eax),%edx
41600- movl 4(%eax), %eax
41601- xorw %ax, %ax
41602- orl %eax, %edx
41603+ movl 4(%eax), %edx
41604+ movw (%eax), %dx
41605 // Now the address of the handler's in %edx
41606 // We call it now: its "iret" drops us home.
41607- jmp *%edx
41608+ ljmp $__KERNEL_CS, $1f
41609+1: jmp *%edx
41610
41611 // Every interrupt can come to us here
41612 // But we must truly tell each apart.
41613diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
41614index 0003992..854bbce 100644
41615--- a/drivers/md/bcache/closure.h
41616+++ b/drivers/md/bcache/closure.h
41617@@ -622,7 +622,7 @@ static inline void closure_wake_up(struct closure_waitlist *list)
41618 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
41619 struct workqueue_struct *wq)
41620 {
41621- BUG_ON(object_is_on_stack(cl));
41622+ BUG_ON(object_starts_on_stack(cl));
41623 closure_set_ip(cl);
41624 cl->fn = fn;
41625 cl->wq = wq;
41626diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
41627index b4713ce..b30139b 100644
41628--- a/drivers/md/bcache/super.c
41629+++ b/drivers/md/bcache/super.c
41630@@ -1603,7 +1603,7 @@ err_unlock_gc:
41631 err:
41632 closure_sync(&op.cl);
41633 /* XXX: test this, it's broken */
41634- bch_cache_set_error(c, err);
41635+ bch_cache_set_error(c, "%s", err);
41636 }
41637
41638 static bool can_attach_cache(struct cache *ca, struct cache_set *c)
41639diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
41640index 5a2c754..0fa55db 100644
41641--- a/drivers/md/bitmap.c
41642+++ b/drivers/md/bitmap.c
41643@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
41644 chunk_kb ? "KB" : "B");
41645 if (bitmap->storage.file) {
41646 seq_printf(seq, ", file: ");
41647- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
41648+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
41649 }
41650
41651 seq_printf(seq, "\n");
41652diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
41653index 81a79b7..87a0f73 100644
41654--- a/drivers/md/dm-ioctl.c
41655+++ b/drivers/md/dm-ioctl.c
41656@@ -1697,7 +1697,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
41657 cmd == DM_LIST_VERSIONS_CMD)
41658 return 0;
41659
41660- if ((cmd == DM_DEV_CREATE_CMD)) {
41661+ if (cmd == DM_DEV_CREATE_CMD) {
41662 if (!*param->name) {
41663 DMWARN("name not supplied when creating device");
41664 return -EINVAL;
41665diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
41666index 699b5be..eac0a15 100644
41667--- a/drivers/md/dm-raid1.c
41668+++ b/drivers/md/dm-raid1.c
41669@@ -40,7 +40,7 @@ enum dm_raid1_error {
41670
41671 struct mirror {
41672 struct mirror_set *ms;
41673- atomic_t error_count;
41674+ atomic_unchecked_t error_count;
41675 unsigned long error_type;
41676 struct dm_dev *dev;
41677 sector_t offset;
41678@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
41679 struct mirror *m;
41680
41681 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
41682- if (!atomic_read(&m->error_count))
41683+ if (!atomic_read_unchecked(&m->error_count))
41684 return m;
41685
41686 return NULL;
41687@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
41688 * simple way to tell if a device has encountered
41689 * errors.
41690 */
41691- atomic_inc(&m->error_count);
41692+ atomic_inc_unchecked(&m->error_count);
41693
41694 if (test_and_set_bit(error_type, &m->error_type))
41695 return;
41696@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
41697 struct mirror *m = get_default_mirror(ms);
41698
41699 do {
41700- if (likely(!atomic_read(&m->error_count)))
41701+ if (likely(!atomic_read_unchecked(&m->error_count)))
41702 return m;
41703
41704 if (m-- == ms->mirror)
41705@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
41706 {
41707 struct mirror *default_mirror = get_default_mirror(m->ms);
41708
41709- return !atomic_read(&default_mirror->error_count);
41710+ return !atomic_read_unchecked(&default_mirror->error_count);
41711 }
41712
41713 static int mirror_available(struct mirror_set *ms, struct bio *bio)
41714@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
41715 */
41716 if (likely(region_in_sync(ms, region, 1)))
41717 m = choose_mirror(ms, bio->bi_sector);
41718- else if (m && atomic_read(&m->error_count))
41719+ else if (m && atomic_read_unchecked(&m->error_count))
41720 m = NULL;
41721
41722 if (likely(m))
41723@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
41724 }
41725
41726 ms->mirror[mirror].ms = ms;
41727- atomic_set(&(ms->mirror[mirror].error_count), 0);
41728+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
41729 ms->mirror[mirror].error_type = 0;
41730 ms->mirror[mirror].offset = offset;
41731
41732@@ -1340,7 +1340,7 @@ static void mirror_resume(struct dm_target *ti)
41733 */
41734 static char device_status_char(struct mirror *m)
41735 {
41736- if (!atomic_read(&(m->error_count)))
41737+ if (!atomic_read_unchecked(&(m->error_count)))
41738 return 'A';
41739
41740 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
41741diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
41742index d907ca6..cfb8384 100644
41743--- a/drivers/md/dm-stripe.c
41744+++ b/drivers/md/dm-stripe.c
41745@@ -20,7 +20,7 @@ struct stripe {
41746 struct dm_dev *dev;
41747 sector_t physical_start;
41748
41749- atomic_t error_count;
41750+ atomic_unchecked_t error_count;
41751 };
41752
41753 struct stripe_c {
41754@@ -185,7 +185,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
41755 kfree(sc);
41756 return r;
41757 }
41758- atomic_set(&(sc->stripe[i].error_count), 0);
41759+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
41760 }
41761
41762 ti->private = sc;
41763@@ -326,7 +326,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
41764 DMEMIT("%d ", sc->stripes);
41765 for (i = 0; i < sc->stripes; i++) {
41766 DMEMIT("%s ", sc->stripe[i].dev->name);
41767- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
41768+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
41769 'D' : 'A';
41770 }
41771 buffer[i] = '\0';
41772@@ -371,8 +371,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
41773 */
41774 for (i = 0; i < sc->stripes; i++)
41775 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
41776- atomic_inc(&(sc->stripe[i].error_count));
41777- if (atomic_read(&(sc->stripe[i].error_count)) <
41778+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
41779+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
41780 DM_IO_ERROR_THRESHOLD)
41781 schedule_work(&sc->trigger_event);
41782 }
41783diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
41784index 1ff252a..ee384c1 100644
41785--- a/drivers/md/dm-table.c
41786+++ b/drivers/md/dm-table.c
41787@@ -389,7 +389,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
41788 if (!dev_size)
41789 return 0;
41790
41791- if ((start >= dev_size) || (start + len > dev_size)) {
41792+ if ((start >= dev_size) || (len > dev_size - start)) {
41793 DMWARN("%s: %s too small for target: "
41794 "start=%llu, len=%llu, dev_size=%llu",
41795 dm_device_name(ti->table->md), bdevname(bdev, b),
41796diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
41797index 60bce43..9b997d0 100644
41798--- a/drivers/md/dm-thin-metadata.c
41799+++ b/drivers/md/dm-thin-metadata.c
41800@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
41801 {
41802 pmd->info.tm = pmd->tm;
41803 pmd->info.levels = 2;
41804- pmd->info.value_type.context = pmd->data_sm;
41805+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
41806 pmd->info.value_type.size = sizeof(__le64);
41807 pmd->info.value_type.inc = data_block_inc;
41808 pmd->info.value_type.dec = data_block_dec;
41809@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
41810
41811 pmd->bl_info.tm = pmd->tm;
41812 pmd->bl_info.levels = 1;
41813- pmd->bl_info.value_type.context = pmd->data_sm;
41814+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
41815 pmd->bl_info.value_type.size = sizeof(__le64);
41816 pmd->bl_info.value_type.inc = data_block_inc;
41817 pmd->bl_info.value_type.dec = data_block_dec;
41818diff --git a/drivers/md/dm.c b/drivers/md/dm.c
41819index 33f2010..23fb84c 100644
41820--- a/drivers/md/dm.c
41821+++ b/drivers/md/dm.c
41822@@ -169,9 +169,9 @@ struct mapped_device {
41823 /*
41824 * Event handling.
41825 */
41826- atomic_t event_nr;
41827+ atomic_unchecked_t event_nr;
41828 wait_queue_head_t eventq;
41829- atomic_t uevent_seq;
41830+ atomic_unchecked_t uevent_seq;
41831 struct list_head uevent_list;
41832 spinlock_t uevent_lock; /* Protect access to uevent_list */
41833
41834@@ -1884,8 +1884,8 @@ static struct mapped_device *alloc_dev(int minor)
41835 rwlock_init(&md->map_lock);
41836 atomic_set(&md->holders, 1);
41837 atomic_set(&md->open_count, 0);
41838- atomic_set(&md->event_nr, 0);
41839- atomic_set(&md->uevent_seq, 0);
41840+ atomic_set_unchecked(&md->event_nr, 0);
41841+ atomic_set_unchecked(&md->uevent_seq, 0);
41842 INIT_LIST_HEAD(&md->uevent_list);
41843 spin_lock_init(&md->uevent_lock);
41844
41845@@ -2033,7 +2033,7 @@ static void event_callback(void *context)
41846
41847 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
41848
41849- atomic_inc(&md->event_nr);
41850+ atomic_inc_unchecked(&md->event_nr);
41851 wake_up(&md->eventq);
41852 }
41853
41854@@ -2690,18 +2690,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
41855
41856 uint32_t dm_next_uevent_seq(struct mapped_device *md)
41857 {
41858- return atomic_add_return(1, &md->uevent_seq);
41859+ return atomic_add_return_unchecked(1, &md->uevent_seq);
41860 }
41861
41862 uint32_t dm_get_event_nr(struct mapped_device *md)
41863 {
41864- return atomic_read(&md->event_nr);
41865+ return atomic_read_unchecked(&md->event_nr);
41866 }
41867
41868 int dm_wait_event(struct mapped_device *md, int event_nr)
41869 {
41870 return wait_event_interruptible(md->eventq,
41871- (event_nr != atomic_read(&md->event_nr)));
41872+ (event_nr != atomic_read_unchecked(&md->event_nr)));
41873 }
41874
41875 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
41876diff --git a/drivers/md/md.c b/drivers/md/md.c
41877index 51f0345..c77810e 100644
41878--- a/drivers/md/md.c
41879+++ b/drivers/md/md.c
41880@@ -234,10 +234,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
41881 * start build, activate spare
41882 */
41883 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
41884-static atomic_t md_event_count;
41885+static atomic_unchecked_t md_event_count;
41886 void md_new_event(struct mddev *mddev)
41887 {
41888- atomic_inc(&md_event_count);
41889+ atomic_inc_unchecked(&md_event_count);
41890 wake_up(&md_event_waiters);
41891 }
41892 EXPORT_SYMBOL_GPL(md_new_event);
41893@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
41894 */
41895 static void md_new_event_inintr(struct mddev *mddev)
41896 {
41897- atomic_inc(&md_event_count);
41898+ atomic_inc_unchecked(&md_event_count);
41899 wake_up(&md_event_waiters);
41900 }
41901
41902@@ -1501,7 +1501,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
41903 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
41904 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
41905 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
41906- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
41907+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
41908
41909 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
41910 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
41911@@ -1745,7 +1745,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
41912 else
41913 sb->resync_offset = cpu_to_le64(0);
41914
41915- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
41916+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
41917
41918 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
41919 sb->size = cpu_to_le64(mddev->dev_sectors);
41920@@ -2750,7 +2750,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
41921 static ssize_t
41922 errors_show(struct md_rdev *rdev, char *page)
41923 {
41924- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
41925+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
41926 }
41927
41928 static ssize_t
41929@@ -2759,7 +2759,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
41930 char *e;
41931 unsigned long n = simple_strtoul(buf, &e, 10);
41932 if (*buf && (*e == 0 || *e == '\n')) {
41933- atomic_set(&rdev->corrected_errors, n);
41934+ atomic_set_unchecked(&rdev->corrected_errors, n);
41935 return len;
41936 }
41937 return -EINVAL;
41938@@ -3207,8 +3207,8 @@ int md_rdev_init(struct md_rdev *rdev)
41939 rdev->sb_loaded = 0;
41940 rdev->bb_page = NULL;
41941 atomic_set(&rdev->nr_pending, 0);
41942- atomic_set(&rdev->read_errors, 0);
41943- atomic_set(&rdev->corrected_errors, 0);
41944+ atomic_set_unchecked(&rdev->read_errors, 0);
41945+ atomic_set_unchecked(&rdev->corrected_errors, 0);
41946
41947 INIT_LIST_HEAD(&rdev->same_set);
41948 init_waitqueue_head(&rdev->blocked_wait);
41949@@ -7009,7 +7009,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
41950
41951 spin_unlock(&pers_lock);
41952 seq_printf(seq, "\n");
41953- seq->poll_event = atomic_read(&md_event_count);
41954+ seq->poll_event = atomic_read_unchecked(&md_event_count);
41955 return 0;
41956 }
41957 if (v == (void*)2) {
41958@@ -7112,7 +7112,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
41959 return error;
41960
41961 seq = file->private_data;
41962- seq->poll_event = atomic_read(&md_event_count);
41963+ seq->poll_event = atomic_read_unchecked(&md_event_count);
41964 return error;
41965 }
41966
41967@@ -7126,7 +7126,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
41968 /* always allow read */
41969 mask = POLLIN | POLLRDNORM;
41970
41971- if (seq->poll_event != atomic_read(&md_event_count))
41972+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
41973 mask |= POLLERR | POLLPRI;
41974 return mask;
41975 }
41976@@ -7170,7 +7170,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
41977 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
41978 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
41979 (int)part_stat_read(&disk->part0, sectors[1]) -
41980- atomic_read(&disk->sync_io);
41981+ atomic_read_unchecked(&disk->sync_io);
41982 /* sync IO will cause sync_io to increase before the disk_stats
41983 * as sync_io is counted when a request starts, and
41984 * disk_stats is counted when it completes.
41985diff --git a/drivers/md/md.h b/drivers/md/md.h
41986index 653f992b6..6af6c40 100644
41987--- a/drivers/md/md.h
41988+++ b/drivers/md/md.h
41989@@ -94,13 +94,13 @@ struct md_rdev {
41990 * only maintained for arrays that
41991 * support hot removal
41992 */
41993- atomic_t read_errors; /* number of consecutive read errors that
41994+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
41995 * we have tried to ignore.
41996 */
41997 struct timespec last_read_error; /* monotonic time since our
41998 * last read error
41999 */
42000- atomic_t corrected_errors; /* number of corrected read errors,
42001+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
42002 * for reporting to userspace and storing
42003 * in superblock.
42004 */
42005@@ -434,7 +434,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
42006
42007 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
42008 {
42009- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
42010+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
42011 }
42012
42013 struct md_personality
42014diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
42015index 3e6d115..ffecdeb 100644
42016--- a/drivers/md/persistent-data/dm-space-map.h
42017+++ b/drivers/md/persistent-data/dm-space-map.h
42018@@ -71,6 +71,7 @@ struct dm_space_map {
42019 dm_sm_threshold_fn fn,
42020 void *context);
42021 };
42022+typedef struct dm_space_map __no_const dm_space_map_no_const;
42023
42024 /*----------------------------------------------------------------*/
42025
42026diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
42027index 6f48244..7d29145 100644
42028--- a/drivers/md/raid1.c
42029+++ b/drivers/md/raid1.c
42030@@ -1822,7 +1822,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
42031 if (r1_sync_page_io(rdev, sect, s,
42032 bio->bi_io_vec[idx].bv_page,
42033 READ) != 0)
42034- atomic_add(s, &rdev->corrected_errors);
42035+ atomic_add_unchecked(s, &rdev->corrected_errors);
42036 }
42037 sectors -= s;
42038 sect += s;
42039@@ -2049,7 +2049,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
42040 test_bit(In_sync, &rdev->flags)) {
42041 if (r1_sync_page_io(rdev, sect, s,
42042 conf->tmppage, READ)) {
42043- atomic_add(s, &rdev->corrected_errors);
42044+ atomic_add_unchecked(s, &rdev->corrected_errors);
42045 printk(KERN_INFO
42046 "md/raid1:%s: read error corrected "
42047 "(%d sectors at %llu on %s)\n",
42048diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
42049index 081bb33..3c4b287 100644
42050--- a/drivers/md/raid10.c
42051+++ b/drivers/md/raid10.c
42052@@ -1940,7 +1940,7 @@ static void end_sync_read(struct bio *bio, int error)
42053 /* The write handler will notice the lack of
42054 * R10BIO_Uptodate and record any errors etc
42055 */
42056- atomic_add(r10_bio->sectors,
42057+ atomic_add_unchecked(r10_bio->sectors,
42058 &conf->mirrors[d].rdev->corrected_errors);
42059
42060 /* for reconstruct, we always reschedule after a read.
42061@@ -2298,7 +2298,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
42062 {
42063 struct timespec cur_time_mon;
42064 unsigned long hours_since_last;
42065- unsigned int read_errors = atomic_read(&rdev->read_errors);
42066+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
42067
42068 ktime_get_ts(&cur_time_mon);
42069
42070@@ -2320,9 +2320,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
42071 * overflowing the shift of read_errors by hours_since_last.
42072 */
42073 if (hours_since_last >= 8 * sizeof(read_errors))
42074- atomic_set(&rdev->read_errors, 0);
42075+ atomic_set_unchecked(&rdev->read_errors, 0);
42076 else
42077- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
42078+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
42079 }
42080
42081 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
42082@@ -2376,8 +2376,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
42083 return;
42084
42085 check_decay_read_errors(mddev, rdev);
42086- atomic_inc(&rdev->read_errors);
42087- if (atomic_read(&rdev->read_errors) > max_read_errors) {
42088+ atomic_inc_unchecked(&rdev->read_errors);
42089+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
42090 char b[BDEVNAME_SIZE];
42091 bdevname(rdev->bdev, b);
42092
42093@@ -2385,7 +2385,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
42094 "md/raid10:%s: %s: Raid device exceeded "
42095 "read_error threshold [cur %d:max %d]\n",
42096 mdname(mddev), b,
42097- atomic_read(&rdev->read_errors), max_read_errors);
42098+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
42099 printk(KERN_NOTICE
42100 "md/raid10:%s: %s: Failing raid device\n",
42101 mdname(mddev), b);
42102@@ -2540,7 +2540,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
42103 sect +
42104 choose_data_offset(r10_bio, rdev)),
42105 bdevname(rdev->bdev, b));
42106- atomic_add(s, &rdev->corrected_errors);
42107+ atomic_add_unchecked(s, &rdev->corrected_errors);
42108 }
42109
42110 rdev_dec_pending(rdev, mddev);
42111diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
42112index a35b846..e295c6d 100644
42113--- a/drivers/md/raid5.c
42114+++ b/drivers/md/raid5.c
42115@@ -1764,21 +1764,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
42116 mdname(conf->mddev), STRIPE_SECTORS,
42117 (unsigned long long)s,
42118 bdevname(rdev->bdev, b));
42119- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
42120+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
42121 clear_bit(R5_ReadError, &sh->dev[i].flags);
42122 clear_bit(R5_ReWrite, &sh->dev[i].flags);
42123 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
42124 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
42125
42126- if (atomic_read(&rdev->read_errors))
42127- atomic_set(&rdev->read_errors, 0);
42128+ if (atomic_read_unchecked(&rdev->read_errors))
42129+ atomic_set_unchecked(&rdev->read_errors, 0);
42130 } else {
42131 const char *bdn = bdevname(rdev->bdev, b);
42132 int retry = 0;
42133 int set_bad = 0;
42134
42135 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
42136- atomic_inc(&rdev->read_errors);
42137+ atomic_inc_unchecked(&rdev->read_errors);
42138 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
42139 printk_ratelimited(
42140 KERN_WARNING
42141@@ -1806,7 +1806,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
42142 mdname(conf->mddev),
42143 (unsigned long long)s,
42144 bdn);
42145- } else if (atomic_read(&rdev->read_errors)
42146+ } else if (atomic_read_unchecked(&rdev->read_errors)
42147 > conf->max_nr_stripes)
42148 printk(KERN_WARNING
42149 "md/raid:%s: Too many read errors, failing device %s.\n",
42150diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
42151index 401ef64..836e563 100644
42152--- a/drivers/media/dvb-core/dvbdev.c
42153+++ b/drivers/media/dvb-core/dvbdev.c
42154@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
42155 const struct dvb_device *template, void *priv, int type)
42156 {
42157 struct dvb_device *dvbdev;
42158- struct file_operations *dvbdevfops;
42159+ file_operations_no_const *dvbdevfops;
42160 struct device *clsdev;
42161 int minor;
42162 int id;
42163diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
42164index 9b6c3bb..baeb5c7 100644
42165--- a/drivers/media/dvb-frontends/dib3000.h
42166+++ b/drivers/media/dvb-frontends/dib3000.h
42167@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
42168 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
42169 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
42170 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
42171-};
42172+} __no_const;
42173
42174 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
42175 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
42176diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
42177index c7a9be1..683f6f8 100644
42178--- a/drivers/media/pci/cx88/cx88-video.c
42179+++ b/drivers/media/pci/cx88/cx88-video.c
42180@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
42181
42182 /* ------------------------------------------------------------------ */
42183
42184-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42185-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42186-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42187+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42188+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42189+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42190
42191 module_param_array(video_nr, int, NULL, 0444);
42192 module_param_array(vbi_nr, int, NULL, 0444);
42193diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
42194index d338b19..aae4f9e 100644
42195--- a/drivers/media/platform/omap/omap_vout.c
42196+++ b/drivers/media/platform/omap/omap_vout.c
42197@@ -63,7 +63,6 @@ enum omap_vout_channels {
42198 OMAP_VIDEO2,
42199 };
42200
42201-static struct videobuf_queue_ops video_vbq_ops;
42202 /* Variables configurable through module params*/
42203 static u32 video1_numbuffers = 3;
42204 static u32 video2_numbuffers = 3;
42205@@ -1015,6 +1014,12 @@ static int omap_vout_open(struct file *file)
42206 {
42207 struct videobuf_queue *q;
42208 struct omap_vout_device *vout = NULL;
42209+ static struct videobuf_queue_ops video_vbq_ops = {
42210+ .buf_setup = omap_vout_buffer_setup,
42211+ .buf_prepare = omap_vout_buffer_prepare,
42212+ .buf_release = omap_vout_buffer_release,
42213+ .buf_queue = omap_vout_buffer_queue,
42214+ };
42215
42216 vout = video_drvdata(file);
42217 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
42218@@ -1032,10 +1037,6 @@ static int omap_vout_open(struct file *file)
42219 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
42220
42221 q = &vout->vbq;
42222- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
42223- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
42224- video_vbq_ops.buf_release = omap_vout_buffer_release;
42225- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
42226 spin_lock_init(&vout->vbq_lock);
42227
42228 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
42229diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
42230index 04e6490..2df65bf 100644
42231--- a/drivers/media/platform/s5p-tv/mixer.h
42232+++ b/drivers/media/platform/s5p-tv/mixer.h
42233@@ -156,7 +156,7 @@ struct mxr_layer {
42234 /** layer index (unique identifier) */
42235 int idx;
42236 /** callbacks for layer methods */
42237- struct mxr_layer_ops ops;
42238+ struct mxr_layer_ops *ops;
42239 /** format array */
42240 const struct mxr_format **fmt_array;
42241 /** size of format array */
42242diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
42243index b93a21f..2535195 100644
42244--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
42245+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
42246@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
42247 {
42248 struct mxr_layer *layer;
42249 int ret;
42250- struct mxr_layer_ops ops = {
42251+ static struct mxr_layer_ops ops = {
42252 .release = mxr_graph_layer_release,
42253 .buffer_set = mxr_graph_buffer_set,
42254 .stream_set = mxr_graph_stream_set,
42255diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
42256index b713403..53cb5ad 100644
42257--- a/drivers/media/platform/s5p-tv/mixer_reg.c
42258+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
42259@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
42260 layer->update_buf = next;
42261 }
42262
42263- layer->ops.buffer_set(layer, layer->update_buf);
42264+ layer->ops->buffer_set(layer, layer->update_buf);
42265
42266 if (done && done != layer->shadow_buf)
42267 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
42268diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
42269index ef0efdf..8c78eb6 100644
42270--- a/drivers/media/platform/s5p-tv/mixer_video.c
42271+++ b/drivers/media/platform/s5p-tv/mixer_video.c
42272@@ -209,7 +209,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
42273 layer->geo.src.height = layer->geo.src.full_height;
42274
42275 mxr_geometry_dump(mdev, &layer->geo);
42276- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42277+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42278 mxr_geometry_dump(mdev, &layer->geo);
42279 }
42280
42281@@ -227,7 +227,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
42282 layer->geo.dst.full_width = mbus_fmt.width;
42283 layer->geo.dst.full_height = mbus_fmt.height;
42284 layer->geo.dst.field = mbus_fmt.field;
42285- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42286+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42287
42288 mxr_geometry_dump(mdev, &layer->geo);
42289 }
42290@@ -333,7 +333,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
42291 /* set source size to highest accepted value */
42292 geo->src.full_width = max(geo->dst.full_width, pix->width);
42293 geo->src.full_height = max(geo->dst.full_height, pix->height);
42294- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42295+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42296 mxr_geometry_dump(mdev, &layer->geo);
42297 /* set cropping to total visible screen */
42298 geo->src.width = pix->width;
42299@@ -341,12 +341,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
42300 geo->src.x_offset = 0;
42301 geo->src.y_offset = 0;
42302 /* assure consistency of geometry */
42303- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
42304+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
42305 mxr_geometry_dump(mdev, &layer->geo);
42306 /* set full size to lowest possible value */
42307 geo->src.full_width = 0;
42308 geo->src.full_height = 0;
42309- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42310+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42311 mxr_geometry_dump(mdev, &layer->geo);
42312
42313 /* returning results */
42314@@ -473,7 +473,7 @@ static int mxr_s_selection(struct file *file, void *fh,
42315 target->width = s->r.width;
42316 target->height = s->r.height;
42317
42318- layer->ops.fix_geometry(layer, stage, s->flags);
42319+ layer->ops->fix_geometry(layer, stage, s->flags);
42320
42321 /* retrieve update selection rectangle */
42322 res.left = target->x_offset;
42323@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
42324 mxr_output_get(mdev);
42325
42326 mxr_layer_update_output(layer);
42327- layer->ops.format_set(layer);
42328+ layer->ops->format_set(layer);
42329 /* enabling layer in hardware */
42330 spin_lock_irqsave(&layer->enq_slock, flags);
42331 layer->state = MXR_LAYER_STREAMING;
42332 spin_unlock_irqrestore(&layer->enq_slock, flags);
42333
42334- layer->ops.stream_set(layer, MXR_ENABLE);
42335+ layer->ops->stream_set(layer, MXR_ENABLE);
42336 mxr_streamer_get(mdev);
42337
42338 return 0;
42339@@ -1030,7 +1030,7 @@ static int stop_streaming(struct vb2_queue *vq)
42340 spin_unlock_irqrestore(&layer->enq_slock, flags);
42341
42342 /* disabling layer in hardware */
42343- layer->ops.stream_set(layer, MXR_DISABLE);
42344+ layer->ops->stream_set(layer, MXR_DISABLE);
42345 /* remove one streamer */
42346 mxr_streamer_put(mdev);
42347 /* allow changes in output configuration */
42348@@ -1069,8 +1069,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
42349
42350 void mxr_layer_release(struct mxr_layer *layer)
42351 {
42352- if (layer->ops.release)
42353- layer->ops.release(layer);
42354+ if (layer->ops->release)
42355+ layer->ops->release(layer);
42356 }
42357
42358 void mxr_base_layer_release(struct mxr_layer *layer)
42359@@ -1096,7 +1096,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
42360
42361 layer->mdev = mdev;
42362 layer->idx = idx;
42363- layer->ops = *ops;
42364+ layer->ops = ops;
42365
42366 spin_lock_init(&layer->enq_slock);
42367 INIT_LIST_HEAD(&layer->enq_list);
42368diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
42369index 3d13a63..da31bf1 100644
42370--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
42371+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
42372@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
42373 {
42374 struct mxr_layer *layer;
42375 int ret;
42376- struct mxr_layer_ops ops = {
42377+ static struct mxr_layer_ops ops = {
42378 .release = mxr_vp_layer_release,
42379 .buffer_set = mxr_vp_buffer_set,
42380 .stream_set = mxr_vp_stream_set,
42381diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
42382index 545c04c..a14bded 100644
42383--- a/drivers/media/radio/radio-cadet.c
42384+++ b/drivers/media/radio/radio-cadet.c
42385@@ -324,6 +324,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
42386 unsigned char readbuf[RDS_BUFFER];
42387 int i = 0;
42388
42389+ if (count > RDS_BUFFER)
42390+ return -EFAULT;
42391 mutex_lock(&dev->lock);
42392 if (dev->rdsstat == 0)
42393 cadet_start_rds(dev);
42394@@ -339,7 +341,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
42395 while (i < count && dev->rdsin != dev->rdsout)
42396 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
42397
42398- if (i && copy_to_user(data, readbuf, i))
42399+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
42400 i = -EFAULT;
42401 unlock:
42402 mutex_unlock(&dev->lock);
42403diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
42404index 3940bb0..fb3952a 100644
42405--- a/drivers/media/usb/dvb-usb/cxusb.c
42406+++ b/drivers/media/usb/dvb-usb/cxusb.c
42407@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
42408
42409 struct dib0700_adapter_state {
42410 int (*set_param_save) (struct dvb_frontend *);
42411-};
42412+} __no_const;
42413
42414 static int dib7070_set_param_override(struct dvb_frontend *fe)
42415 {
42416diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
42417index 6e237b6..dc25556 100644
42418--- a/drivers/media/usb/dvb-usb/dw2102.c
42419+++ b/drivers/media/usb/dvb-usb/dw2102.c
42420@@ -118,7 +118,7 @@ struct su3000_state {
42421
42422 struct s6x0_state {
42423 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
42424-};
42425+} __no_const;
42426
42427 /* debug */
42428 static int dvb_usb_dw2102_debug;
42429diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
42430index f129551..ecf6514 100644
42431--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
42432+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
42433@@ -326,7 +326,7 @@ struct v4l2_buffer32 {
42434 __u32 reserved;
42435 };
42436
42437-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
42438+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
42439 enum v4l2_memory memory)
42440 {
42441 void __user *up_pln;
42442@@ -355,7 +355,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
42443 return 0;
42444 }
42445
42446-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
42447+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
42448 enum v4l2_memory memory)
42449 {
42450 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
42451@@ -772,7 +772,7 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
42452 put_user(kp->start_block, &up->start_block) ||
42453 put_user(kp->blocks, &up->blocks) ||
42454 put_user(tmp, &up->edid) ||
42455- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
42456+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
42457 return -EFAULT;
42458 return 0;
42459 }
42460diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
42461index 7658586..1079260 100644
42462--- a/drivers/media/v4l2-core/v4l2-ioctl.c
42463+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
42464@@ -1995,7 +1995,8 @@ struct v4l2_ioctl_info {
42465 struct file *file, void *fh, void *p);
42466 } u;
42467 void (*debug)(const void *arg, bool write_only);
42468-};
42469+} __do_const;
42470+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
42471
42472 /* This control needs a priority check */
42473 #define INFO_FL_PRIO (1 << 0)
42474@@ -2177,7 +2178,7 @@ static long __video_do_ioctl(struct file *file,
42475 struct video_device *vfd = video_devdata(file);
42476 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
42477 bool write_only = false;
42478- struct v4l2_ioctl_info default_info;
42479+ v4l2_ioctl_info_no_const default_info;
42480 const struct v4l2_ioctl_info *info;
42481 void *fh = file->private_data;
42482 struct v4l2_fh *vfh = NULL;
42483@@ -2251,7 +2252,7 @@ done:
42484 }
42485
42486 static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
42487- void * __user *user_ptr, void ***kernel_ptr)
42488+ void __user **user_ptr, void ***kernel_ptr)
42489 {
42490 int ret = 0;
42491
42492@@ -2267,7 +2268,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
42493 ret = -EINVAL;
42494 break;
42495 }
42496- *user_ptr = (void __user *)buf->m.planes;
42497+ *user_ptr = (void __force_user *)buf->m.planes;
42498 *kernel_ptr = (void *)&buf->m.planes;
42499 *array_size = sizeof(struct v4l2_plane) * buf->length;
42500 ret = 1;
42501@@ -2302,7 +2303,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
42502 ret = -EINVAL;
42503 break;
42504 }
42505- *user_ptr = (void __user *)ctrls->controls;
42506+ *user_ptr = (void __force_user *)ctrls->controls;
42507 *kernel_ptr = (void *)&ctrls->controls;
42508 *array_size = sizeof(struct v4l2_ext_control)
42509 * ctrls->count;
42510diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
42511index 767ff4d..c69d259 100644
42512--- a/drivers/message/fusion/mptbase.c
42513+++ b/drivers/message/fusion/mptbase.c
42514@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
42515 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
42516 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
42517
42518+#ifdef CONFIG_GRKERNSEC_HIDESYM
42519+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
42520+#else
42521 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
42522 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
42523+#endif
42524+
42525 /*
42526 * Rounding UP to nearest 4-kB boundary here...
42527 */
42528@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
42529 ioc->facts.GlobalCredits);
42530
42531 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
42532+#ifdef CONFIG_GRKERNSEC_HIDESYM
42533+ NULL, NULL);
42534+#else
42535 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
42536+#endif
42537 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
42538 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
42539 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
42540diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
42541index dd239bd..689c4f7 100644
42542--- a/drivers/message/fusion/mptsas.c
42543+++ b/drivers/message/fusion/mptsas.c
42544@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
42545 return 0;
42546 }
42547
42548+static inline void
42549+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
42550+{
42551+ if (phy_info->port_details) {
42552+ phy_info->port_details->rphy = rphy;
42553+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
42554+ ioc->name, rphy));
42555+ }
42556+
42557+ if (rphy) {
42558+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
42559+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
42560+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
42561+ ioc->name, rphy, rphy->dev.release));
42562+ }
42563+}
42564+
42565 /* no mutex */
42566 static void
42567 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
42568@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
42569 return NULL;
42570 }
42571
42572-static inline void
42573-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
42574-{
42575- if (phy_info->port_details) {
42576- phy_info->port_details->rphy = rphy;
42577- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
42578- ioc->name, rphy));
42579- }
42580-
42581- if (rphy) {
42582- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
42583- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
42584- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
42585- ioc->name, rphy, rphy->dev.release));
42586- }
42587-}
42588-
42589 static inline struct sas_port *
42590 mptsas_get_port(struct mptsas_phyinfo *phy_info)
42591 {
42592diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
42593index 727819c..ad74694 100644
42594--- a/drivers/message/fusion/mptscsih.c
42595+++ b/drivers/message/fusion/mptscsih.c
42596@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
42597
42598 h = shost_priv(SChost);
42599
42600- if (h) {
42601- if (h->info_kbuf == NULL)
42602- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
42603- return h->info_kbuf;
42604- h->info_kbuf[0] = '\0';
42605+ if (!h)
42606+ return NULL;
42607
42608- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
42609- h->info_kbuf[size-1] = '\0';
42610- }
42611+ if (h->info_kbuf == NULL)
42612+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
42613+ return h->info_kbuf;
42614+ h->info_kbuf[0] = '\0';
42615+
42616+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
42617+ h->info_kbuf[size-1] = '\0';
42618
42619 return h->info_kbuf;
42620 }
42621diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
42622index b7d87cd..9890039 100644
42623--- a/drivers/message/i2o/i2o_proc.c
42624+++ b/drivers/message/i2o/i2o_proc.c
42625@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
42626 "Array Controller Device"
42627 };
42628
42629-static char *chtostr(char *tmp, u8 *chars, int n)
42630-{
42631- tmp[0] = 0;
42632- return strncat(tmp, (char *)chars, n);
42633-}
42634-
42635 static int i2o_report_query_status(struct seq_file *seq, int block_status,
42636 char *group)
42637 {
42638@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
42639 } *result;
42640
42641 i2o_exec_execute_ddm_table ddm_table;
42642- char tmp[28 + 1];
42643
42644 result = kmalloc(sizeof(*result), GFP_KERNEL);
42645 if (!result)
42646@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
42647
42648 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
42649 seq_printf(seq, "%-#8x", ddm_table.module_id);
42650- seq_printf(seq, "%-29s",
42651- chtostr(tmp, ddm_table.module_name_version, 28));
42652+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
42653 seq_printf(seq, "%9d ", ddm_table.data_size);
42654 seq_printf(seq, "%8d", ddm_table.code_size);
42655
42656@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
42657
42658 i2o_driver_result_table *result;
42659 i2o_driver_store_table *dst;
42660- char tmp[28 + 1];
42661
42662 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
42663 if (result == NULL)
42664@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
42665
42666 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
42667 seq_printf(seq, "%-#8x", dst->module_id);
42668- seq_printf(seq, "%-29s",
42669- chtostr(tmp, dst->module_name_version, 28));
42670- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
42671+ seq_printf(seq, "%-.28s", dst->module_name_version);
42672+ seq_printf(seq, "%-.8s", dst->date);
42673 seq_printf(seq, "%8d ", dst->module_size);
42674 seq_printf(seq, "%8d ", dst->mpb_size);
42675 seq_printf(seq, "0x%04x", dst->module_flags);
42676@@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
42677 // == (allow) 512d bytes (max)
42678 static u16 *work16 = (u16 *) work32;
42679 int token;
42680- char tmp[16 + 1];
42681
42682 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
42683
42684@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
42685 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
42686 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
42687 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
42688- seq_printf(seq, "Vendor info : %s\n",
42689- chtostr(tmp, (u8 *) (work32 + 2), 16));
42690- seq_printf(seq, "Product info : %s\n",
42691- chtostr(tmp, (u8 *) (work32 + 6), 16));
42692- seq_printf(seq, "Description : %s\n",
42693- chtostr(tmp, (u8 *) (work32 + 10), 16));
42694- seq_printf(seq, "Product rev. : %s\n",
42695- chtostr(tmp, (u8 *) (work32 + 14), 8));
42696+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
42697+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
42698+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
42699+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
42700
42701 seq_printf(seq, "Serial number : ");
42702 print_serial_number(seq, (u8 *) (work32 + 16),
42703@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
42704 u8 pad[256]; // allow up to 256 byte (max) serial number
42705 } result;
42706
42707- char tmp[24 + 1];
42708-
42709 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
42710
42711 if (token < 0) {
42712@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
42713 }
42714
42715 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
42716- seq_printf(seq, "Module name : %s\n",
42717- chtostr(tmp, result.module_name, 24));
42718- seq_printf(seq, "Module revision : %s\n",
42719- chtostr(tmp, result.module_rev, 8));
42720+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
42721+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
42722
42723 seq_printf(seq, "Serial number : ");
42724 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
42725@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
42726 u8 instance_number[4];
42727 } result;
42728
42729- char tmp[64 + 1];
42730-
42731 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
42732
42733 if (token < 0) {
42734@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
42735 return 0;
42736 }
42737
42738- seq_printf(seq, "Device name : %s\n",
42739- chtostr(tmp, result.device_name, 64));
42740- seq_printf(seq, "Service name : %s\n",
42741- chtostr(tmp, result.service_name, 64));
42742- seq_printf(seq, "Physical name : %s\n",
42743- chtostr(tmp, result.physical_location, 64));
42744- seq_printf(seq, "Instance number : %s\n",
42745- chtostr(tmp, result.instance_number, 4));
42746+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
42747+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
42748+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
42749+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
42750
42751 return 0;
42752 }
42753diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
42754index a8c08f3..155fe3d 100644
42755--- a/drivers/message/i2o/iop.c
42756+++ b/drivers/message/i2o/iop.c
42757@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
42758
42759 spin_lock_irqsave(&c->context_list_lock, flags);
42760
42761- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
42762- atomic_inc(&c->context_list_counter);
42763+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
42764+ atomic_inc_unchecked(&c->context_list_counter);
42765
42766- entry->context = atomic_read(&c->context_list_counter);
42767+ entry->context = atomic_read_unchecked(&c->context_list_counter);
42768
42769 list_add(&entry->list, &c->context_list);
42770
42771@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
42772
42773 #if BITS_PER_LONG == 64
42774 spin_lock_init(&c->context_list_lock);
42775- atomic_set(&c->context_list_counter, 0);
42776+ atomic_set_unchecked(&c->context_list_counter, 0);
42777 INIT_LIST_HEAD(&c->context_list);
42778 #endif
42779
42780diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
42781index 45ece11..8efa218 100644
42782--- a/drivers/mfd/janz-cmodio.c
42783+++ b/drivers/mfd/janz-cmodio.c
42784@@ -13,6 +13,7 @@
42785
42786 #include <linux/kernel.h>
42787 #include <linux/module.h>
42788+#include <linux/slab.h>
42789 #include <linux/init.h>
42790 #include <linux/pci.h>
42791 #include <linux/interrupt.h>
42792diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
42793index a5f9888..1c0ed56 100644
42794--- a/drivers/mfd/twl4030-irq.c
42795+++ b/drivers/mfd/twl4030-irq.c
42796@@ -35,6 +35,7 @@
42797 #include <linux/of.h>
42798 #include <linux/irqdomain.h>
42799 #include <linux/i2c/twl.h>
42800+#include <asm/pgtable.h>
42801
42802 #include "twl-core.h"
42803
42804@@ -728,10 +729,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
42805 * Install an irq handler for each of the SIH modules;
42806 * clone dummy irq_chip since PIH can't *do* anything
42807 */
42808- twl4030_irq_chip = dummy_irq_chip;
42809- twl4030_irq_chip.name = "twl4030";
42810+ pax_open_kernel();
42811+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
42812+ *(const char **)&twl4030_irq_chip.name = "twl4030";
42813
42814- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
42815+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
42816+ pax_close_kernel();
42817
42818 for (i = irq_base; i < irq_end; i++) {
42819 irq_set_chip_and_handler(i, &twl4030_irq_chip,
42820diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
42821index 277a8db..0e0b754 100644
42822--- a/drivers/mfd/twl6030-irq.c
42823+++ b/drivers/mfd/twl6030-irq.c
42824@@ -387,10 +387,12 @@ int twl6030_init_irq(struct device *dev, int irq_num)
42825 * install an irq handler for each of the modules;
42826 * clone dummy irq_chip since PIH can't *do* anything
42827 */
42828- twl6030_irq_chip = dummy_irq_chip;
42829- twl6030_irq_chip.name = "twl6030";
42830- twl6030_irq_chip.irq_set_type = NULL;
42831- twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
42832+ pax_open_kernel();
42833+ memcpy((void *)&twl6030_irq_chip, &dummy_irq_chip, sizeof twl6030_irq_chip);
42834+ *(const char **)&twl6030_irq_chip.name = "twl6030";
42835+ *(void **)&twl6030_irq_chip.irq_set_type = NULL;
42836+ *(void **)&twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
42837+ pax_close_kernel();
42838
42839 for (i = irq_base; i < irq_end; i++) {
42840 irq_set_chip_and_handler(i, &twl6030_irq_chip,
42841diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
42842index f32550a..e3e52a2 100644
42843--- a/drivers/misc/c2port/core.c
42844+++ b/drivers/misc/c2port/core.c
42845@@ -920,7 +920,9 @@ struct c2port_device *c2port_device_register(char *name,
42846 mutex_init(&c2dev->mutex);
42847
42848 /* Create binary file */
42849- c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
42850+ pax_open_kernel();
42851+ *(size_t *)&c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
42852+ pax_close_kernel();
42853 ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
42854 if (unlikely(ret))
42855 goto error_device_create_bin_file;
42856diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
42857index 36f5d52..32311c3 100644
42858--- a/drivers/misc/kgdbts.c
42859+++ b/drivers/misc/kgdbts.c
42860@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
42861 char before[BREAK_INSTR_SIZE];
42862 char after[BREAK_INSTR_SIZE];
42863
42864- probe_kernel_read(before, (char *)kgdbts_break_test,
42865+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
42866 BREAK_INSTR_SIZE);
42867 init_simple_test();
42868 ts.tst = plant_and_detach_test;
42869@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
42870 /* Activate test with initial breakpoint */
42871 if (!is_early)
42872 kgdb_breakpoint();
42873- probe_kernel_read(after, (char *)kgdbts_break_test,
42874+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
42875 BREAK_INSTR_SIZE);
42876 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
42877 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
42878diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
42879index 4cd4a3d..b48cbc7 100644
42880--- a/drivers/misc/lis3lv02d/lis3lv02d.c
42881+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
42882@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
42883 * the lid is closed. This leads to interrupts as soon as a little move
42884 * is done.
42885 */
42886- atomic_inc(&lis3->count);
42887+ atomic_inc_unchecked(&lis3->count);
42888
42889 wake_up_interruptible(&lis3->misc_wait);
42890 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
42891@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
42892 if (lis3->pm_dev)
42893 pm_runtime_get_sync(lis3->pm_dev);
42894
42895- atomic_set(&lis3->count, 0);
42896+ atomic_set_unchecked(&lis3->count, 0);
42897 return 0;
42898 }
42899
42900@@ -616,7 +616,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
42901 add_wait_queue(&lis3->misc_wait, &wait);
42902 while (true) {
42903 set_current_state(TASK_INTERRUPTIBLE);
42904- data = atomic_xchg(&lis3->count, 0);
42905+ data = atomic_xchg_unchecked(&lis3->count, 0);
42906 if (data)
42907 break;
42908
42909@@ -657,7 +657,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
42910 struct lis3lv02d, miscdev);
42911
42912 poll_wait(file, &lis3->misc_wait, wait);
42913- if (atomic_read(&lis3->count))
42914+ if (atomic_read_unchecked(&lis3->count))
42915 return POLLIN | POLLRDNORM;
42916 return 0;
42917 }
42918diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
42919index c439c82..1f20f57 100644
42920--- a/drivers/misc/lis3lv02d/lis3lv02d.h
42921+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
42922@@ -297,7 +297,7 @@ struct lis3lv02d {
42923 struct input_polled_dev *idev; /* input device */
42924 struct platform_device *pdev; /* platform device */
42925 struct regulator_bulk_data regulators[2];
42926- atomic_t count; /* interrupt count after last read */
42927+ atomic_unchecked_t count; /* interrupt count after last read */
42928 union axis_conversion ac; /* hw -> logical axis */
42929 int mapped_btns[3];
42930
42931diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
42932index 2f30bad..c4c13d0 100644
42933--- a/drivers/misc/sgi-gru/gruhandles.c
42934+++ b/drivers/misc/sgi-gru/gruhandles.c
42935@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
42936 unsigned long nsec;
42937
42938 nsec = CLKS2NSEC(clks);
42939- atomic_long_inc(&mcs_op_statistics[op].count);
42940- atomic_long_add(nsec, &mcs_op_statistics[op].total);
42941+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
42942+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
42943 if (mcs_op_statistics[op].max < nsec)
42944 mcs_op_statistics[op].max = nsec;
42945 }
42946diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
42947index 797d796..ae8f01e 100644
42948--- a/drivers/misc/sgi-gru/gruprocfs.c
42949+++ b/drivers/misc/sgi-gru/gruprocfs.c
42950@@ -32,9 +32,9 @@
42951
42952 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
42953
42954-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
42955+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
42956 {
42957- unsigned long val = atomic_long_read(v);
42958+ unsigned long val = atomic_long_read_unchecked(v);
42959
42960 seq_printf(s, "%16lu %s\n", val, id);
42961 }
42962@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
42963
42964 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
42965 for (op = 0; op < mcsop_last; op++) {
42966- count = atomic_long_read(&mcs_op_statistics[op].count);
42967- total = atomic_long_read(&mcs_op_statistics[op].total);
42968+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
42969+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
42970 max = mcs_op_statistics[op].max;
42971 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
42972 count ? total / count : 0, max);
42973diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
42974index 5c3ce24..4915ccb 100644
42975--- a/drivers/misc/sgi-gru/grutables.h
42976+++ b/drivers/misc/sgi-gru/grutables.h
42977@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
42978 * GRU statistics.
42979 */
42980 struct gru_stats_s {
42981- atomic_long_t vdata_alloc;
42982- atomic_long_t vdata_free;
42983- atomic_long_t gts_alloc;
42984- atomic_long_t gts_free;
42985- atomic_long_t gms_alloc;
42986- atomic_long_t gms_free;
42987- atomic_long_t gts_double_allocate;
42988- atomic_long_t assign_context;
42989- atomic_long_t assign_context_failed;
42990- atomic_long_t free_context;
42991- atomic_long_t load_user_context;
42992- atomic_long_t load_kernel_context;
42993- atomic_long_t lock_kernel_context;
42994- atomic_long_t unlock_kernel_context;
42995- atomic_long_t steal_user_context;
42996- atomic_long_t steal_kernel_context;
42997- atomic_long_t steal_context_failed;
42998- atomic_long_t nopfn;
42999- atomic_long_t asid_new;
43000- atomic_long_t asid_next;
43001- atomic_long_t asid_wrap;
43002- atomic_long_t asid_reuse;
43003- atomic_long_t intr;
43004- atomic_long_t intr_cbr;
43005- atomic_long_t intr_tfh;
43006- atomic_long_t intr_spurious;
43007- atomic_long_t intr_mm_lock_failed;
43008- atomic_long_t call_os;
43009- atomic_long_t call_os_wait_queue;
43010- atomic_long_t user_flush_tlb;
43011- atomic_long_t user_unload_context;
43012- atomic_long_t user_exception;
43013- atomic_long_t set_context_option;
43014- atomic_long_t check_context_retarget_intr;
43015- atomic_long_t check_context_unload;
43016- atomic_long_t tlb_dropin;
43017- atomic_long_t tlb_preload_page;
43018- atomic_long_t tlb_dropin_fail_no_asid;
43019- atomic_long_t tlb_dropin_fail_upm;
43020- atomic_long_t tlb_dropin_fail_invalid;
43021- atomic_long_t tlb_dropin_fail_range_active;
43022- atomic_long_t tlb_dropin_fail_idle;
43023- atomic_long_t tlb_dropin_fail_fmm;
43024- atomic_long_t tlb_dropin_fail_no_exception;
43025- atomic_long_t tfh_stale_on_fault;
43026- atomic_long_t mmu_invalidate_range;
43027- atomic_long_t mmu_invalidate_page;
43028- atomic_long_t flush_tlb;
43029- atomic_long_t flush_tlb_gru;
43030- atomic_long_t flush_tlb_gru_tgh;
43031- atomic_long_t flush_tlb_gru_zero_asid;
43032+ atomic_long_unchecked_t vdata_alloc;
43033+ atomic_long_unchecked_t vdata_free;
43034+ atomic_long_unchecked_t gts_alloc;
43035+ atomic_long_unchecked_t gts_free;
43036+ atomic_long_unchecked_t gms_alloc;
43037+ atomic_long_unchecked_t gms_free;
43038+ atomic_long_unchecked_t gts_double_allocate;
43039+ atomic_long_unchecked_t assign_context;
43040+ atomic_long_unchecked_t assign_context_failed;
43041+ atomic_long_unchecked_t free_context;
43042+ atomic_long_unchecked_t load_user_context;
43043+ atomic_long_unchecked_t load_kernel_context;
43044+ atomic_long_unchecked_t lock_kernel_context;
43045+ atomic_long_unchecked_t unlock_kernel_context;
43046+ atomic_long_unchecked_t steal_user_context;
43047+ atomic_long_unchecked_t steal_kernel_context;
43048+ atomic_long_unchecked_t steal_context_failed;
43049+ atomic_long_unchecked_t nopfn;
43050+ atomic_long_unchecked_t asid_new;
43051+ atomic_long_unchecked_t asid_next;
43052+ atomic_long_unchecked_t asid_wrap;
43053+ atomic_long_unchecked_t asid_reuse;
43054+ atomic_long_unchecked_t intr;
43055+ atomic_long_unchecked_t intr_cbr;
43056+ atomic_long_unchecked_t intr_tfh;
43057+ atomic_long_unchecked_t intr_spurious;
43058+ atomic_long_unchecked_t intr_mm_lock_failed;
43059+ atomic_long_unchecked_t call_os;
43060+ atomic_long_unchecked_t call_os_wait_queue;
43061+ atomic_long_unchecked_t user_flush_tlb;
43062+ atomic_long_unchecked_t user_unload_context;
43063+ atomic_long_unchecked_t user_exception;
43064+ atomic_long_unchecked_t set_context_option;
43065+ atomic_long_unchecked_t check_context_retarget_intr;
43066+ atomic_long_unchecked_t check_context_unload;
43067+ atomic_long_unchecked_t tlb_dropin;
43068+ atomic_long_unchecked_t tlb_preload_page;
43069+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
43070+ atomic_long_unchecked_t tlb_dropin_fail_upm;
43071+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
43072+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
43073+ atomic_long_unchecked_t tlb_dropin_fail_idle;
43074+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
43075+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
43076+ atomic_long_unchecked_t tfh_stale_on_fault;
43077+ atomic_long_unchecked_t mmu_invalidate_range;
43078+ atomic_long_unchecked_t mmu_invalidate_page;
43079+ atomic_long_unchecked_t flush_tlb;
43080+ atomic_long_unchecked_t flush_tlb_gru;
43081+ atomic_long_unchecked_t flush_tlb_gru_tgh;
43082+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
43083
43084- atomic_long_t copy_gpa;
43085- atomic_long_t read_gpa;
43086+ atomic_long_unchecked_t copy_gpa;
43087+ atomic_long_unchecked_t read_gpa;
43088
43089- atomic_long_t mesq_receive;
43090- atomic_long_t mesq_receive_none;
43091- atomic_long_t mesq_send;
43092- atomic_long_t mesq_send_failed;
43093- atomic_long_t mesq_noop;
43094- atomic_long_t mesq_send_unexpected_error;
43095- atomic_long_t mesq_send_lb_overflow;
43096- atomic_long_t mesq_send_qlimit_reached;
43097- atomic_long_t mesq_send_amo_nacked;
43098- atomic_long_t mesq_send_put_nacked;
43099- atomic_long_t mesq_page_overflow;
43100- atomic_long_t mesq_qf_locked;
43101- atomic_long_t mesq_qf_noop_not_full;
43102- atomic_long_t mesq_qf_switch_head_failed;
43103- atomic_long_t mesq_qf_unexpected_error;
43104- atomic_long_t mesq_noop_unexpected_error;
43105- atomic_long_t mesq_noop_lb_overflow;
43106- atomic_long_t mesq_noop_qlimit_reached;
43107- atomic_long_t mesq_noop_amo_nacked;
43108- atomic_long_t mesq_noop_put_nacked;
43109- atomic_long_t mesq_noop_page_overflow;
43110+ atomic_long_unchecked_t mesq_receive;
43111+ atomic_long_unchecked_t mesq_receive_none;
43112+ atomic_long_unchecked_t mesq_send;
43113+ atomic_long_unchecked_t mesq_send_failed;
43114+ atomic_long_unchecked_t mesq_noop;
43115+ atomic_long_unchecked_t mesq_send_unexpected_error;
43116+ atomic_long_unchecked_t mesq_send_lb_overflow;
43117+ atomic_long_unchecked_t mesq_send_qlimit_reached;
43118+ atomic_long_unchecked_t mesq_send_amo_nacked;
43119+ atomic_long_unchecked_t mesq_send_put_nacked;
43120+ atomic_long_unchecked_t mesq_page_overflow;
43121+ atomic_long_unchecked_t mesq_qf_locked;
43122+ atomic_long_unchecked_t mesq_qf_noop_not_full;
43123+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
43124+ atomic_long_unchecked_t mesq_qf_unexpected_error;
43125+ atomic_long_unchecked_t mesq_noop_unexpected_error;
43126+ atomic_long_unchecked_t mesq_noop_lb_overflow;
43127+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
43128+ atomic_long_unchecked_t mesq_noop_amo_nacked;
43129+ atomic_long_unchecked_t mesq_noop_put_nacked;
43130+ atomic_long_unchecked_t mesq_noop_page_overflow;
43131
43132 };
43133
43134@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
43135 tghop_invalidate, mcsop_last};
43136
43137 struct mcs_op_statistic {
43138- atomic_long_t count;
43139- atomic_long_t total;
43140+ atomic_long_unchecked_t count;
43141+ atomic_long_unchecked_t total;
43142 unsigned long max;
43143 };
43144
43145@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
43146
43147 #define STAT(id) do { \
43148 if (gru_options & OPT_STATS) \
43149- atomic_long_inc(&gru_stats.id); \
43150+ atomic_long_inc_unchecked(&gru_stats.id); \
43151 } while (0)
43152
43153 #ifdef CONFIG_SGI_GRU_DEBUG
43154diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
43155index c862cd4..0d176fe 100644
43156--- a/drivers/misc/sgi-xp/xp.h
43157+++ b/drivers/misc/sgi-xp/xp.h
43158@@ -288,7 +288,7 @@ struct xpc_interface {
43159 xpc_notify_func, void *);
43160 void (*received) (short, int, void *);
43161 enum xp_retval (*partid_to_nasids) (short, void *);
43162-};
43163+} __no_const;
43164
43165 extern struct xpc_interface xpc_interface;
43166
43167diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
43168index b94d5f7..7f494c5 100644
43169--- a/drivers/misc/sgi-xp/xpc.h
43170+++ b/drivers/misc/sgi-xp/xpc.h
43171@@ -835,6 +835,7 @@ struct xpc_arch_operations {
43172 void (*received_payload) (struct xpc_channel *, void *);
43173 void (*notify_senders_of_disconnect) (struct xpc_channel *);
43174 };
43175+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
43176
43177 /* struct xpc_partition act_state values (for XPC HB) */
43178
43179@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
43180 /* found in xpc_main.c */
43181 extern struct device *xpc_part;
43182 extern struct device *xpc_chan;
43183-extern struct xpc_arch_operations xpc_arch_ops;
43184+extern xpc_arch_operations_no_const xpc_arch_ops;
43185 extern int xpc_disengage_timelimit;
43186 extern int xpc_disengage_timedout;
43187 extern int xpc_activate_IRQ_rcvd;
43188diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
43189index d971817..33bdca5 100644
43190--- a/drivers/misc/sgi-xp/xpc_main.c
43191+++ b/drivers/misc/sgi-xp/xpc_main.c
43192@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
43193 .notifier_call = xpc_system_die,
43194 };
43195
43196-struct xpc_arch_operations xpc_arch_ops;
43197+xpc_arch_operations_no_const xpc_arch_ops;
43198
43199 /*
43200 * Timer function to enforce the timelimit on the partition disengage.
43201@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
43202
43203 if (((die_args->trapnr == X86_TRAP_MF) ||
43204 (die_args->trapnr == X86_TRAP_XF)) &&
43205- !user_mode_vm(die_args->regs))
43206+ !user_mode(die_args->regs))
43207 xpc_die_deactivate();
43208
43209 break;
43210diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
43211index 49f04bc..65660c2 100644
43212--- a/drivers/mmc/core/mmc_ops.c
43213+++ b/drivers/mmc/core/mmc_ops.c
43214@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
43215 void *data_buf;
43216 int is_on_stack;
43217
43218- is_on_stack = object_is_on_stack(buf);
43219+ is_on_stack = object_starts_on_stack(buf);
43220 if (is_on_stack) {
43221 /*
43222 * dma onto stack is unsafe/nonportable, but callers to this
43223diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
43224index 0b74189..818358f 100644
43225--- a/drivers/mmc/host/dw_mmc.h
43226+++ b/drivers/mmc/host/dw_mmc.h
43227@@ -202,5 +202,5 @@ struct dw_mci_drv_data {
43228 void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
43229 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
43230 int (*parse_dt)(struct dw_mci *host);
43231-};
43232+} __do_const;
43233 #endif /* _DW_MMC_H_ */
43234diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
43235index c6f6246..60760a8 100644
43236--- a/drivers/mmc/host/sdhci-s3c.c
43237+++ b/drivers/mmc/host/sdhci-s3c.c
43238@@ -664,9 +664,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
43239 * we can use overriding functions instead of default.
43240 */
43241 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
43242- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
43243- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
43244- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
43245+ pax_open_kernel();
43246+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
43247+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
43248+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
43249+ pax_close_kernel();
43250 }
43251
43252 /* It supports additional host capabilities if needed */
43253diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
43254index 0c8bb6b..6f35deb 100644
43255--- a/drivers/mtd/nand/denali.c
43256+++ b/drivers/mtd/nand/denali.c
43257@@ -24,6 +24,7 @@
43258 #include <linux/slab.h>
43259 #include <linux/mtd/mtd.h>
43260 #include <linux/module.h>
43261+#include <linux/slab.h>
43262
43263 #include "denali.h"
43264
43265diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
43266index 51b9d6a..52af9a7 100644
43267--- a/drivers/mtd/nftlmount.c
43268+++ b/drivers/mtd/nftlmount.c
43269@@ -24,6 +24,7 @@
43270 #include <asm/errno.h>
43271 #include <linux/delay.h>
43272 #include <linux/slab.h>
43273+#include <linux/sched.h>
43274 #include <linux/mtd/mtd.h>
43275 #include <linux/mtd/nand.h>
43276 #include <linux/mtd/nftl.h>
43277diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
43278index f9d5615..99dd95f 100644
43279--- a/drivers/mtd/sm_ftl.c
43280+++ b/drivers/mtd/sm_ftl.c
43281@@ -56,7 +56,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
43282 #define SM_CIS_VENDOR_OFFSET 0x59
43283 struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
43284 {
43285- struct attribute_group *attr_group;
43286+ attribute_group_no_const *attr_group;
43287 struct attribute **attributes;
43288 struct sm_sysfs_attribute *vendor_attribute;
43289
43290diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
43291index f975696..4597e21 100644
43292--- a/drivers/net/bonding/bond_main.c
43293+++ b/drivers/net/bonding/bond_main.c
43294@@ -4870,7 +4870,7 @@ static unsigned int bond_get_num_tx_queues(void)
43295 return tx_queues;
43296 }
43297
43298-static struct rtnl_link_ops bond_link_ops __read_mostly = {
43299+static struct rtnl_link_ops bond_link_ops = {
43300 .kind = "bond",
43301 .priv_size = sizeof(struct bonding),
43302 .setup = bond_setup,
43303@@ -4995,8 +4995,8 @@ static void __exit bonding_exit(void)
43304
43305 bond_destroy_debugfs();
43306
43307- rtnl_link_unregister(&bond_link_ops);
43308 unregister_pernet_subsys(&bond_net_ops);
43309+ rtnl_link_unregister(&bond_link_ops);
43310
43311 #ifdef CONFIG_NET_POLL_CONTROLLER
43312 /*
43313diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
43314index e1d2643..7f4133b 100644
43315--- a/drivers/net/ethernet/8390/ax88796.c
43316+++ b/drivers/net/ethernet/8390/ax88796.c
43317@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
43318 if (ax->plat->reg_offsets)
43319 ei_local->reg_offset = ax->plat->reg_offsets;
43320 else {
43321+ resource_size_t _mem_size = mem_size;
43322+ do_div(_mem_size, 0x18);
43323 ei_local->reg_offset = ax->reg_offsets;
43324 for (ret = 0; ret < 0x18; ret++)
43325- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
43326+ ax->reg_offsets[ret] = _mem_size * ret;
43327 }
43328
43329 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
43330diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
43331index 151675d..0139a9d 100644
43332--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
43333+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
43334@@ -1112,7 +1112,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
43335 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
43336 {
43337 /* RX_MODE controlling object */
43338- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
43339+ bnx2x_init_rx_mode_obj(bp);
43340
43341 /* multicast configuration controlling object */
43342 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
43343diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
43344index ce1a916..10b52b0 100644
43345--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
43346+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
43347@@ -960,6 +960,9 @@ static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
43348 struct bnx2x *bp = netdev_priv(dev);
43349
43350 /* Use the ethtool_dump "flag" field as the dump preset index */
43351+ if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS)
43352+ return -EINVAL;
43353+
43354 bp->dump_preset_idx = val->flag;
43355 return 0;
43356 }
43357@@ -986,8 +989,6 @@ static int bnx2x_get_dump_data(struct net_device *dev,
43358 struct bnx2x *bp = netdev_priv(dev);
43359 struct dump_header dump_hdr = {0};
43360
43361- memset(p, 0, dump->len);
43362-
43363 /* Disable parity attentions as long as following dump may
43364 * cause false alarms by reading never written registers. We
43365 * will re-enable parity attentions right after the dump.
43366diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
43367index b4c9dea..2a9927f 100644
43368--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
43369+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
43370@@ -11497,6 +11497,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
43371 bp->min_msix_vec_cnt = 2;
43372 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
43373
43374+ bp->dump_preset_idx = 1;
43375+
43376 return rc;
43377 }
43378
43379diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
43380index 32a9609..0b1c53a 100644
43381--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
43382+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
43383@@ -2387,15 +2387,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
43384 return rc;
43385 }
43386
43387-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
43388- struct bnx2x_rx_mode_obj *o)
43389+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
43390 {
43391 if (CHIP_IS_E1x(bp)) {
43392- o->wait_comp = bnx2x_empty_rx_mode_wait;
43393- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
43394+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
43395+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
43396 } else {
43397- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
43398- o->config_rx_mode = bnx2x_set_rx_mode_e2;
43399+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
43400+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
43401 }
43402 }
43403
43404diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
43405index 43c00bc..dd1d03d 100644
43406--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
43407+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
43408@@ -1321,8 +1321,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
43409
43410 /********************* RX MODE ****************/
43411
43412-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
43413- struct bnx2x_rx_mode_obj *o);
43414+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
43415
43416 /**
43417 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
43418diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
43419index ff6e30e..87e8452 100644
43420--- a/drivers/net/ethernet/broadcom/tg3.h
43421+++ b/drivers/net/ethernet/broadcom/tg3.h
43422@@ -147,6 +147,7 @@
43423 #define CHIPREV_ID_5750_A0 0x4000
43424 #define CHIPREV_ID_5750_A1 0x4001
43425 #define CHIPREV_ID_5750_A3 0x4003
43426+#define CHIPREV_ID_5750_C1 0x4201
43427 #define CHIPREV_ID_5750_C2 0x4202
43428 #define CHIPREV_ID_5752_A0_HW 0x5000
43429 #define CHIPREV_ID_5752_A0 0x6000
43430diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
43431index 71497e8..b650951 100644
43432--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
43433+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
43434@@ -3037,7 +3037,9 @@ static void t3_io_resume(struct pci_dev *pdev)
43435 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
43436 t3_read_reg(adapter, A_PCIE_PEX_ERR));
43437
43438+ rtnl_lock();
43439 t3_resume_ports(adapter);
43440+ rtnl_unlock();
43441 }
43442
43443 static const struct pci_error_handlers t3_err_handler = {
43444diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
43445index 8cffcdf..aadf043 100644
43446--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
43447+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
43448@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
43449 */
43450 struct l2t_skb_cb {
43451 arp_failure_handler_func arp_failure_handler;
43452-};
43453+} __no_const;
43454
43455 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
43456
43457diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
43458index 4c83003..2a2a5b9 100644
43459--- a/drivers/net/ethernet/dec/tulip/de4x5.c
43460+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
43461@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
43462 for (i=0; i<ETH_ALEN; i++) {
43463 tmp.addr[i] = dev->dev_addr[i];
43464 }
43465- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
43466+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
43467 break;
43468
43469 case DE4X5_SET_HWADDR: /* Set the hardware address */
43470@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
43471 spin_lock_irqsave(&lp->lock, flags);
43472 memcpy(&statbuf, &lp->pktStats, ioc->len);
43473 spin_unlock_irqrestore(&lp->lock, flags);
43474- if (copy_to_user(ioc->data, &statbuf, ioc->len))
43475+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
43476 return -EFAULT;
43477 break;
43478 }
43479diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
43480index 6e43426..1bd8365 100644
43481--- a/drivers/net/ethernet/emulex/benet/be_main.c
43482+++ b/drivers/net/ethernet/emulex/benet/be_main.c
43483@@ -469,7 +469,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
43484
43485 if (wrapped)
43486 newacc += 65536;
43487- ACCESS_ONCE(*acc) = newacc;
43488+ ACCESS_ONCE_RW(*acc) = newacc;
43489 }
43490
43491 void populate_erx_stats(struct be_adapter *adapter,
43492diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
43493index 21b85fb..b49e5fc 100644
43494--- a/drivers/net/ethernet/faraday/ftgmac100.c
43495+++ b/drivers/net/ethernet/faraday/ftgmac100.c
43496@@ -31,6 +31,8 @@
43497 #include <linux/netdevice.h>
43498 #include <linux/phy.h>
43499 #include <linux/platform_device.h>
43500+#include <linux/interrupt.h>
43501+#include <linux/irqreturn.h>
43502 #include <net/ip.h>
43503
43504 #include "ftgmac100.h"
43505diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
43506index a6eda8d..935d273 100644
43507--- a/drivers/net/ethernet/faraday/ftmac100.c
43508+++ b/drivers/net/ethernet/faraday/ftmac100.c
43509@@ -31,6 +31,8 @@
43510 #include <linux/module.h>
43511 #include <linux/netdevice.h>
43512 #include <linux/platform_device.h>
43513+#include <linux/interrupt.h>
43514+#include <linux/irqreturn.h>
43515
43516 #include "ftmac100.h"
43517
43518diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
43519index 331987d..3be1135 100644
43520--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
43521+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
43522@@ -776,7 +776,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
43523 }
43524
43525 /* update the base incval used to calculate frequency adjustment */
43526- ACCESS_ONCE(adapter->base_incval) = incval;
43527+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
43528 smp_mb();
43529
43530 /* need lock to prevent incorrect read while modifying cyclecounter */
43531diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
43532index fbe5363..266b4e3 100644
43533--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
43534+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
43535@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
43536 struct __vxge_hw_fifo *fifo;
43537 struct vxge_hw_fifo_config *config;
43538 u32 txdl_size, txdl_per_memblock;
43539- struct vxge_hw_mempool_cbs fifo_mp_callback;
43540+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
43541+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
43542+ };
43543+
43544 struct __vxge_hw_virtualpath *vpath;
43545
43546 if ((vp == NULL) || (attr == NULL)) {
43547@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
43548 goto exit;
43549 }
43550
43551- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
43552-
43553 fifo->mempool =
43554 __vxge_hw_mempool_create(vpath->hldev,
43555 fifo->config->memblock_size,
43556diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
43557index 5e7fb1d..f8d1810 100644
43558--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
43559+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
43560@@ -1948,7 +1948,9 @@ int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
43561 op_mode = QLC_83XX_DEFAULT_OPMODE;
43562
43563 if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
43564- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
43565+ pax_open_kernel();
43566+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
43567+ pax_close_kernel();
43568 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
43569 } else {
43570 return -EIO;
43571diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
43572index b0c3de9..fc5857e 100644
43573--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
43574+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
43575@@ -200,15 +200,21 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
43576 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
43577 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
43578 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
43579- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
43580+ pax_open_kernel();
43581+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
43582+ pax_close_kernel();
43583 } else if (priv_level == QLCNIC_PRIV_FUNC) {
43584 ahw->op_mode = QLCNIC_PRIV_FUNC;
43585 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
43586- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
43587+ pax_open_kernel();
43588+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
43589+ pax_close_kernel();
43590 } else if (priv_level == QLCNIC_MGMT_FUNC) {
43591 ahw->op_mode = QLCNIC_MGMT_FUNC;
43592 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
43593- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
43594+ pax_open_kernel();
43595+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
43596+ pax_close_kernel();
43597 } else {
43598 return -EIO;
43599 }
43600diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
43601index 6acf82b..14b097e 100644
43602--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
43603+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
43604@@ -206,10 +206,10 @@ int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter)
43605 if (err) {
43606 dev_info(&adapter->pdev->dev,
43607 "Failed to set driver version in firmware\n");
43608- return -EIO;
43609+ err = -EIO;
43610 }
43611-
43612- return 0;
43613+ qlcnic_free_mbx_args(&cmd);
43614+ return err;
43615 }
43616
43617 int
43618diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
43619index d3f8797..82a03d3 100644
43620--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
43621+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
43622@@ -262,7 +262,7 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
43623
43624 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
43625 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
43626- memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
43627+ memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
43628
43629 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
43630 vlan_req->vlan_id = cpu_to_le16(vlan_id);
43631diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
43632index 887aebe..9095ff9 100644
43633--- a/drivers/net/ethernet/realtek/8139cp.c
43634+++ b/drivers/net/ethernet/realtek/8139cp.c
43635@@ -524,6 +524,7 @@ rx_status_loop:
43636 PCI_DMA_FROMDEVICE);
43637 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
43638 dev->stats.rx_dropped++;
43639+ kfree_skb(new_skb);
43640 goto rx_next;
43641 }
43642
43643diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
43644index 393f961..d343034 100644
43645--- a/drivers/net/ethernet/realtek/r8169.c
43646+++ b/drivers/net/ethernet/realtek/r8169.c
43647@@ -753,22 +753,22 @@ struct rtl8169_private {
43648 struct mdio_ops {
43649 void (*write)(struct rtl8169_private *, int, int);
43650 int (*read)(struct rtl8169_private *, int);
43651- } mdio_ops;
43652+ } __no_const mdio_ops;
43653
43654 struct pll_power_ops {
43655 void (*down)(struct rtl8169_private *);
43656 void (*up)(struct rtl8169_private *);
43657- } pll_power_ops;
43658+ } __no_const pll_power_ops;
43659
43660 struct jumbo_ops {
43661 void (*enable)(struct rtl8169_private *);
43662 void (*disable)(struct rtl8169_private *);
43663- } jumbo_ops;
43664+ } __no_const jumbo_ops;
43665
43666 struct csi_ops {
43667 void (*write)(struct rtl8169_private *, int, int);
43668 u32 (*read)(struct rtl8169_private *, int);
43669- } csi_ops;
43670+ } __no_const csi_ops;
43671
43672 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
43673 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
43674diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
43675index 9a95abf..36df7f9 100644
43676--- a/drivers/net/ethernet/sfc/ptp.c
43677+++ b/drivers/net/ethernet/sfc/ptp.c
43678@@ -535,7 +535,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
43679 (u32)((u64)ptp->start.dma_addr >> 32));
43680
43681 /* Clear flag that signals MC ready */
43682- ACCESS_ONCE(*start) = 0;
43683+ ACCESS_ONCE_RW(*start) = 0;
43684 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
43685 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
43686
43687diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
43688index 50617c5..b13724c 100644
43689--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
43690+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
43691@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
43692
43693 writel(value, ioaddr + MMC_CNTRL);
43694
43695- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
43696- MMC_CNTRL, value);
43697+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
43698+// MMC_CNTRL, value);
43699 }
43700
43701 /* To mask all all interrupts.*/
43702diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
43703index e6fe0d8..2b7d752 100644
43704--- a/drivers/net/hyperv/hyperv_net.h
43705+++ b/drivers/net/hyperv/hyperv_net.h
43706@@ -101,7 +101,7 @@ struct rndis_device {
43707
43708 enum rndis_device_state state;
43709 bool link_state;
43710- atomic_t new_req_id;
43711+ atomic_unchecked_t new_req_id;
43712
43713 spinlock_t request_lock;
43714 struct list_head req_list;
43715diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
43716index 0775f0a..d4fb316 100644
43717--- a/drivers/net/hyperv/rndis_filter.c
43718+++ b/drivers/net/hyperv/rndis_filter.c
43719@@ -104,7 +104,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
43720 * template
43721 */
43722 set = &rndis_msg->msg.set_req;
43723- set->req_id = atomic_inc_return(&dev->new_req_id);
43724+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
43725
43726 /* Add to the request list */
43727 spin_lock_irqsave(&dev->request_lock, flags);
43728@@ -752,7 +752,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
43729
43730 /* Setup the rndis set */
43731 halt = &request->request_msg.msg.halt_req;
43732- halt->req_id = atomic_inc_return(&dev->new_req_id);
43733+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
43734
43735 /* Ignore return since this msg is optional. */
43736 rndis_filter_send_request(dev, request);
43737diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
43738index bf0d55e..82bcfbd1 100644
43739--- a/drivers/net/ieee802154/fakehard.c
43740+++ b/drivers/net/ieee802154/fakehard.c
43741@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
43742 phy->transmit_power = 0xbf;
43743
43744 dev->netdev_ops = &fake_ops;
43745- dev->ml_priv = &fake_mlme;
43746+ dev->ml_priv = (void *)&fake_mlme;
43747
43748 priv = netdev_priv(dev);
43749 priv->phy = phy;
43750diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
43751index 6e91931..2b0ebe7 100644
43752--- a/drivers/net/macvlan.c
43753+++ b/drivers/net/macvlan.c
43754@@ -905,13 +905,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
43755 int macvlan_link_register(struct rtnl_link_ops *ops)
43756 {
43757 /* common fields */
43758- ops->priv_size = sizeof(struct macvlan_dev);
43759- ops->validate = macvlan_validate;
43760- ops->maxtype = IFLA_MACVLAN_MAX;
43761- ops->policy = macvlan_policy;
43762- ops->changelink = macvlan_changelink;
43763- ops->get_size = macvlan_get_size;
43764- ops->fill_info = macvlan_fill_info;
43765+ pax_open_kernel();
43766+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
43767+ *(void **)&ops->validate = macvlan_validate;
43768+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
43769+ *(const void **)&ops->policy = macvlan_policy;
43770+ *(void **)&ops->changelink = macvlan_changelink;
43771+ *(void **)&ops->get_size = macvlan_get_size;
43772+ *(void **)&ops->fill_info = macvlan_fill_info;
43773+ pax_close_kernel();
43774
43775 return rtnl_link_register(ops);
43776 };
43777@@ -967,7 +969,7 @@ static int macvlan_device_event(struct notifier_block *unused,
43778 return NOTIFY_DONE;
43779 }
43780
43781-static struct notifier_block macvlan_notifier_block __read_mostly = {
43782+static struct notifier_block macvlan_notifier_block = {
43783 .notifier_call = macvlan_device_event,
43784 };
43785
43786diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
43787index 523d6b2..5e16aa1 100644
43788--- a/drivers/net/macvtap.c
43789+++ b/drivers/net/macvtap.c
43790@@ -1110,7 +1110,7 @@ static int macvtap_device_event(struct notifier_block *unused,
43791 return NOTIFY_DONE;
43792 }
43793
43794-static struct notifier_block macvtap_notifier_block __read_mostly = {
43795+static struct notifier_block macvtap_notifier_block = {
43796 .notifier_call = macvtap_device_event,
43797 };
43798
43799diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
43800index daec9b0..6428fcb 100644
43801--- a/drivers/net/phy/mdio-bitbang.c
43802+++ b/drivers/net/phy/mdio-bitbang.c
43803@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
43804 struct mdiobb_ctrl *ctrl = bus->priv;
43805
43806 module_put(ctrl->ops->owner);
43807+ mdiobus_unregister(bus);
43808 mdiobus_free(bus);
43809 }
43810 EXPORT_SYMBOL(free_mdio_bitbang);
43811diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
43812index 72ff14b..11d442d 100644
43813--- a/drivers/net/ppp/ppp_generic.c
43814+++ b/drivers/net/ppp/ppp_generic.c
43815@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43816 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
43817 struct ppp_stats stats;
43818 struct ppp_comp_stats cstats;
43819- char *vers;
43820
43821 switch (cmd) {
43822 case SIOCGPPPSTATS:
43823@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43824 break;
43825
43826 case SIOCGPPPVER:
43827- vers = PPP_VERSION;
43828- if (copy_to_user(addr, vers, strlen(vers) + 1))
43829+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
43830 break;
43831 err = 0;
43832 break;
43833diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
43834index 1252d9c..80e660b 100644
43835--- a/drivers/net/slip/slhc.c
43836+++ b/drivers/net/slip/slhc.c
43837@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
43838 register struct tcphdr *thp;
43839 register struct iphdr *ip;
43840 register struct cstate *cs;
43841- int len, hdrlen;
43842+ long len, hdrlen;
43843 unsigned char *cp = icp;
43844
43845 /* We've got a compressed packet; read the change byte */
43846diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
43847index b305105..8ead6df 100644
43848--- a/drivers/net/team/team.c
43849+++ b/drivers/net/team/team.c
43850@@ -2682,7 +2682,7 @@ static int team_device_event(struct notifier_block *unused,
43851 return NOTIFY_DONE;
43852 }
43853
43854-static struct notifier_block team_notifier_block __read_mostly = {
43855+static struct notifier_block team_notifier_block = {
43856 .notifier_call = team_device_event,
43857 };
43858
43859diff --git a/drivers/net/tun.c b/drivers/net/tun.c
43860index 2491eb2..1a453eb 100644
43861--- a/drivers/net/tun.c
43862+++ b/drivers/net/tun.c
43863@@ -1076,8 +1076,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
43864 u32 rxhash;
43865
43866 if (!(tun->flags & TUN_NO_PI)) {
43867- if ((len -= sizeof(pi)) > total_len)
43868+ if (len < sizeof(pi))
43869 return -EINVAL;
43870+ len -= sizeof(pi);
43871
43872 if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
43873 return -EFAULT;
43874@@ -1085,8 +1086,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
43875 }
43876
43877 if (tun->flags & TUN_VNET_HDR) {
43878- if ((len -= tun->vnet_hdr_sz) > total_len)
43879+ if (len < tun->vnet_hdr_sz)
43880 return -EINVAL;
43881+ len -= tun->vnet_hdr_sz;
43882
43883 if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
43884 return -EFAULT;
43885@@ -1869,7 +1871,7 @@ unlock:
43886 }
43887
43888 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
43889- unsigned long arg, int ifreq_len)
43890+ unsigned long arg, size_t ifreq_len)
43891 {
43892 struct tun_file *tfile = file->private_data;
43893 struct tun_struct *tun;
43894@@ -1881,6 +1883,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
43895 int vnet_hdr_sz;
43896 int ret;
43897
43898+ if (ifreq_len > sizeof ifr)
43899+ return -EFAULT;
43900+
43901 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
43902 if (copy_from_user(&ifr, argp, ifreq_len))
43903 return -EFAULT;
43904diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
43905index cba1d46..f703766 100644
43906--- a/drivers/net/usb/hso.c
43907+++ b/drivers/net/usb/hso.c
43908@@ -71,7 +71,7 @@
43909 #include <asm/byteorder.h>
43910 #include <linux/serial_core.h>
43911 #include <linux/serial.h>
43912-
43913+#include <asm/local.h>
43914
43915 #define MOD_AUTHOR "Option Wireless"
43916 #define MOD_DESCRIPTION "USB High Speed Option driver"
43917@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
43918 struct urb *urb;
43919
43920 urb = serial->rx_urb[0];
43921- if (serial->port.count > 0) {
43922+ if (atomic_read(&serial->port.count) > 0) {
43923 count = put_rxbuf_data(urb, serial);
43924 if (count == -1)
43925 return;
43926@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
43927 DUMP1(urb->transfer_buffer, urb->actual_length);
43928
43929 /* Anyone listening? */
43930- if (serial->port.count == 0)
43931+ if (atomic_read(&serial->port.count) == 0)
43932 return;
43933
43934 if (status == 0) {
43935@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
43936 tty_port_tty_set(&serial->port, tty);
43937
43938 /* check for port already opened, if not set the termios */
43939- serial->port.count++;
43940- if (serial->port.count == 1) {
43941+ if (atomic_inc_return(&serial->port.count) == 1) {
43942 serial->rx_state = RX_IDLE;
43943 /* Force default termio settings */
43944 _hso_serial_set_termios(tty, NULL);
43945@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
43946 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
43947 if (result) {
43948 hso_stop_serial_device(serial->parent);
43949- serial->port.count--;
43950+ atomic_dec(&serial->port.count);
43951 kref_put(&serial->parent->ref, hso_serial_ref_free);
43952 }
43953 } else {
43954@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
43955
43956 /* reset the rts and dtr */
43957 /* do the actual close */
43958- serial->port.count--;
43959+ atomic_dec(&serial->port.count);
43960
43961- if (serial->port.count <= 0) {
43962- serial->port.count = 0;
43963+ if (atomic_read(&serial->port.count) <= 0) {
43964+ atomic_set(&serial->port.count, 0);
43965 tty_port_tty_set(&serial->port, NULL);
43966 if (!usb_gone)
43967 hso_stop_serial_device(serial->parent);
43968@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
43969
43970 /* the actual setup */
43971 spin_lock_irqsave(&serial->serial_lock, flags);
43972- if (serial->port.count)
43973+ if (atomic_read(&serial->port.count))
43974 _hso_serial_set_termios(tty, old);
43975 else
43976 tty->termios = *old;
43977@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
43978 D1("Pending read interrupt on port %d\n", i);
43979 spin_lock(&serial->serial_lock);
43980 if (serial->rx_state == RX_IDLE &&
43981- serial->port.count > 0) {
43982+ atomic_read(&serial->port.count) > 0) {
43983 /* Setup and send a ctrl req read on
43984 * port i */
43985 if (!serial->rx_urb_filled[0]) {
43986@@ -3057,7 +3056,7 @@ static int hso_resume(struct usb_interface *iface)
43987 /* Start all serial ports */
43988 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
43989 if (serial_table[i] && (serial_table[i]->interface == iface)) {
43990- if (dev2ser(serial_table[i])->port.count) {
43991+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
43992 result =
43993 hso_start_serial_device(serial_table[i], GFP_NOIO);
43994 hso_kick_transmit(dev2ser(serial_table[i]));
43995diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
43996index 57325f3..36b181f 100644
43997--- a/drivers/net/vxlan.c
43998+++ b/drivers/net/vxlan.c
43999@@ -1579,7 +1579,7 @@ nla_put_failure:
44000 return -EMSGSIZE;
44001 }
44002
44003-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
44004+static struct rtnl_link_ops vxlan_link_ops = {
44005 .kind = "vxlan",
44006 .maxtype = IFLA_VXLAN_MAX,
44007 .policy = vxlan_policy,
44008diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
44009index 34c8a33..3261fdc 100644
44010--- a/drivers/net/wireless/at76c50x-usb.c
44011+++ b/drivers/net/wireless/at76c50x-usb.c
44012@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
44013 }
44014
44015 /* Convert timeout from the DFU status to jiffies */
44016-static inline unsigned long at76_get_timeout(struct dfu_status *s)
44017+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
44018 {
44019 return msecs_to_jiffies((s->poll_timeout[2] << 16)
44020 | (s->poll_timeout[1] << 8)
44021diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
44022index 8d78253..bebbb68 100644
44023--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
44024+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
44025@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44026 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
44027 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
44028
44029- ACCESS_ONCE(ads->ds_link) = i->link;
44030- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
44031+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
44032+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
44033
44034 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
44035 ctl6 = SM(i->keytype, AR_EncrType);
44036@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44037
44038 if ((i->is_first || i->is_last) &&
44039 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
44040- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
44041+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
44042 | set11nTries(i->rates, 1)
44043 | set11nTries(i->rates, 2)
44044 | set11nTries(i->rates, 3)
44045 | (i->dur_update ? AR_DurUpdateEna : 0)
44046 | SM(0, AR_BurstDur);
44047
44048- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
44049+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
44050 | set11nRate(i->rates, 1)
44051 | set11nRate(i->rates, 2)
44052 | set11nRate(i->rates, 3);
44053 } else {
44054- ACCESS_ONCE(ads->ds_ctl2) = 0;
44055- ACCESS_ONCE(ads->ds_ctl3) = 0;
44056+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
44057+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
44058 }
44059
44060 if (!i->is_first) {
44061- ACCESS_ONCE(ads->ds_ctl0) = 0;
44062- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
44063- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
44064+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
44065+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
44066+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
44067 return;
44068 }
44069
44070@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44071 break;
44072 }
44073
44074- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
44075+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
44076 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
44077 | SM(i->txpower, AR_XmitPower)
44078 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
44079@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44080 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
44081 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
44082
44083- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
44084- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
44085+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
44086+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
44087
44088 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
44089 return;
44090
44091- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
44092+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
44093 | set11nPktDurRTSCTS(i->rates, 1);
44094
44095- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
44096+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
44097 | set11nPktDurRTSCTS(i->rates, 3);
44098
44099- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
44100+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
44101 | set11nRateFlags(i->rates, 1)
44102 | set11nRateFlags(i->rates, 2)
44103 | set11nRateFlags(i->rates, 3)
44104diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
44105index 301bf72..3f5654f 100644
44106--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
44107+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
44108@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44109 (i->qcu << AR_TxQcuNum_S) | desc_len;
44110
44111 checksum += val;
44112- ACCESS_ONCE(ads->info) = val;
44113+ ACCESS_ONCE_RW(ads->info) = val;
44114
44115 checksum += i->link;
44116- ACCESS_ONCE(ads->link) = i->link;
44117+ ACCESS_ONCE_RW(ads->link) = i->link;
44118
44119 checksum += i->buf_addr[0];
44120- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
44121+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
44122 checksum += i->buf_addr[1];
44123- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
44124+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
44125 checksum += i->buf_addr[2];
44126- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
44127+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
44128 checksum += i->buf_addr[3];
44129- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
44130+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
44131
44132 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
44133- ACCESS_ONCE(ads->ctl3) = val;
44134+ ACCESS_ONCE_RW(ads->ctl3) = val;
44135 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
44136- ACCESS_ONCE(ads->ctl5) = val;
44137+ ACCESS_ONCE_RW(ads->ctl5) = val;
44138 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
44139- ACCESS_ONCE(ads->ctl7) = val;
44140+ ACCESS_ONCE_RW(ads->ctl7) = val;
44141 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
44142- ACCESS_ONCE(ads->ctl9) = val;
44143+ ACCESS_ONCE_RW(ads->ctl9) = val;
44144
44145 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
44146- ACCESS_ONCE(ads->ctl10) = checksum;
44147+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
44148
44149 if (i->is_first || i->is_last) {
44150- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
44151+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
44152 | set11nTries(i->rates, 1)
44153 | set11nTries(i->rates, 2)
44154 | set11nTries(i->rates, 3)
44155 | (i->dur_update ? AR_DurUpdateEna : 0)
44156 | SM(0, AR_BurstDur);
44157
44158- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
44159+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
44160 | set11nRate(i->rates, 1)
44161 | set11nRate(i->rates, 2)
44162 | set11nRate(i->rates, 3);
44163 } else {
44164- ACCESS_ONCE(ads->ctl13) = 0;
44165- ACCESS_ONCE(ads->ctl14) = 0;
44166+ ACCESS_ONCE_RW(ads->ctl13) = 0;
44167+ ACCESS_ONCE_RW(ads->ctl14) = 0;
44168 }
44169
44170 ads->ctl20 = 0;
44171@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44172
44173 ctl17 = SM(i->keytype, AR_EncrType);
44174 if (!i->is_first) {
44175- ACCESS_ONCE(ads->ctl11) = 0;
44176- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
44177- ACCESS_ONCE(ads->ctl15) = 0;
44178- ACCESS_ONCE(ads->ctl16) = 0;
44179- ACCESS_ONCE(ads->ctl17) = ctl17;
44180- ACCESS_ONCE(ads->ctl18) = 0;
44181- ACCESS_ONCE(ads->ctl19) = 0;
44182+ ACCESS_ONCE_RW(ads->ctl11) = 0;
44183+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
44184+ ACCESS_ONCE_RW(ads->ctl15) = 0;
44185+ ACCESS_ONCE_RW(ads->ctl16) = 0;
44186+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
44187+ ACCESS_ONCE_RW(ads->ctl18) = 0;
44188+ ACCESS_ONCE_RW(ads->ctl19) = 0;
44189 return;
44190 }
44191
44192- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
44193+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
44194 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
44195 | SM(i->txpower, AR_XmitPower)
44196 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
44197@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44198 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
44199 ctl12 |= SM(val, AR_PAPRDChainMask);
44200
44201- ACCESS_ONCE(ads->ctl12) = ctl12;
44202- ACCESS_ONCE(ads->ctl17) = ctl17;
44203+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
44204+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
44205
44206- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
44207+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
44208 | set11nPktDurRTSCTS(i->rates, 1);
44209
44210- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
44211+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
44212 | set11nPktDurRTSCTS(i->rates, 3);
44213
44214- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
44215+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
44216 | set11nRateFlags(i->rates, 1)
44217 | set11nRateFlags(i->rates, 2)
44218 | set11nRateFlags(i->rates, 3)
44219 | SM(i->rtscts_rate, AR_RTSCTSRate);
44220
44221- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
44222+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
44223 }
44224
44225 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
44226diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
44227index ae30343..a117806 100644
44228--- a/drivers/net/wireless/ath/ath9k/hw.h
44229+++ b/drivers/net/wireless/ath/ath9k/hw.h
44230@@ -652,7 +652,7 @@ struct ath_hw_private_ops {
44231
44232 /* ANI */
44233 void (*ani_cache_ini_regs)(struct ath_hw *ah);
44234-};
44235+} __no_const;
44236
44237 /**
44238 * struct ath_spec_scan - parameters for Atheros spectral scan
44239@@ -721,7 +721,7 @@ struct ath_hw_ops {
44240 struct ath_spec_scan *param);
44241 void (*spectral_scan_trigger)(struct ath_hw *ah);
44242 void (*spectral_scan_wait)(struct ath_hw *ah);
44243-};
44244+} __no_const;
44245
44246 struct ath_nf_limits {
44247 s16 max;
44248diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
44249index b37a582..680835d 100644
44250--- a/drivers/net/wireless/iwlegacy/3945-mac.c
44251+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
44252@@ -3639,7 +3639,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
44253 */
44254 if (il3945_mod_params.disable_hw_scan) {
44255 D_INFO("Disabling hw_scan\n");
44256- il3945_mac_ops.hw_scan = NULL;
44257+ pax_open_kernel();
44258+ *(void **)&il3945_mac_ops.hw_scan = NULL;
44259+ pax_close_kernel();
44260 }
44261
44262 D_INFO("*** LOAD DRIVER ***\n");
44263diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
44264index d532948..e0d8bb1 100644
44265--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
44266+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
44267@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
44268 {
44269 struct iwl_priv *priv = file->private_data;
44270 char buf[64];
44271- int buf_size;
44272+ size_t buf_size;
44273 u32 offset, len;
44274
44275 memset(buf, 0, sizeof(buf));
44276@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
44277 struct iwl_priv *priv = file->private_data;
44278
44279 char buf[8];
44280- int buf_size;
44281+ size_t buf_size;
44282 u32 reset_flag;
44283
44284 memset(buf, 0, sizeof(buf));
44285@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
44286 {
44287 struct iwl_priv *priv = file->private_data;
44288 char buf[8];
44289- int buf_size;
44290+ size_t buf_size;
44291 int ht40;
44292
44293 memset(buf, 0, sizeof(buf));
44294@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
44295 {
44296 struct iwl_priv *priv = file->private_data;
44297 char buf[8];
44298- int buf_size;
44299+ size_t buf_size;
44300 int value;
44301
44302 memset(buf, 0, sizeof(buf));
44303@@ -698,10 +698,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
44304 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
44305 DEBUGFS_READ_FILE_OPS(current_sleep_command);
44306
44307-static const char *fmt_value = " %-30s %10u\n";
44308-static const char *fmt_hex = " %-30s 0x%02X\n";
44309-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
44310-static const char *fmt_header =
44311+static const char fmt_value[] = " %-30s %10u\n";
44312+static const char fmt_hex[] = " %-30s 0x%02X\n";
44313+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
44314+static const char fmt_header[] =
44315 "%-32s current cumulative delta max\n";
44316
44317 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
44318@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
44319 {
44320 struct iwl_priv *priv = file->private_data;
44321 char buf[8];
44322- int buf_size;
44323+ size_t buf_size;
44324 int clear;
44325
44326 memset(buf, 0, sizeof(buf));
44327@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
44328 {
44329 struct iwl_priv *priv = file->private_data;
44330 char buf[8];
44331- int buf_size;
44332+ size_t buf_size;
44333 int trace;
44334
44335 memset(buf, 0, sizeof(buf));
44336@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
44337 {
44338 struct iwl_priv *priv = file->private_data;
44339 char buf[8];
44340- int buf_size;
44341+ size_t buf_size;
44342 int missed;
44343
44344 memset(buf, 0, sizeof(buf));
44345@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
44346
44347 struct iwl_priv *priv = file->private_data;
44348 char buf[8];
44349- int buf_size;
44350+ size_t buf_size;
44351 int plcp;
44352
44353 memset(buf, 0, sizeof(buf));
44354@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
44355
44356 struct iwl_priv *priv = file->private_data;
44357 char buf[8];
44358- int buf_size;
44359+ size_t buf_size;
44360 int flush;
44361
44362 memset(buf, 0, sizeof(buf));
44363@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
44364
44365 struct iwl_priv *priv = file->private_data;
44366 char buf[8];
44367- int buf_size;
44368+ size_t buf_size;
44369 int rts;
44370
44371 if (!priv->cfg->ht_params)
44372@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
44373 {
44374 struct iwl_priv *priv = file->private_data;
44375 char buf[8];
44376- int buf_size;
44377+ size_t buf_size;
44378
44379 memset(buf, 0, sizeof(buf));
44380 buf_size = min(count, sizeof(buf) - 1);
44381@@ -2254,7 +2254,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
44382 struct iwl_priv *priv = file->private_data;
44383 u32 event_log_flag;
44384 char buf[8];
44385- int buf_size;
44386+ size_t buf_size;
44387
44388 /* check that the interface is up */
44389 if (!iwl_is_ready(priv))
44390@@ -2308,7 +2308,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
44391 struct iwl_priv *priv = file->private_data;
44392 char buf[8];
44393 u32 calib_disabled;
44394- int buf_size;
44395+ size_t buf_size;
44396
44397 memset(buf, 0, sizeof(buf));
44398 buf_size = min(count, sizeof(buf) - 1);
44399diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
44400index a8afc7b..de058b2 100644
44401--- a/drivers/net/wireless/iwlwifi/dvm/main.c
44402+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
44403@@ -1189,7 +1189,7 @@ static void iwl_option_config(struct iwl_priv *priv)
44404 static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
44405 {
44406 struct iwl_nvm_data *data = priv->nvm_data;
44407- char *debug_msg;
44408+ static const char debug_msg[] = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
44409
44410 if (data->sku_cap_11n_enable &&
44411 !priv->cfg->ht_params) {
44412@@ -1203,7 +1203,6 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
44413 return -EINVAL;
44414 }
44415
44416- debug_msg = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
44417 IWL_DEBUG_INFO(priv, debug_msg,
44418 data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
44419 data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
44420diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
44421index aeb70e1..d7b5bb5 100644
44422--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
44423+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
44424@@ -1329,7 +1329,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
44425 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
44426
44427 char buf[8];
44428- int buf_size;
44429+ size_t buf_size;
44430 u32 reset_flag;
44431
44432 memset(buf, 0, sizeof(buf));
44433@@ -1350,7 +1350,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
44434 {
44435 struct iwl_trans *trans = file->private_data;
44436 char buf[8];
44437- int buf_size;
44438+ size_t buf_size;
44439 int csr;
44440
44441 memset(buf, 0, sizeof(buf));
44442diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
44443index cb34c78..9fec0dc 100644
44444--- a/drivers/net/wireless/mac80211_hwsim.c
44445+++ b/drivers/net/wireless/mac80211_hwsim.c
44446@@ -2195,25 +2195,19 @@ static int __init init_mac80211_hwsim(void)
44447
44448 if (channels > 1) {
44449 hwsim_if_comb.num_different_channels = channels;
44450- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
44451- mac80211_hwsim_ops.cancel_hw_scan =
44452- mac80211_hwsim_cancel_hw_scan;
44453- mac80211_hwsim_ops.sw_scan_start = NULL;
44454- mac80211_hwsim_ops.sw_scan_complete = NULL;
44455- mac80211_hwsim_ops.remain_on_channel =
44456- mac80211_hwsim_roc;
44457- mac80211_hwsim_ops.cancel_remain_on_channel =
44458- mac80211_hwsim_croc;
44459- mac80211_hwsim_ops.add_chanctx =
44460- mac80211_hwsim_add_chanctx;
44461- mac80211_hwsim_ops.remove_chanctx =
44462- mac80211_hwsim_remove_chanctx;
44463- mac80211_hwsim_ops.change_chanctx =
44464- mac80211_hwsim_change_chanctx;
44465- mac80211_hwsim_ops.assign_vif_chanctx =
44466- mac80211_hwsim_assign_vif_chanctx;
44467- mac80211_hwsim_ops.unassign_vif_chanctx =
44468- mac80211_hwsim_unassign_vif_chanctx;
44469+ pax_open_kernel();
44470+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
44471+ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
44472+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
44473+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
44474+ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
44475+ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
44476+ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
44477+ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
44478+ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
44479+ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
44480+ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
44481+ pax_close_kernel();
44482 }
44483
44484 spin_lock_init(&hwsim_radio_lock);
44485diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
44486index 8169a85..7fa3b47 100644
44487--- a/drivers/net/wireless/rndis_wlan.c
44488+++ b/drivers/net/wireless/rndis_wlan.c
44489@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
44490
44491 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
44492
44493- if (rts_threshold < 0 || rts_threshold > 2347)
44494+ if (rts_threshold > 2347)
44495 rts_threshold = 2347;
44496
44497 tmp = cpu_to_le32(rts_threshold);
44498diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
44499index 7510723..5ba37f5 100644
44500--- a/drivers/net/wireless/rt2x00/rt2x00.h
44501+++ b/drivers/net/wireless/rt2x00/rt2x00.h
44502@@ -386,7 +386,7 @@ struct rt2x00_intf {
44503 * for hardware which doesn't support hardware
44504 * sequence counting.
44505 */
44506- atomic_t seqno;
44507+ atomic_unchecked_t seqno;
44508 };
44509
44510 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
44511diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
44512index d955741..8730748 100644
44513--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
44514+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
44515@@ -252,9 +252,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
44516 * sequence counter given by mac80211.
44517 */
44518 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
44519- seqno = atomic_add_return(0x10, &intf->seqno);
44520+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
44521 else
44522- seqno = atomic_read(&intf->seqno);
44523+ seqno = atomic_read_unchecked(&intf->seqno);
44524
44525 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
44526 hdr->seq_ctrl |= cpu_to_le16(seqno);
44527diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
44528index e2b3d9c..67a5184 100644
44529--- a/drivers/net/wireless/ti/wl1251/sdio.c
44530+++ b/drivers/net/wireless/ti/wl1251/sdio.c
44531@@ -271,13 +271,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
44532
44533 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
44534
44535- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
44536- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
44537+ pax_open_kernel();
44538+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
44539+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
44540+ pax_close_kernel();
44541
44542 wl1251_info("using dedicated interrupt line");
44543 } else {
44544- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
44545- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
44546+ pax_open_kernel();
44547+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
44548+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
44549+ pax_close_kernel();
44550
44551 wl1251_info("using SDIO interrupt");
44552 }
44553diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
44554index 1c627da..69f7d17 100644
44555--- a/drivers/net/wireless/ti/wl12xx/main.c
44556+++ b/drivers/net/wireless/ti/wl12xx/main.c
44557@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
44558 sizeof(wl->conf.mem));
44559
44560 /* read data preparation is only needed by wl127x */
44561- wl->ops->prepare_read = wl127x_prepare_read;
44562+ pax_open_kernel();
44563+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
44564+ pax_close_kernel();
44565
44566 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
44567 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
44568@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
44569 sizeof(wl->conf.mem));
44570
44571 /* read data preparation is only needed by wl127x */
44572- wl->ops->prepare_read = wl127x_prepare_read;
44573+ pax_open_kernel();
44574+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
44575+ pax_close_kernel();
44576
44577 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
44578 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
44579diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
44580index 9fa692d..b31fee0 100644
44581--- a/drivers/net/wireless/ti/wl18xx/main.c
44582+++ b/drivers/net/wireless/ti/wl18xx/main.c
44583@@ -1687,8 +1687,10 @@ static int wl18xx_setup(struct wl1271 *wl)
44584 }
44585
44586 if (!checksum_param) {
44587- wl18xx_ops.set_rx_csum = NULL;
44588- wl18xx_ops.init_vif = NULL;
44589+ pax_open_kernel();
44590+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
44591+ *(void **)&wl18xx_ops.init_vif = NULL;
44592+ pax_close_kernel();
44593 }
44594
44595 /* Enable 11a Band only if we have 5G antennas */
44596diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
44597index 7ef0b4a..ff65c28 100644
44598--- a/drivers/net/wireless/zd1211rw/zd_usb.c
44599+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
44600@@ -386,7 +386,7 @@ static inline void handle_regs_int(struct urb *urb)
44601 {
44602 struct zd_usb *usb = urb->context;
44603 struct zd_usb_interrupt *intr = &usb->intr;
44604- int len;
44605+ unsigned int len;
44606 u16 int_num;
44607
44608 ZD_ASSERT(in_interrupt());
44609diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
44610index d93b2b6..ae50401 100644
44611--- a/drivers/oprofile/buffer_sync.c
44612+++ b/drivers/oprofile/buffer_sync.c
44613@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
44614 if (cookie == NO_COOKIE)
44615 offset = pc;
44616 if (cookie == INVALID_COOKIE) {
44617- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
44618+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
44619 offset = pc;
44620 }
44621 if (cookie != last_cookie) {
44622@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
44623 /* add userspace sample */
44624
44625 if (!mm) {
44626- atomic_inc(&oprofile_stats.sample_lost_no_mm);
44627+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
44628 return 0;
44629 }
44630
44631 cookie = lookup_dcookie(mm, s->eip, &offset);
44632
44633 if (cookie == INVALID_COOKIE) {
44634- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
44635+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
44636 return 0;
44637 }
44638
44639@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
44640 /* ignore backtraces if failed to add a sample */
44641 if (state == sb_bt_start) {
44642 state = sb_bt_ignore;
44643- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
44644+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
44645 }
44646 }
44647 release_mm(mm);
44648diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
44649index c0cc4e7..44d4e54 100644
44650--- a/drivers/oprofile/event_buffer.c
44651+++ b/drivers/oprofile/event_buffer.c
44652@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
44653 }
44654
44655 if (buffer_pos == buffer_size) {
44656- atomic_inc(&oprofile_stats.event_lost_overflow);
44657+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
44658 return;
44659 }
44660
44661diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
44662index ed2c3ec..deda85a 100644
44663--- a/drivers/oprofile/oprof.c
44664+++ b/drivers/oprofile/oprof.c
44665@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
44666 if (oprofile_ops.switch_events())
44667 return;
44668
44669- atomic_inc(&oprofile_stats.multiplex_counter);
44670+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
44671 start_switch_worker();
44672 }
44673
44674diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
44675index 84a208d..d61b0a1 100644
44676--- a/drivers/oprofile/oprofile_files.c
44677+++ b/drivers/oprofile/oprofile_files.c
44678@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
44679
44680 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
44681
44682-static ssize_t timeout_read(struct file *file, char __user *buf,
44683+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
44684 size_t count, loff_t *offset)
44685 {
44686 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
44687diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
44688index 917d28e..d62d981 100644
44689--- a/drivers/oprofile/oprofile_stats.c
44690+++ b/drivers/oprofile/oprofile_stats.c
44691@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
44692 cpu_buf->sample_invalid_eip = 0;
44693 }
44694
44695- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
44696- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
44697- atomic_set(&oprofile_stats.event_lost_overflow, 0);
44698- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
44699- atomic_set(&oprofile_stats.multiplex_counter, 0);
44700+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
44701+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
44702+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
44703+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
44704+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
44705 }
44706
44707
44708diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
44709index 38b6fc0..b5cbfce 100644
44710--- a/drivers/oprofile/oprofile_stats.h
44711+++ b/drivers/oprofile/oprofile_stats.h
44712@@ -13,11 +13,11 @@
44713 #include <linux/atomic.h>
44714
44715 struct oprofile_stat_struct {
44716- atomic_t sample_lost_no_mm;
44717- atomic_t sample_lost_no_mapping;
44718- atomic_t bt_lost_no_mapping;
44719- atomic_t event_lost_overflow;
44720- atomic_t multiplex_counter;
44721+ atomic_unchecked_t sample_lost_no_mm;
44722+ atomic_unchecked_t sample_lost_no_mapping;
44723+ atomic_unchecked_t bt_lost_no_mapping;
44724+ atomic_unchecked_t event_lost_overflow;
44725+ atomic_unchecked_t multiplex_counter;
44726 };
44727
44728 extern struct oprofile_stat_struct oprofile_stats;
44729diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
44730index 7c12d9c..558bf3bb 100644
44731--- a/drivers/oprofile/oprofilefs.c
44732+++ b/drivers/oprofile/oprofilefs.c
44733@@ -190,7 +190,7 @@ static const struct file_operations atomic_ro_fops = {
44734
44735
44736 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
44737- char const *name, atomic_t *val)
44738+ char const *name, atomic_unchecked_t *val)
44739 {
44740 return __oprofilefs_create_file(sb, root, name,
44741 &atomic_ro_fops, 0444, val);
44742diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
44743index 93404f7..4a313d8 100644
44744--- a/drivers/oprofile/timer_int.c
44745+++ b/drivers/oprofile/timer_int.c
44746@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
44747 return NOTIFY_OK;
44748 }
44749
44750-static struct notifier_block __refdata oprofile_cpu_notifier = {
44751+static struct notifier_block oprofile_cpu_notifier = {
44752 .notifier_call = oprofile_cpu_notify,
44753 };
44754
44755diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
44756index 92ed045..62d39bd7 100644
44757--- a/drivers/parport/procfs.c
44758+++ b/drivers/parport/procfs.c
44759@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
44760
44761 *ppos += len;
44762
44763- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
44764+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
44765 }
44766
44767 #ifdef CONFIG_PARPORT_1284
44768@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
44769
44770 *ppos += len;
44771
44772- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
44773+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
44774 }
44775 #endif /* IEEE1284.3 support. */
44776
44777diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
44778index c35e8ad..fc33beb 100644
44779--- a/drivers/pci/hotplug/acpiphp_ibm.c
44780+++ b/drivers/pci/hotplug/acpiphp_ibm.c
44781@@ -464,7 +464,9 @@ static int __init ibm_acpiphp_init(void)
44782 goto init_cleanup;
44783 }
44784
44785- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
44786+ pax_open_kernel();
44787+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
44788+ pax_close_kernel();
44789 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
44790
44791 return retval;
44792diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
44793index a6a71c4..c91097b 100644
44794--- a/drivers/pci/hotplug/cpcihp_generic.c
44795+++ b/drivers/pci/hotplug/cpcihp_generic.c
44796@@ -73,7 +73,6 @@ static u16 port;
44797 static unsigned int enum_bit;
44798 static u8 enum_mask;
44799
44800-static struct cpci_hp_controller_ops generic_hpc_ops;
44801 static struct cpci_hp_controller generic_hpc;
44802
44803 static int __init validate_parameters(void)
44804@@ -139,6 +138,10 @@ static int query_enum(void)
44805 return ((value & enum_mask) == enum_mask);
44806 }
44807
44808+static struct cpci_hp_controller_ops generic_hpc_ops = {
44809+ .query_enum = query_enum,
44810+};
44811+
44812 static int __init cpcihp_generic_init(void)
44813 {
44814 int status;
44815@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
44816 pci_dev_put(dev);
44817
44818 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
44819- generic_hpc_ops.query_enum = query_enum;
44820 generic_hpc.ops = &generic_hpc_ops;
44821
44822 status = cpci_hp_register_controller(&generic_hpc);
44823diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
44824index 449b4bb..257e2e8 100644
44825--- a/drivers/pci/hotplug/cpcihp_zt5550.c
44826+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
44827@@ -59,7 +59,6 @@
44828 /* local variables */
44829 static bool debug;
44830 static bool poll;
44831-static struct cpci_hp_controller_ops zt5550_hpc_ops;
44832 static struct cpci_hp_controller zt5550_hpc;
44833
44834 /* Primary cPCI bus bridge device */
44835@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
44836 return 0;
44837 }
44838
44839+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
44840+ .query_enum = zt5550_hc_query_enum,
44841+};
44842+
44843 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
44844 {
44845 int status;
44846@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
44847 dbg("returned from zt5550_hc_config");
44848
44849 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
44850- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
44851 zt5550_hpc.ops = &zt5550_hpc_ops;
44852 if(!poll) {
44853 zt5550_hpc.irq = hc_dev->irq;
44854 zt5550_hpc.irq_flags = IRQF_SHARED;
44855 zt5550_hpc.dev_id = hc_dev;
44856
44857- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
44858- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
44859- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
44860+ pax_open_kernel();
44861+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
44862+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
44863+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
44864+ pax_open_kernel();
44865 } else {
44866 info("using ENUM# polling mode");
44867 }
44868diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
44869index 76ba8a1..20ca857 100644
44870--- a/drivers/pci/hotplug/cpqphp_nvram.c
44871+++ b/drivers/pci/hotplug/cpqphp_nvram.c
44872@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
44873
44874 void compaq_nvram_init (void __iomem *rom_start)
44875 {
44876+
44877+#ifndef CONFIG_PAX_KERNEXEC
44878 if (rom_start) {
44879 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
44880 }
44881+#endif
44882+
44883 dbg("int15 entry = %p\n", compaq_int15_entry_point);
44884
44885 /* initialize our int15 lock */
44886diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
44887index ec20f74..c1d961e 100644
44888--- a/drivers/pci/hotplug/pci_hotplug_core.c
44889+++ b/drivers/pci/hotplug/pci_hotplug_core.c
44890@@ -441,8 +441,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
44891 return -EINVAL;
44892 }
44893
44894- slot->ops->owner = owner;
44895- slot->ops->mod_name = mod_name;
44896+ pax_open_kernel();
44897+ *(struct module **)&slot->ops->owner = owner;
44898+ *(const char **)&slot->ops->mod_name = mod_name;
44899+ pax_close_kernel();
44900
44901 mutex_lock(&pci_hp_mutex);
44902 /*
44903diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
44904index 7d72c5e..edce02c 100644
44905--- a/drivers/pci/hotplug/pciehp_core.c
44906+++ b/drivers/pci/hotplug/pciehp_core.c
44907@@ -91,7 +91,7 @@ static int init_slot(struct controller *ctrl)
44908 struct slot *slot = ctrl->slot;
44909 struct hotplug_slot *hotplug = NULL;
44910 struct hotplug_slot_info *info = NULL;
44911- struct hotplug_slot_ops *ops = NULL;
44912+ hotplug_slot_ops_no_const *ops = NULL;
44913 char name[SLOT_NAME_SIZE];
44914 int retval = -ENOMEM;
44915
44916diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
44917index 5127f3f..cc115f7 100644
44918--- a/drivers/pci/hotplug/pciehp_hpc.c
44919+++ b/drivers/pci/hotplug/pciehp_hpc.c
44920@@ -780,7 +780,7 @@ static int pcie_init_slot(struct controller *ctrl)
44921 return -ENOMEM;
44922
44923 snprintf(name, sizeof(name), "pciehp-%u", PSN(ctrl));
44924- slot->wq = alloc_workqueue(name, 0, 0);
44925+ slot->wq = alloc_workqueue("%s", 0, 0, name);
44926 if (!slot->wq)
44927 goto abort;
44928
44929diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
44930index 5b4a9d9..cd5ac1f 100644
44931--- a/drivers/pci/pci-sysfs.c
44932+++ b/drivers/pci/pci-sysfs.c
44933@@ -1071,7 +1071,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
44934 {
44935 /* allocate attribute structure, piggyback attribute name */
44936 int name_len = write_combine ? 13 : 10;
44937- struct bin_attribute *res_attr;
44938+ bin_attribute_no_const *res_attr;
44939 int retval;
44940
44941 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
44942@@ -1256,7 +1256,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
44943 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
44944 {
44945 int retval;
44946- struct bin_attribute *attr;
44947+ bin_attribute_no_const *attr;
44948
44949 /* If the device has VPD, try to expose it in sysfs. */
44950 if (dev->vpd) {
44951@@ -1303,7 +1303,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
44952 {
44953 int retval;
44954 int rom_size = 0;
44955- struct bin_attribute *attr;
44956+ bin_attribute_no_const *attr;
44957
44958 if (!sysfs_initialized)
44959 return -EACCES;
44960diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
44961index d1182c4..2a138ec 100644
44962--- a/drivers/pci/pci.h
44963+++ b/drivers/pci/pci.h
44964@@ -92,7 +92,7 @@ struct pci_vpd_ops {
44965 struct pci_vpd {
44966 unsigned int len;
44967 const struct pci_vpd_ops *ops;
44968- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
44969+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
44970 };
44971
44972 int pci_vpd_pci22_init(struct pci_dev *dev);
44973diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
44974index d320df6..ca9a8f6 100644
44975--- a/drivers/pci/pcie/aspm.c
44976+++ b/drivers/pci/pcie/aspm.c
44977@@ -27,9 +27,9 @@
44978 #define MODULE_PARAM_PREFIX "pcie_aspm."
44979
44980 /* Note: those are not register definitions */
44981-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
44982-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
44983-#define ASPM_STATE_L1 (4) /* L1 state */
44984+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
44985+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
44986+#define ASPM_STATE_L1 (4U) /* L1 state */
44987 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
44988 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
44989
44990diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
44991index ea37072..10e58e56 100644
44992--- a/drivers/pci/probe.c
44993+++ b/drivers/pci/probe.c
44994@@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
44995 struct pci_bus_region region;
44996 bool bar_too_big = false, bar_disabled = false;
44997
44998- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
44999+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
45000
45001 /* No printks while decoding is disabled! */
45002 if (!dev->mmio_always_on) {
45003diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
45004index 0812608..b04018c4 100644
45005--- a/drivers/pci/proc.c
45006+++ b/drivers/pci/proc.c
45007@@ -453,7 +453,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
45008 static int __init pci_proc_init(void)
45009 {
45010 struct pci_dev *dev = NULL;
45011+
45012+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45013+#ifdef CONFIG_GRKERNSEC_PROC_USER
45014+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
45015+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45016+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
45017+#endif
45018+#else
45019 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
45020+#endif
45021 proc_create("devices", 0, proc_bus_pci_dir,
45022 &proc_bus_pci_dev_operations);
45023 proc_initialized = 1;
45024diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/x86/chromeos_laptop.c
45025index 3e5b4497..dcdfb70 100644
45026--- a/drivers/platform/x86/chromeos_laptop.c
45027+++ b/drivers/platform/x86/chromeos_laptop.c
45028@@ -301,7 +301,7 @@ static int __init setup_tsl2563_als(const struct dmi_system_id *id)
45029 return 0;
45030 }
45031
45032-static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
45033+static struct dmi_system_id __initconst chromeos_laptop_dmi_table[] = {
45034 {
45035 .ident = "Samsung Series 5 550 - Touchpad",
45036 .matches = {
45037diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
45038index 6b22938..bc9700e 100644
45039--- a/drivers/platform/x86/msi-laptop.c
45040+++ b/drivers/platform/x86/msi-laptop.c
45041@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
45042
45043 if (!quirks->ec_read_only) {
45044 /* allow userland write sysfs file */
45045- dev_attr_bluetooth.store = store_bluetooth;
45046- dev_attr_wlan.store = store_wlan;
45047- dev_attr_threeg.store = store_threeg;
45048- dev_attr_bluetooth.attr.mode |= S_IWUSR;
45049- dev_attr_wlan.attr.mode |= S_IWUSR;
45050- dev_attr_threeg.attr.mode |= S_IWUSR;
45051+ pax_open_kernel();
45052+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
45053+ *(void **)&dev_attr_wlan.store = store_wlan;
45054+ *(void **)&dev_attr_threeg.store = store_threeg;
45055+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
45056+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
45057+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
45058+ pax_close_kernel();
45059 }
45060
45061 /* disable hardware control by fn key */
45062diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
45063index 2ac045f..39c443d 100644
45064--- a/drivers/platform/x86/sony-laptop.c
45065+++ b/drivers/platform/x86/sony-laptop.c
45066@@ -2483,7 +2483,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
45067 }
45068
45069 /* High speed charging function */
45070-static struct device_attribute *hsc_handle;
45071+static device_attribute_no_const *hsc_handle;
45072
45073 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
45074 struct device_attribute *attr,
45075diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
45076index 54d31c0..3f896d3 100644
45077--- a/drivers/platform/x86/thinkpad_acpi.c
45078+++ b/drivers/platform/x86/thinkpad_acpi.c
45079@@ -2093,7 +2093,7 @@ static int hotkey_mask_get(void)
45080 return 0;
45081 }
45082
45083-void static hotkey_mask_warn_incomplete_mask(void)
45084+static void hotkey_mask_warn_incomplete_mask(void)
45085 {
45086 /* log only what the user can fix... */
45087 const u32 wantedmask = hotkey_driver_mask &
45088@@ -2324,11 +2324,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
45089 }
45090 }
45091
45092-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45093- struct tp_nvram_state *newn,
45094- const u32 event_mask)
45095-{
45096-
45097 #define TPACPI_COMPARE_KEY(__scancode, __member) \
45098 do { \
45099 if ((event_mask & (1 << __scancode)) && \
45100@@ -2342,36 +2337,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45101 tpacpi_hotkey_send_key(__scancode); \
45102 } while (0)
45103
45104- void issue_volchange(const unsigned int oldvol,
45105- const unsigned int newvol)
45106- {
45107- unsigned int i = oldvol;
45108+static void issue_volchange(const unsigned int oldvol,
45109+ const unsigned int newvol,
45110+ const u32 event_mask)
45111+{
45112+ unsigned int i = oldvol;
45113
45114- while (i > newvol) {
45115- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
45116- i--;
45117- }
45118- while (i < newvol) {
45119- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
45120- i++;
45121- }
45122+ while (i > newvol) {
45123+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
45124+ i--;
45125 }
45126+ while (i < newvol) {
45127+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
45128+ i++;
45129+ }
45130+}
45131
45132- void issue_brightnesschange(const unsigned int oldbrt,
45133- const unsigned int newbrt)
45134- {
45135- unsigned int i = oldbrt;
45136+static void issue_brightnesschange(const unsigned int oldbrt,
45137+ const unsigned int newbrt,
45138+ const u32 event_mask)
45139+{
45140+ unsigned int i = oldbrt;
45141
45142- while (i > newbrt) {
45143- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
45144- i--;
45145- }
45146- while (i < newbrt) {
45147- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
45148- i++;
45149- }
45150+ while (i > newbrt) {
45151+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
45152+ i--;
45153+ }
45154+ while (i < newbrt) {
45155+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
45156+ i++;
45157 }
45158+}
45159
45160+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45161+ struct tp_nvram_state *newn,
45162+ const u32 event_mask)
45163+{
45164 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
45165 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
45166 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
45167@@ -2405,7 +2406,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45168 oldn->volume_level != newn->volume_level) {
45169 /* recently muted, or repeated mute keypress, or
45170 * multiple presses ending in mute */
45171- issue_volchange(oldn->volume_level, newn->volume_level);
45172+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
45173 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
45174 }
45175 } else {
45176@@ -2415,7 +2416,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45177 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
45178 }
45179 if (oldn->volume_level != newn->volume_level) {
45180- issue_volchange(oldn->volume_level, newn->volume_level);
45181+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
45182 } else if (oldn->volume_toggle != newn->volume_toggle) {
45183 /* repeated vol up/down keypress at end of scale ? */
45184 if (newn->volume_level == 0)
45185@@ -2428,7 +2429,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45186 /* handle brightness */
45187 if (oldn->brightness_level != newn->brightness_level) {
45188 issue_brightnesschange(oldn->brightness_level,
45189- newn->brightness_level);
45190+ newn->brightness_level,
45191+ event_mask);
45192 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
45193 /* repeated key presses that didn't change state */
45194 if (newn->brightness_level == 0)
45195@@ -2437,10 +2439,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45196 && !tp_features.bright_unkfw)
45197 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
45198 }
45199+}
45200
45201 #undef TPACPI_COMPARE_KEY
45202 #undef TPACPI_MAY_SEND_KEY
45203-}
45204
45205 /*
45206 * Polling driver
45207diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
45208index e4ac38a..b13344c 100644
45209--- a/drivers/platform/x86/wmi.c
45210+++ b/drivers/platform/x86/wmi.c
45211@@ -743,7 +743,7 @@ static int wmi_create_device(const struct guid_block *gblock,
45212 wblock->dev.class = &wmi_class;
45213
45214 wmi_gtoa(gblock->guid, guid_string);
45215- dev_set_name(&wblock->dev, guid_string);
45216+ dev_set_name(&wblock->dev, "%s", guid_string);
45217
45218 dev_set_drvdata(&wblock->dev, wblock);
45219
45220diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
45221index 769d265..a3a05ca 100644
45222--- a/drivers/pnp/pnpbios/bioscalls.c
45223+++ b/drivers/pnp/pnpbios/bioscalls.c
45224@@ -58,7 +58,7 @@ do { \
45225 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
45226 } while(0)
45227
45228-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
45229+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
45230 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
45231
45232 /*
45233@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
45234
45235 cpu = get_cpu();
45236 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
45237+
45238+ pax_open_kernel();
45239 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
45240+ pax_close_kernel();
45241
45242 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
45243 spin_lock_irqsave(&pnp_bios_lock, flags);
45244@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
45245 :"memory");
45246 spin_unlock_irqrestore(&pnp_bios_lock, flags);
45247
45248+ pax_open_kernel();
45249 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
45250+ pax_close_kernel();
45251+
45252 put_cpu();
45253
45254 /* If we get here and this is set then the PnP BIOS faulted on us. */
45255@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
45256 return status;
45257 }
45258
45259-void pnpbios_calls_init(union pnp_bios_install_struct *header)
45260+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
45261 {
45262 int i;
45263
45264@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
45265 pnp_bios_callpoint.offset = header->fields.pm16offset;
45266 pnp_bios_callpoint.segment = PNP_CS16;
45267
45268+ pax_open_kernel();
45269+
45270 for_each_possible_cpu(i) {
45271 struct desc_struct *gdt = get_cpu_gdt_table(i);
45272 if (!gdt)
45273@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
45274 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
45275 (unsigned long)__va(header->fields.pm16dseg));
45276 }
45277+
45278+ pax_close_kernel();
45279 }
45280diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
45281index 3e6db1c..1fbbdae 100644
45282--- a/drivers/pnp/resource.c
45283+++ b/drivers/pnp/resource.c
45284@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
45285 return 1;
45286
45287 /* check if the resource is valid */
45288- if (*irq < 0 || *irq > 15)
45289+ if (*irq > 15)
45290 return 0;
45291
45292 /* check if the resource is reserved */
45293@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
45294 return 1;
45295
45296 /* check if the resource is valid */
45297- if (*dma < 0 || *dma == 4 || *dma > 7)
45298+ if (*dma == 4 || *dma > 7)
45299 return 0;
45300
45301 /* check if the resource is reserved */
45302diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
45303index 0c52e2a..3421ab7 100644
45304--- a/drivers/power/pda_power.c
45305+++ b/drivers/power/pda_power.c
45306@@ -37,7 +37,11 @@ static int polling;
45307
45308 #if IS_ENABLED(CONFIG_USB_PHY)
45309 static struct usb_phy *transceiver;
45310-static struct notifier_block otg_nb;
45311+static int otg_handle_notification(struct notifier_block *nb,
45312+ unsigned long event, void *unused);
45313+static struct notifier_block otg_nb = {
45314+ .notifier_call = otg_handle_notification
45315+};
45316 #endif
45317
45318 static struct regulator *ac_draw;
45319@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
45320
45321 #if IS_ENABLED(CONFIG_USB_PHY)
45322 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
45323- otg_nb.notifier_call = otg_handle_notification;
45324 ret = usb_register_notifier(transceiver, &otg_nb);
45325 if (ret) {
45326 dev_err(dev, "failure to register otg notifier\n");
45327diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
45328index cc439fd..8fa30df 100644
45329--- a/drivers/power/power_supply.h
45330+++ b/drivers/power/power_supply.h
45331@@ -16,12 +16,12 @@ struct power_supply;
45332
45333 #ifdef CONFIG_SYSFS
45334
45335-extern void power_supply_init_attrs(struct device_type *dev_type);
45336+extern void power_supply_init_attrs(void);
45337 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
45338
45339 #else
45340
45341-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
45342+static inline void power_supply_init_attrs(void) {}
45343 #define power_supply_uevent NULL
45344
45345 #endif /* CONFIG_SYSFS */
45346diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
45347index 1c517c3..ffa2f17 100644
45348--- a/drivers/power/power_supply_core.c
45349+++ b/drivers/power/power_supply_core.c
45350@@ -24,7 +24,10 @@
45351 struct class *power_supply_class;
45352 EXPORT_SYMBOL_GPL(power_supply_class);
45353
45354-static struct device_type power_supply_dev_type;
45355+extern const struct attribute_group *power_supply_attr_groups[];
45356+static struct device_type power_supply_dev_type = {
45357+ .groups = power_supply_attr_groups,
45358+};
45359
45360 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
45361 struct power_supply *supply)
45362@@ -554,7 +557,7 @@ static int __init power_supply_class_init(void)
45363 return PTR_ERR(power_supply_class);
45364
45365 power_supply_class->dev_uevent = power_supply_uevent;
45366- power_supply_init_attrs(&power_supply_dev_type);
45367+ power_supply_init_attrs();
45368
45369 return 0;
45370 }
45371diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
45372index 29178f7..c65f324 100644
45373--- a/drivers/power/power_supply_sysfs.c
45374+++ b/drivers/power/power_supply_sysfs.c
45375@@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
45376 .is_visible = power_supply_attr_is_visible,
45377 };
45378
45379-static const struct attribute_group *power_supply_attr_groups[] = {
45380+const struct attribute_group *power_supply_attr_groups[] = {
45381 &power_supply_attr_group,
45382 NULL,
45383 };
45384
45385-void power_supply_init_attrs(struct device_type *dev_type)
45386+void power_supply_init_attrs(void)
45387 {
45388 int i;
45389
45390- dev_type->groups = power_supply_attr_groups;
45391-
45392 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
45393 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
45394 }
45395diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
45396index d428ef9..fdc0357 100644
45397--- a/drivers/regulator/max8660.c
45398+++ b/drivers/regulator/max8660.c
45399@@ -333,8 +333,10 @@ static int max8660_probe(struct i2c_client *client,
45400 max8660->shadow_regs[MAX8660_OVER1] = 5;
45401 } else {
45402 /* Otherwise devices can be toggled via software */
45403- max8660_dcdc_ops.enable = max8660_dcdc_enable;
45404- max8660_dcdc_ops.disable = max8660_dcdc_disable;
45405+ pax_open_kernel();
45406+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
45407+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
45408+ pax_close_kernel();
45409 }
45410
45411 /*
45412diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
45413index adb1414..c13e0ce 100644
45414--- a/drivers/regulator/max8973-regulator.c
45415+++ b/drivers/regulator/max8973-regulator.c
45416@@ -401,9 +401,11 @@ static int max8973_probe(struct i2c_client *client,
45417 if (!pdata->enable_ext_control) {
45418 max->desc.enable_reg = MAX8973_VOUT;
45419 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
45420- max8973_dcdc_ops.enable = regulator_enable_regmap;
45421- max8973_dcdc_ops.disable = regulator_disable_regmap;
45422- max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
45423+ pax_open_kernel();
45424+ *(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
45425+ *(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
45426+ *(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
45427+ pax_close_kernel();
45428 }
45429
45430 max->enable_external_control = pdata->enable_ext_control;
45431diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
45432index b716283..3cc4349 100644
45433--- a/drivers/regulator/mc13892-regulator.c
45434+++ b/drivers/regulator/mc13892-regulator.c
45435@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
45436 }
45437 mc13xxx_unlock(mc13892);
45438
45439- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
45440+ pax_open_kernel();
45441+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
45442 = mc13892_vcam_set_mode;
45443- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
45444+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
45445 = mc13892_vcam_get_mode;
45446+ pax_close_kernel();
45447
45448 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
45449 ARRAY_SIZE(mc13892_regulators));
45450diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
45451index f1cb706..4c7832a 100644
45452--- a/drivers/rtc/rtc-cmos.c
45453+++ b/drivers/rtc/rtc-cmos.c
45454@@ -724,7 +724,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
45455 hpet_rtc_timer_init();
45456
45457 /* export at least the first block of NVRAM */
45458- nvram.size = address_space - NVRAM_OFFSET;
45459+ pax_open_kernel();
45460+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
45461+ pax_close_kernel();
45462 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
45463 if (retval < 0) {
45464 dev_dbg(dev, "can't create nvram file? %d\n", retval);
45465diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
45466index d049393..bb20be0 100644
45467--- a/drivers/rtc/rtc-dev.c
45468+++ b/drivers/rtc/rtc-dev.c
45469@@ -16,6 +16,7 @@
45470 #include <linux/module.h>
45471 #include <linux/rtc.h>
45472 #include <linux/sched.h>
45473+#include <linux/grsecurity.h>
45474 #include "rtc-core.h"
45475
45476 static dev_t rtc_devt;
45477@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
45478 if (copy_from_user(&tm, uarg, sizeof(tm)))
45479 return -EFAULT;
45480
45481+ gr_log_timechange();
45482+
45483 return rtc_set_time(rtc, &tm);
45484
45485 case RTC_PIE_ON:
45486diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
45487index b53992a..776df84 100644
45488--- a/drivers/rtc/rtc-ds1307.c
45489+++ b/drivers/rtc/rtc-ds1307.c
45490@@ -107,7 +107,7 @@ struct ds1307 {
45491 u8 offset; /* register's offset */
45492 u8 regs[11];
45493 u16 nvram_offset;
45494- struct bin_attribute *nvram;
45495+ bin_attribute_no_const *nvram;
45496 enum ds_type type;
45497 unsigned long flags;
45498 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
45499diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
45500index 130f29a..6179d03 100644
45501--- a/drivers/rtc/rtc-m48t59.c
45502+++ b/drivers/rtc/rtc-m48t59.c
45503@@ -482,7 +482,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
45504 goto out;
45505 }
45506
45507- m48t59_nvram_attr.size = pdata->offset;
45508+ pax_open_kernel();
45509+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
45510+ pax_close_kernel();
45511
45512 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
45513 if (ret) {
45514diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
45515index e693af6..2e525b6 100644
45516--- a/drivers/scsi/bfa/bfa_fcpim.h
45517+++ b/drivers/scsi/bfa/bfa_fcpim.h
45518@@ -36,7 +36,7 @@ struct bfa_iotag_s {
45519
45520 struct bfa_itn_s {
45521 bfa_isr_func_t isr;
45522-};
45523+} __no_const;
45524
45525 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
45526 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
45527diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
45528index 23a90e7..9cf04ee 100644
45529--- a/drivers/scsi/bfa/bfa_ioc.h
45530+++ b/drivers/scsi/bfa/bfa_ioc.h
45531@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
45532 bfa_ioc_disable_cbfn_t disable_cbfn;
45533 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
45534 bfa_ioc_reset_cbfn_t reset_cbfn;
45535-};
45536+} __no_const;
45537
45538 /*
45539 * IOC event notification mechanism.
45540@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
45541 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
45542 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
45543 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
45544-};
45545+} __no_const;
45546
45547 /*
45548 * Queue element to wait for room in request queue. FIFO order is
45549diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
45550index df0c3c7..b00e1d0 100644
45551--- a/drivers/scsi/hosts.c
45552+++ b/drivers/scsi/hosts.c
45553@@ -42,7 +42,7 @@
45554 #include "scsi_logging.h"
45555
45556
45557-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
45558+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
45559
45560
45561 static void scsi_host_cls_release(struct device *dev)
45562@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
45563 * subtract one because we increment first then return, but we need to
45564 * know what the next host number was before increment
45565 */
45566- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
45567+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
45568 shost->dma_channel = 0xff;
45569
45570 /* These three are default values which can be overridden */
45571diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
45572index 7f4f790..b75b92a 100644
45573--- a/drivers/scsi/hpsa.c
45574+++ b/drivers/scsi/hpsa.c
45575@@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
45576 unsigned long flags;
45577
45578 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
45579- return h->access.command_completed(h, q);
45580+ return h->access->command_completed(h, q);
45581
45582 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
45583 a = rq->head[rq->current_entry];
45584@@ -3422,7 +3422,7 @@ static void start_io(struct ctlr_info *h)
45585 while (!list_empty(&h->reqQ)) {
45586 c = list_entry(h->reqQ.next, struct CommandList, list);
45587 /* can't do anything if fifo is full */
45588- if ((h->access.fifo_full(h))) {
45589+ if ((h->access->fifo_full(h))) {
45590 dev_warn(&h->pdev->dev, "fifo full\n");
45591 break;
45592 }
45593@@ -3444,7 +3444,7 @@ static void start_io(struct ctlr_info *h)
45594
45595 /* Tell the controller execute command */
45596 spin_unlock_irqrestore(&h->lock, flags);
45597- h->access.submit_command(h, c);
45598+ h->access->submit_command(h, c);
45599 spin_lock_irqsave(&h->lock, flags);
45600 }
45601 spin_unlock_irqrestore(&h->lock, flags);
45602@@ -3452,17 +3452,17 @@ static void start_io(struct ctlr_info *h)
45603
45604 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
45605 {
45606- return h->access.command_completed(h, q);
45607+ return h->access->command_completed(h, q);
45608 }
45609
45610 static inline bool interrupt_pending(struct ctlr_info *h)
45611 {
45612- return h->access.intr_pending(h);
45613+ return h->access->intr_pending(h);
45614 }
45615
45616 static inline long interrupt_not_for_us(struct ctlr_info *h)
45617 {
45618- return (h->access.intr_pending(h) == 0) ||
45619+ return (h->access->intr_pending(h) == 0) ||
45620 (h->interrupts_enabled == 0);
45621 }
45622
45623@@ -4364,7 +4364,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
45624 if (prod_index < 0)
45625 return -ENODEV;
45626 h->product_name = products[prod_index].product_name;
45627- h->access = *(products[prod_index].access);
45628+ h->access = products[prod_index].access;
45629
45630 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
45631 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
45632@@ -4646,7 +4646,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
45633
45634 assert_spin_locked(&lockup_detector_lock);
45635 remove_ctlr_from_lockup_detector_list(h);
45636- h->access.set_intr_mask(h, HPSA_INTR_OFF);
45637+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
45638 spin_lock_irqsave(&h->lock, flags);
45639 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
45640 spin_unlock_irqrestore(&h->lock, flags);
45641@@ -4823,7 +4823,7 @@ reinit_after_soft_reset:
45642 }
45643
45644 /* make sure the board interrupts are off */
45645- h->access.set_intr_mask(h, HPSA_INTR_OFF);
45646+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
45647
45648 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
45649 goto clean2;
45650@@ -4857,7 +4857,7 @@ reinit_after_soft_reset:
45651 * fake ones to scoop up any residual completions.
45652 */
45653 spin_lock_irqsave(&h->lock, flags);
45654- h->access.set_intr_mask(h, HPSA_INTR_OFF);
45655+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
45656 spin_unlock_irqrestore(&h->lock, flags);
45657 free_irqs(h);
45658 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
45659@@ -4876,9 +4876,9 @@ reinit_after_soft_reset:
45660 dev_info(&h->pdev->dev, "Board READY.\n");
45661 dev_info(&h->pdev->dev,
45662 "Waiting for stale completions to drain.\n");
45663- h->access.set_intr_mask(h, HPSA_INTR_ON);
45664+ h->access->set_intr_mask(h, HPSA_INTR_ON);
45665 msleep(10000);
45666- h->access.set_intr_mask(h, HPSA_INTR_OFF);
45667+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
45668
45669 rc = controller_reset_failed(h->cfgtable);
45670 if (rc)
45671@@ -4899,7 +4899,7 @@ reinit_after_soft_reset:
45672 }
45673
45674 /* Turn the interrupts on so we can service requests */
45675- h->access.set_intr_mask(h, HPSA_INTR_ON);
45676+ h->access->set_intr_mask(h, HPSA_INTR_ON);
45677
45678 hpsa_hba_inquiry(h);
45679 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
45680@@ -4954,7 +4954,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
45681 * To write all data in the battery backed cache to disks
45682 */
45683 hpsa_flush_cache(h);
45684- h->access.set_intr_mask(h, HPSA_INTR_OFF);
45685+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
45686 hpsa_free_irqs_and_disable_msix(h);
45687 }
45688
45689@@ -5122,7 +5122,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
45690 return;
45691 }
45692 /* Change the access methods to the performant access methods */
45693- h->access = SA5_performant_access;
45694+ h->access = &SA5_performant_access;
45695 h->transMethod = CFGTBL_Trans_Performant;
45696 }
45697
45698diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
45699index 9816479..c5d4e97 100644
45700--- a/drivers/scsi/hpsa.h
45701+++ b/drivers/scsi/hpsa.h
45702@@ -79,7 +79,7 @@ struct ctlr_info {
45703 unsigned int msix_vector;
45704 unsigned int msi_vector;
45705 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
45706- struct access_method access;
45707+ struct access_method *access;
45708
45709 /* queue and queue Info */
45710 struct list_head reqQ;
45711diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
45712index 8b928c6..9c76300 100644
45713--- a/drivers/scsi/libfc/fc_exch.c
45714+++ b/drivers/scsi/libfc/fc_exch.c
45715@@ -100,12 +100,12 @@ struct fc_exch_mgr {
45716 u16 pool_max_index;
45717
45718 struct {
45719- atomic_t no_free_exch;
45720- atomic_t no_free_exch_xid;
45721- atomic_t xid_not_found;
45722- atomic_t xid_busy;
45723- atomic_t seq_not_found;
45724- atomic_t non_bls_resp;
45725+ atomic_unchecked_t no_free_exch;
45726+ atomic_unchecked_t no_free_exch_xid;
45727+ atomic_unchecked_t xid_not_found;
45728+ atomic_unchecked_t xid_busy;
45729+ atomic_unchecked_t seq_not_found;
45730+ atomic_unchecked_t non_bls_resp;
45731 } stats;
45732 };
45733
45734@@ -736,7 +736,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
45735 /* allocate memory for exchange */
45736 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
45737 if (!ep) {
45738- atomic_inc(&mp->stats.no_free_exch);
45739+ atomic_inc_unchecked(&mp->stats.no_free_exch);
45740 goto out;
45741 }
45742 memset(ep, 0, sizeof(*ep));
45743@@ -797,7 +797,7 @@ out:
45744 return ep;
45745 err:
45746 spin_unlock_bh(&pool->lock);
45747- atomic_inc(&mp->stats.no_free_exch_xid);
45748+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
45749 mempool_free(ep, mp->ep_pool);
45750 return NULL;
45751 }
45752@@ -940,7 +940,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
45753 xid = ntohs(fh->fh_ox_id); /* we originated exch */
45754 ep = fc_exch_find(mp, xid);
45755 if (!ep) {
45756- atomic_inc(&mp->stats.xid_not_found);
45757+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45758 reject = FC_RJT_OX_ID;
45759 goto out;
45760 }
45761@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
45762 ep = fc_exch_find(mp, xid);
45763 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
45764 if (ep) {
45765- atomic_inc(&mp->stats.xid_busy);
45766+ atomic_inc_unchecked(&mp->stats.xid_busy);
45767 reject = FC_RJT_RX_ID;
45768 goto rel;
45769 }
45770@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
45771 }
45772 xid = ep->xid; /* get our XID */
45773 } else if (!ep) {
45774- atomic_inc(&mp->stats.xid_not_found);
45775+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45776 reject = FC_RJT_RX_ID; /* XID not found */
45777 goto out;
45778 }
45779@@ -998,7 +998,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
45780 } else {
45781 sp = &ep->seq;
45782 if (sp->id != fh->fh_seq_id) {
45783- atomic_inc(&mp->stats.seq_not_found);
45784+ atomic_inc_unchecked(&mp->stats.seq_not_found);
45785 if (f_ctl & FC_FC_END_SEQ) {
45786 /*
45787 * Update sequence_id based on incoming last
45788@@ -1448,22 +1448,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
45789
45790 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
45791 if (!ep) {
45792- atomic_inc(&mp->stats.xid_not_found);
45793+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45794 goto out;
45795 }
45796 if (ep->esb_stat & ESB_ST_COMPLETE) {
45797- atomic_inc(&mp->stats.xid_not_found);
45798+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45799 goto rel;
45800 }
45801 if (ep->rxid == FC_XID_UNKNOWN)
45802 ep->rxid = ntohs(fh->fh_rx_id);
45803 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
45804- atomic_inc(&mp->stats.xid_not_found);
45805+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45806 goto rel;
45807 }
45808 if (ep->did != ntoh24(fh->fh_s_id) &&
45809 ep->did != FC_FID_FLOGI) {
45810- atomic_inc(&mp->stats.xid_not_found);
45811+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45812 goto rel;
45813 }
45814 sof = fr_sof(fp);
45815@@ -1472,7 +1472,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
45816 sp->ssb_stat |= SSB_ST_RESP;
45817 sp->id = fh->fh_seq_id;
45818 } else if (sp->id != fh->fh_seq_id) {
45819- atomic_inc(&mp->stats.seq_not_found);
45820+ atomic_inc_unchecked(&mp->stats.seq_not_found);
45821 goto rel;
45822 }
45823
45824@@ -1536,9 +1536,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
45825 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
45826
45827 if (!sp)
45828- atomic_inc(&mp->stats.xid_not_found);
45829+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45830 else
45831- atomic_inc(&mp->stats.non_bls_resp);
45832+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
45833
45834 fc_frame_free(fp);
45835 }
45836@@ -2185,13 +2185,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
45837
45838 list_for_each_entry(ema, &lport->ema_list, ema_list) {
45839 mp = ema->mp;
45840- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
45841+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
45842 st->fc_no_free_exch_xid +=
45843- atomic_read(&mp->stats.no_free_exch_xid);
45844- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
45845- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
45846- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
45847- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
45848+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
45849+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
45850+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
45851+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
45852+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
45853 }
45854 }
45855 EXPORT_SYMBOL(fc_exch_update_stats);
45856diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
45857index 161c98e..6d563b3 100644
45858--- a/drivers/scsi/libsas/sas_ata.c
45859+++ b/drivers/scsi/libsas/sas_ata.c
45860@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
45861 .postreset = ata_std_postreset,
45862 .error_handler = ata_std_error_handler,
45863 .post_internal_cmd = sas_ata_post_internal,
45864- .qc_defer = ata_std_qc_defer,
45865+ .qc_defer = ata_std_qc_defer,
45866 .qc_prep = ata_noop_qc_prep,
45867 .qc_issue = sas_ata_qc_issue,
45868 .qc_fill_rtf = sas_ata_qc_fill_rtf,
45869diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
45870index bcc56ca..6f4174a 100644
45871--- a/drivers/scsi/lpfc/lpfc.h
45872+++ b/drivers/scsi/lpfc/lpfc.h
45873@@ -431,7 +431,7 @@ struct lpfc_vport {
45874 struct dentry *debug_nodelist;
45875 struct dentry *vport_debugfs_root;
45876 struct lpfc_debugfs_trc *disc_trc;
45877- atomic_t disc_trc_cnt;
45878+ atomic_unchecked_t disc_trc_cnt;
45879 #endif
45880 uint8_t stat_data_enabled;
45881 uint8_t stat_data_blocked;
45882@@ -865,8 +865,8 @@ struct lpfc_hba {
45883 struct timer_list fabric_block_timer;
45884 unsigned long bit_flags;
45885 #define FABRIC_COMANDS_BLOCKED 0
45886- atomic_t num_rsrc_err;
45887- atomic_t num_cmd_success;
45888+ atomic_unchecked_t num_rsrc_err;
45889+ atomic_unchecked_t num_cmd_success;
45890 unsigned long last_rsrc_error_time;
45891 unsigned long last_ramp_down_time;
45892 unsigned long last_ramp_up_time;
45893@@ -902,7 +902,7 @@ struct lpfc_hba {
45894
45895 struct dentry *debug_slow_ring_trc;
45896 struct lpfc_debugfs_trc *slow_ring_trc;
45897- atomic_t slow_ring_trc_cnt;
45898+ atomic_unchecked_t slow_ring_trc_cnt;
45899 /* iDiag debugfs sub-directory */
45900 struct dentry *idiag_root;
45901 struct dentry *idiag_pci_cfg;
45902diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
45903index f525ecb..32549a4 100644
45904--- a/drivers/scsi/lpfc/lpfc_debugfs.c
45905+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
45906@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
45907
45908 #include <linux/debugfs.h>
45909
45910-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
45911+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
45912 static unsigned long lpfc_debugfs_start_time = 0L;
45913
45914 /* iDiag */
45915@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
45916 lpfc_debugfs_enable = 0;
45917
45918 len = 0;
45919- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
45920+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
45921 (lpfc_debugfs_max_disc_trc - 1);
45922 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
45923 dtp = vport->disc_trc + i;
45924@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
45925 lpfc_debugfs_enable = 0;
45926
45927 len = 0;
45928- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
45929+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
45930 (lpfc_debugfs_max_slow_ring_trc - 1);
45931 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
45932 dtp = phba->slow_ring_trc + i;
45933@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
45934 !vport || !vport->disc_trc)
45935 return;
45936
45937- index = atomic_inc_return(&vport->disc_trc_cnt) &
45938+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
45939 (lpfc_debugfs_max_disc_trc - 1);
45940 dtp = vport->disc_trc + index;
45941 dtp->fmt = fmt;
45942 dtp->data1 = data1;
45943 dtp->data2 = data2;
45944 dtp->data3 = data3;
45945- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
45946+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
45947 dtp->jif = jiffies;
45948 #endif
45949 return;
45950@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
45951 !phba || !phba->slow_ring_trc)
45952 return;
45953
45954- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
45955+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
45956 (lpfc_debugfs_max_slow_ring_trc - 1);
45957 dtp = phba->slow_ring_trc + index;
45958 dtp->fmt = fmt;
45959 dtp->data1 = data1;
45960 dtp->data2 = data2;
45961 dtp->data3 = data3;
45962- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
45963+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
45964 dtp->jif = jiffies;
45965 #endif
45966 return;
45967@@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
45968 "slow_ring buffer\n");
45969 goto debug_failed;
45970 }
45971- atomic_set(&phba->slow_ring_trc_cnt, 0);
45972+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
45973 memset(phba->slow_ring_trc, 0,
45974 (sizeof(struct lpfc_debugfs_trc) *
45975 lpfc_debugfs_max_slow_ring_trc));
45976@@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
45977 "buffer\n");
45978 goto debug_failed;
45979 }
45980- atomic_set(&vport->disc_trc_cnt, 0);
45981+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
45982
45983 snprintf(name, sizeof(name), "discovery_trace");
45984 vport->debug_disc_trc =
45985diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
45986index cb465b2..2e7b25f 100644
45987--- a/drivers/scsi/lpfc/lpfc_init.c
45988+++ b/drivers/scsi/lpfc/lpfc_init.c
45989@@ -10950,8 +10950,10 @@ lpfc_init(void)
45990 "misc_register returned with status %d", error);
45991
45992 if (lpfc_enable_npiv) {
45993- lpfc_transport_functions.vport_create = lpfc_vport_create;
45994- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
45995+ pax_open_kernel();
45996+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
45997+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
45998+ pax_close_kernel();
45999 }
46000 lpfc_transport_template =
46001 fc_attach_transport(&lpfc_transport_functions);
46002diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
46003index 8523b278e..ce1d812 100644
46004--- a/drivers/scsi/lpfc/lpfc_scsi.c
46005+++ b/drivers/scsi/lpfc/lpfc_scsi.c
46006@@ -331,7 +331,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
46007 uint32_t evt_posted;
46008
46009 spin_lock_irqsave(&phba->hbalock, flags);
46010- atomic_inc(&phba->num_rsrc_err);
46011+ atomic_inc_unchecked(&phba->num_rsrc_err);
46012 phba->last_rsrc_error_time = jiffies;
46013
46014 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
46015@@ -372,7 +372,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
46016 unsigned long flags;
46017 struct lpfc_hba *phba = vport->phba;
46018 uint32_t evt_posted;
46019- atomic_inc(&phba->num_cmd_success);
46020+ atomic_inc_unchecked(&phba->num_cmd_success);
46021
46022 if (vport->cfg_lun_queue_depth <= queue_depth)
46023 return;
46024@@ -416,8 +416,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
46025 unsigned long num_rsrc_err, num_cmd_success;
46026 int i;
46027
46028- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
46029- num_cmd_success = atomic_read(&phba->num_cmd_success);
46030+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
46031+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
46032
46033 /*
46034 * The error and success command counters are global per
46035@@ -445,8 +445,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
46036 }
46037 }
46038 lpfc_destroy_vport_work_array(phba, vports);
46039- atomic_set(&phba->num_rsrc_err, 0);
46040- atomic_set(&phba->num_cmd_success, 0);
46041+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
46042+ atomic_set_unchecked(&phba->num_cmd_success, 0);
46043 }
46044
46045 /**
46046@@ -480,8 +480,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
46047 }
46048 }
46049 lpfc_destroy_vport_work_array(phba, vports);
46050- atomic_set(&phba->num_rsrc_err, 0);
46051- atomic_set(&phba->num_cmd_success, 0);
46052+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
46053+ atomic_set_unchecked(&phba->num_cmd_success, 0);
46054 }
46055
46056 /**
46057diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
46058index 8e1b737..50ff510 100644
46059--- a/drivers/scsi/pmcraid.c
46060+++ b/drivers/scsi/pmcraid.c
46061@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
46062 res->scsi_dev = scsi_dev;
46063 scsi_dev->hostdata = res;
46064 res->change_detected = 0;
46065- atomic_set(&res->read_failures, 0);
46066- atomic_set(&res->write_failures, 0);
46067+ atomic_set_unchecked(&res->read_failures, 0);
46068+ atomic_set_unchecked(&res->write_failures, 0);
46069 rc = 0;
46070 }
46071 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
46072@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
46073
46074 /* If this was a SCSI read/write command keep count of errors */
46075 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
46076- atomic_inc(&res->read_failures);
46077+ atomic_inc_unchecked(&res->read_failures);
46078 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
46079- atomic_inc(&res->write_failures);
46080+ atomic_inc_unchecked(&res->write_failures);
46081
46082 if (!RES_IS_GSCSI(res->cfg_entry) &&
46083 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
46084@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
46085 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
46086 * hrrq_id assigned here in queuecommand
46087 */
46088- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
46089+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
46090 pinstance->num_hrrq;
46091 cmd->cmd_done = pmcraid_io_done;
46092
46093@@ -3846,7 +3846,7 @@ static long pmcraid_ioctl_passthrough(
46094 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
46095 * hrrq_id assigned here in queuecommand
46096 */
46097- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
46098+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
46099 pinstance->num_hrrq;
46100
46101 if (request_size) {
46102@@ -4483,7 +4483,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
46103
46104 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
46105 /* add resources only after host is added into system */
46106- if (!atomic_read(&pinstance->expose_resources))
46107+ if (!atomic_read_unchecked(&pinstance->expose_resources))
46108 return;
46109
46110 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
46111@@ -5310,8 +5310,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
46112 init_waitqueue_head(&pinstance->reset_wait_q);
46113
46114 atomic_set(&pinstance->outstanding_cmds, 0);
46115- atomic_set(&pinstance->last_message_id, 0);
46116- atomic_set(&pinstance->expose_resources, 0);
46117+ atomic_set_unchecked(&pinstance->last_message_id, 0);
46118+ atomic_set_unchecked(&pinstance->expose_resources, 0);
46119
46120 INIT_LIST_HEAD(&pinstance->free_res_q);
46121 INIT_LIST_HEAD(&pinstance->used_res_q);
46122@@ -6024,7 +6024,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
46123 /* Schedule worker thread to handle CCN and take care of adding and
46124 * removing devices to OS
46125 */
46126- atomic_set(&pinstance->expose_resources, 1);
46127+ atomic_set_unchecked(&pinstance->expose_resources, 1);
46128 schedule_work(&pinstance->worker_q);
46129 return rc;
46130
46131diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
46132index e1d150f..6c6df44 100644
46133--- a/drivers/scsi/pmcraid.h
46134+++ b/drivers/scsi/pmcraid.h
46135@@ -748,7 +748,7 @@ struct pmcraid_instance {
46136 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
46137
46138 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
46139- atomic_t last_message_id;
46140+ atomic_unchecked_t last_message_id;
46141
46142 /* configuration table */
46143 struct pmcraid_config_table *cfg_table;
46144@@ -777,7 +777,7 @@ struct pmcraid_instance {
46145 atomic_t outstanding_cmds;
46146
46147 /* should add/delete resources to mid-layer now ?*/
46148- atomic_t expose_resources;
46149+ atomic_unchecked_t expose_resources;
46150
46151
46152
46153@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
46154 struct pmcraid_config_table_entry_ext cfg_entry_ext;
46155 };
46156 struct scsi_device *scsi_dev; /* Link scsi_device structure */
46157- atomic_t read_failures; /* count of failed READ commands */
46158- atomic_t write_failures; /* count of failed WRITE commands */
46159+ atomic_unchecked_t read_failures; /* count of failed READ commands */
46160+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
46161
46162 /* To indicate add/delete/modify during CCN */
46163 u8 change_detected;
46164diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
46165index bf60c63..74d4dce 100644
46166--- a/drivers/scsi/qla2xxx/qla_attr.c
46167+++ b/drivers/scsi/qla2xxx/qla_attr.c
46168@@ -2001,7 +2001,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
46169 return 0;
46170 }
46171
46172-struct fc_function_template qla2xxx_transport_functions = {
46173+fc_function_template_no_const qla2xxx_transport_functions = {
46174
46175 .show_host_node_name = 1,
46176 .show_host_port_name = 1,
46177@@ -2048,7 +2048,7 @@ struct fc_function_template qla2xxx_transport_functions = {
46178 .bsg_timeout = qla24xx_bsg_timeout,
46179 };
46180
46181-struct fc_function_template qla2xxx_transport_vport_functions = {
46182+fc_function_template_no_const qla2xxx_transport_vport_functions = {
46183
46184 .show_host_node_name = 1,
46185 .show_host_port_name = 1,
46186diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
46187index 026bfde..90c4018 100644
46188--- a/drivers/scsi/qla2xxx/qla_gbl.h
46189+++ b/drivers/scsi/qla2xxx/qla_gbl.h
46190@@ -528,8 +528,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
46191 struct device_attribute;
46192 extern struct device_attribute *qla2x00_host_attrs[];
46193 struct fc_function_template;
46194-extern struct fc_function_template qla2xxx_transport_functions;
46195-extern struct fc_function_template qla2xxx_transport_vport_functions;
46196+extern fc_function_template_no_const qla2xxx_transport_functions;
46197+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
46198 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
46199 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
46200 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
46201diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
46202index ad72c1d..afc9a98 100644
46203--- a/drivers/scsi/qla2xxx/qla_os.c
46204+++ b/drivers/scsi/qla2xxx/qla_os.c
46205@@ -1571,8 +1571,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
46206 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
46207 /* Ok, a 64bit DMA mask is applicable. */
46208 ha->flags.enable_64bit_addressing = 1;
46209- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
46210- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
46211+ pax_open_kernel();
46212+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
46213+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
46214+ pax_close_kernel();
46215 return;
46216 }
46217 }
46218diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
46219index ddf16a8..80f4dd0 100644
46220--- a/drivers/scsi/qla4xxx/ql4_def.h
46221+++ b/drivers/scsi/qla4xxx/ql4_def.h
46222@@ -291,7 +291,7 @@ struct ddb_entry {
46223 * (4000 only) */
46224 atomic_t relogin_timer; /* Max Time to wait for
46225 * relogin to complete */
46226- atomic_t relogin_retry_count; /* Num of times relogin has been
46227+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
46228 * retried */
46229 uint32_t default_time2wait; /* Default Min time between
46230 * relogins (+aens) */
46231diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
46232index 4d231c1..2892c37 100644
46233--- a/drivers/scsi/qla4xxx/ql4_os.c
46234+++ b/drivers/scsi/qla4xxx/ql4_os.c
46235@@ -2971,12 +2971,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
46236 */
46237 if (!iscsi_is_session_online(cls_sess)) {
46238 /* Reset retry relogin timer */
46239- atomic_inc(&ddb_entry->relogin_retry_count);
46240+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
46241 DEBUG2(ql4_printk(KERN_INFO, ha,
46242 "%s: index[%d] relogin timed out-retrying"
46243 " relogin (%d), retry (%d)\n", __func__,
46244 ddb_entry->fw_ddb_index,
46245- atomic_read(&ddb_entry->relogin_retry_count),
46246+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
46247 ddb_entry->default_time2wait + 4));
46248 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
46249 atomic_set(&ddb_entry->retry_relogin_timer,
46250@@ -5081,7 +5081,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
46251
46252 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
46253 atomic_set(&ddb_entry->relogin_timer, 0);
46254- atomic_set(&ddb_entry->relogin_retry_count, 0);
46255+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
46256 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
46257 ddb_entry->default_relogin_timeout =
46258 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
46259diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
46260index eaa808e..95f8841 100644
46261--- a/drivers/scsi/scsi.c
46262+++ b/drivers/scsi/scsi.c
46263@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
46264 unsigned long timeout;
46265 int rtn = 0;
46266
46267- atomic_inc(&cmd->device->iorequest_cnt);
46268+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
46269
46270 /* check if the device is still usable */
46271 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
46272diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
46273index 86d5220..f22c51a 100644
46274--- a/drivers/scsi/scsi_lib.c
46275+++ b/drivers/scsi/scsi_lib.c
46276@@ -1458,7 +1458,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
46277 shost = sdev->host;
46278 scsi_init_cmd_errh(cmd);
46279 cmd->result = DID_NO_CONNECT << 16;
46280- atomic_inc(&cmd->device->iorequest_cnt);
46281+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
46282
46283 /*
46284 * SCSI request completion path will do scsi_device_unbusy(),
46285@@ -1484,9 +1484,9 @@ static void scsi_softirq_done(struct request *rq)
46286
46287 INIT_LIST_HEAD(&cmd->eh_entry);
46288
46289- atomic_inc(&cmd->device->iodone_cnt);
46290+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
46291 if (cmd->result)
46292- atomic_inc(&cmd->device->ioerr_cnt);
46293+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
46294
46295 disposition = scsi_decide_disposition(cmd);
46296 if (disposition != SUCCESS &&
46297diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
46298index 931a7d9..0c2a754 100644
46299--- a/drivers/scsi/scsi_sysfs.c
46300+++ b/drivers/scsi/scsi_sysfs.c
46301@@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
46302 char *buf) \
46303 { \
46304 struct scsi_device *sdev = to_scsi_device(dev); \
46305- unsigned long long count = atomic_read(&sdev->field); \
46306+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
46307 return snprintf(buf, 20, "0x%llx\n", count); \
46308 } \
46309 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
46310diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
46311index 84a1fdf..693b0d6 100644
46312--- a/drivers/scsi/scsi_tgt_lib.c
46313+++ b/drivers/scsi/scsi_tgt_lib.c
46314@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
46315 int err;
46316
46317 dprintk("%lx %u\n", uaddr, len);
46318- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
46319+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
46320 if (err) {
46321 /*
46322 * TODO: need to fixup sg_tablesize, max_segment_size,
46323diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
46324index e106c27..11a380e 100644
46325--- a/drivers/scsi/scsi_transport_fc.c
46326+++ b/drivers/scsi/scsi_transport_fc.c
46327@@ -497,7 +497,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
46328 * Netlink Infrastructure
46329 */
46330
46331-static atomic_t fc_event_seq;
46332+static atomic_unchecked_t fc_event_seq;
46333
46334 /**
46335 * fc_get_event_number - Obtain the next sequential FC event number
46336@@ -510,7 +510,7 @@ static atomic_t fc_event_seq;
46337 u32
46338 fc_get_event_number(void)
46339 {
46340- return atomic_add_return(1, &fc_event_seq);
46341+ return atomic_add_return_unchecked(1, &fc_event_seq);
46342 }
46343 EXPORT_SYMBOL(fc_get_event_number);
46344
46345@@ -654,7 +654,7 @@ static __init int fc_transport_init(void)
46346 {
46347 int error;
46348
46349- atomic_set(&fc_event_seq, 0);
46350+ atomic_set_unchecked(&fc_event_seq, 0);
46351
46352 error = transport_class_register(&fc_host_class);
46353 if (error)
46354@@ -844,7 +844,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
46355 char *cp;
46356
46357 *val = simple_strtoul(buf, &cp, 0);
46358- if ((*cp && (*cp != '\n')) || (*val < 0))
46359+ if (*cp && (*cp != '\n'))
46360 return -EINVAL;
46361 /*
46362 * Check for overflow; dev_loss_tmo is u32
46363diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
46364index 133926b..903000d 100644
46365--- a/drivers/scsi/scsi_transport_iscsi.c
46366+++ b/drivers/scsi/scsi_transport_iscsi.c
46367@@ -80,7 +80,7 @@ struct iscsi_internal {
46368 struct transport_container session_cont;
46369 };
46370
46371-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
46372+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
46373 static struct workqueue_struct *iscsi_eh_timer_workq;
46374
46375 static DEFINE_IDA(iscsi_sess_ida);
46376@@ -1738,7 +1738,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
46377 int err;
46378
46379 ihost = shost->shost_data;
46380- session->sid = atomic_add_return(1, &iscsi_session_nr);
46381+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
46382
46383 if (target_id == ISCSI_MAX_TARGET) {
46384 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
46385@@ -3944,7 +3944,7 @@ static __init int iscsi_transport_init(void)
46386 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
46387 ISCSI_TRANSPORT_VERSION);
46388
46389- atomic_set(&iscsi_session_nr, 0);
46390+ atomic_set_unchecked(&iscsi_session_nr, 0);
46391
46392 err = class_register(&iscsi_transport_class);
46393 if (err)
46394diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
46395index f379c7f..e8fc69c 100644
46396--- a/drivers/scsi/scsi_transport_srp.c
46397+++ b/drivers/scsi/scsi_transport_srp.c
46398@@ -33,7 +33,7 @@
46399 #include "scsi_transport_srp_internal.h"
46400
46401 struct srp_host_attrs {
46402- atomic_t next_port_id;
46403+ atomic_unchecked_t next_port_id;
46404 };
46405 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
46406
46407@@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
46408 struct Scsi_Host *shost = dev_to_shost(dev);
46409 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
46410
46411- atomic_set(&srp_host->next_port_id, 0);
46412+ atomic_set_unchecked(&srp_host->next_port_id, 0);
46413 return 0;
46414 }
46415
46416@@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
46417 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
46418 rport->roles = ids->roles;
46419
46420- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
46421+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
46422 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
46423
46424 transport_setup_device(&rport->dev);
46425diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
46426index 610417e..167c46c 100644
46427--- a/drivers/scsi/sd.c
46428+++ b/drivers/scsi/sd.c
46429@@ -2928,7 +2928,7 @@ static int sd_probe(struct device *dev)
46430 sdkp->disk = gd;
46431 sdkp->index = index;
46432 atomic_set(&sdkp->openers, 0);
46433- atomic_set(&sdkp->device->ioerr_cnt, 0);
46434+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
46435
46436 if (!sdp->request_queue->rq_timeout) {
46437 if (sdp->type != TYPE_MOD)
46438@@ -2941,7 +2941,7 @@ static int sd_probe(struct device *dev)
46439 device_initialize(&sdkp->dev);
46440 sdkp->dev.parent = dev;
46441 sdkp->dev.class = &sd_disk_class;
46442- dev_set_name(&sdkp->dev, dev_name(dev));
46443+ dev_set_name(&sdkp->dev, "%s", dev_name(dev));
46444
46445 if (device_add(&sdkp->dev))
46446 goto out_free_index;
46447diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
46448index df5e961..df6b97f 100644
46449--- a/drivers/scsi/sg.c
46450+++ b/drivers/scsi/sg.c
46451@@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
46452 sdp->disk->disk_name,
46453 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
46454 NULL,
46455- (char *)arg);
46456+ (char __user *)arg);
46457 case BLKTRACESTART:
46458 return blk_trace_startstop(sdp->device->request_queue, 1);
46459 case BLKTRACESTOP:
46460diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
46461index 32b7bb1..2f1c4bd 100644
46462--- a/drivers/spi/spi.c
46463+++ b/drivers/spi/spi.c
46464@@ -1631,7 +1631,7 @@ int spi_bus_unlock(struct spi_master *master)
46465 EXPORT_SYMBOL_GPL(spi_bus_unlock);
46466
46467 /* portable code must never pass more than 32 bytes */
46468-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
46469+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
46470
46471 static u8 *buf;
46472
46473diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
46474index 3675020..e80d92c 100644
46475--- a/drivers/staging/media/solo6x10/solo6x10-core.c
46476+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
46477@@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev)
46478
46479 static int solo_sysfs_init(struct solo_dev *solo_dev)
46480 {
46481- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
46482+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
46483 struct device *dev = &solo_dev->dev;
46484 const char *driver;
46485 int i;
46486diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
46487index 34afc16..ffe44dd 100644
46488--- a/drivers/staging/octeon/ethernet-rx.c
46489+++ b/drivers/staging/octeon/ethernet-rx.c
46490@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
46491 /* Increment RX stats for virtual ports */
46492 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
46493 #ifdef CONFIG_64BIT
46494- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
46495- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
46496+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
46497+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
46498 #else
46499- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
46500- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
46501+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
46502+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
46503 #endif
46504 }
46505 netif_receive_skb(skb);
46506@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
46507 dev->name);
46508 */
46509 #ifdef CONFIG_64BIT
46510- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
46511+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
46512 #else
46513- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
46514+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
46515 #endif
46516 dev_kfree_skb_irq(skb);
46517 }
46518diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
46519index c3a90e7..023619a 100644
46520--- a/drivers/staging/octeon/ethernet.c
46521+++ b/drivers/staging/octeon/ethernet.c
46522@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
46523 * since the RX tasklet also increments it.
46524 */
46525 #ifdef CONFIG_64BIT
46526- atomic64_add(rx_status.dropped_packets,
46527- (atomic64_t *)&priv->stats.rx_dropped);
46528+ atomic64_add_unchecked(rx_status.dropped_packets,
46529+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
46530 #else
46531- atomic_add(rx_status.dropped_packets,
46532- (atomic_t *)&priv->stats.rx_dropped);
46533+ atomic_add_unchecked(rx_status.dropped_packets,
46534+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
46535 #endif
46536 }
46537
46538diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
46539index dc23395..cf7e9b1 100644
46540--- a/drivers/staging/rtl8712/rtl871x_io.h
46541+++ b/drivers/staging/rtl8712/rtl871x_io.h
46542@@ -108,7 +108,7 @@ struct _io_ops {
46543 u8 *pmem);
46544 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
46545 u8 *pmem);
46546-};
46547+} __no_const;
46548
46549 struct io_req {
46550 struct list_head list;
46551diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
46552index 1f5088b..0e59820 100644
46553--- a/drivers/staging/sbe-2t3e3/netdev.c
46554+++ b/drivers/staging/sbe-2t3e3/netdev.c
46555@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
46556 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
46557
46558 if (rlen)
46559- if (copy_to_user(data, &resp, rlen))
46560+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
46561 return -EFAULT;
46562
46563 return 0;
46564diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
46565index a863a98..d272795 100644
46566--- a/drivers/staging/usbip/vhci.h
46567+++ b/drivers/staging/usbip/vhci.h
46568@@ -83,7 +83,7 @@ struct vhci_hcd {
46569 unsigned resuming:1;
46570 unsigned long re_timeout;
46571
46572- atomic_t seqnum;
46573+ atomic_unchecked_t seqnum;
46574
46575 /*
46576 * NOTE:
46577diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
46578index d7974cb..d78076b 100644
46579--- a/drivers/staging/usbip/vhci_hcd.c
46580+++ b/drivers/staging/usbip/vhci_hcd.c
46581@@ -441,7 +441,7 @@ static void vhci_tx_urb(struct urb *urb)
46582
46583 spin_lock(&vdev->priv_lock);
46584
46585- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
46586+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
46587 if (priv->seqnum == 0xffff)
46588 dev_info(&urb->dev->dev, "seqnum max\n");
46589
46590@@ -687,7 +687,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
46591 return -ENOMEM;
46592 }
46593
46594- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
46595+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
46596 if (unlink->seqnum == 0xffff)
46597 pr_info("seqnum max\n");
46598
46599@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
46600 vdev->rhport = rhport;
46601 }
46602
46603- atomic_set(&vhci->seqnum, 0);
46604+ atomic_set_unchecked(&vhci->seqnum, 0);
46605 spin_lock_init(&vhci->lock);
46606
46607 hcd->power_budget = 0; /* no limit */
46608diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
46609index d07fcb5..358e1e1 100644
46610--- a/drivers/staging/usbip/vhci_rx.c
46611+++ b/drivers/staging/usbip/vhci_rx.c
46612@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
46613 if (!urb) {
46614 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
46615 pr_info("max seqnum %d\n",
46616- atomic_read(&the_controller->seqnum));
46617+ atomic_read_unchecked(&the_controller->seqnum));
46618 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
46619 return;
46620 }
46621diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
46622index 8417c2f..ef5ebd6 100644
46623--- a/drivers/staging/vt6655/hostap.c
46624+++ b/drivers/staging/vt6655/hostap.c
46625@@ -69,14 +69,13 @@ static int msglevel = MSG_LEVEL_INFO;
46626 *
46627 */
46628
46629+static net_device_ops_no_const apdev_netdev_ops;
46630+
46631 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
46632 {
46633 PSDevice apdev_priv;
46634 struct net_device *dev = pDevice->dev;
46635 int ret;
46636- const struct net_device_ops apdev_netdev_ops = {
46637- .ndo_start_xmit = pDevice->tx_80211,
46638- };
46639
46640 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
46641
46642@@ -88,6 +87,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
46643 *apdev_priv = *pDevice;
46644 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
46645
46646+ /* only half broken now */
46647+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
46648 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
46649
46650 pDevice->apdev->type = ARPHRD_IEEE80211;
46651diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
46652index c699a30..b90a5fd 100644
46653--- a/drivers/staging/vt6656/hostap.c
46654+++ b/drivers/staging/vt6656/hostap.c
46655@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
46656 *
46657 */
46658
46659+static net_device_ops_no_const apdev_netdev_ops;
46660+
46661 static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
46662 {
46663 struct vnt_private *apdev_priv;
46664 struct net_device *dev = pDevice->dev;
46665 int ret;
46666- const struct net_device_ops apdev_netdev_ops = {
46667- .ndo_start_xmit = pDevice->tx_80211,
46668- };
46669
46670 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
46671
46672@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
46673 *apdev_priv = *pDevice;
46674 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
46675
46676+ /* only half broken now */
46677+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
46678 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
46679
46680 pDevice->apdev->type = ARPHRD_IEEE80211;
46681diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
46682index d128ce2..fc1f9a1 100644
46683--- a/drivers/staging/zcache/tmem.h
46684+++ b/drivers/staging/zcache/tmem.h
46685@@ -225,7 +225,7 @@ struct tmem_pamops {
46686 bool (*is_remote)(void *);
46687 int (*replace_in_obj)(void *, struct tmem_obj *);
46688 #endif
46689-};
46690+} __no_const;
46691 extern void tmem_register_pamops(struct tmem_pamops *m);
46692
46693 /* memory allocation methods provided by the host implementation */
46694@@ -234,7 +234,7 @@ struct tmem_hostops {
46695 void (*obj_free)(struct tmem_obj *, struct tmem_pool *);
46696 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
46697 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
46698-};
46699+} __no_const;
46700 extern void tmem_register_hostops(struct tmem_hostops *m);
46701
46702 /* core tmem accessor functions */
46703diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
46704index 4630481..c26782a 100644
46705--- a/drivers/target/target_core_device.c
46706+++ b/drivers/target/target_core_device.c
46707@@ -1400,7 +1400,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
46708 spin_lock_init(&dev->se_port_lock);
46709 spin_lock_init(&dev->se_tmr_lock);
46710 spin_lock_init(&dev->qf_cmd_lock);
46711- atomic_set(&dev->dev_ordered_id, 0);
46712+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
46713 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
46714 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
46715 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
46716diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
46717index 21e3158..43c6004 100644
46718--- a/drivers/target/target_core_transport.c
46719+++ b/drivers/target/target_core_transport.c
46720@@ -1080,7 +1080,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
46721 * Used to determine when ORDERED commands should go from
46722 * Dormant to Active status.
46723 */
46724- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
46725+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
46726 smp_mb__after_atomic_inc();
46727 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
46728 cmd->se_ordered_id, cmd->sam_task_attr,
46729diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
46730index 33f83fe..d80f8e1 100644
46731--- a/drivers/tty/cyclades.c
46732+++ b/drivers/tty/cyclades.c
46733@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
46734 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
46735 info->port.count);
46736 #endif
46737- info->port.count++;
46738+ atomic_inc(&info->port.count);
46739 #ifdef CY_DEBUG_COUNT
46740 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
46741- current->pid, info->port.count);
46742+ current->pid, atomic_read(&info->port.count));
46743 #endif
46744
46745 /*
46746@@ -3972,7 +3972,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
46747 for (j = 0; j < cy_card[i].nports; j++) {
46748 info = &cy_card[i].ports[j];
46749
46750- if (info->port.count) {
46751+ if (atomic_read(&info->port.count)) {
46752 /* XXX is the ldisc num worth this? */
46753 struct tty_struct *tty;
46754 struct tty_ldisc *ld;
46755diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
46756index eb255e8..f637a57 100644
46757--- a/drivers/tty/hvc/hvc_console.c
46758+++ b/drivers/tty/hvc/hvc_console.c
46759@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
46760
46761 spin_lock_irqsave(&hp->port.lock, flags);
46762 /* Check and then increment for fast path open. */
46763- if (hp->port.count++ > 0) {
46764+ if (atomic_inc_return(&hp->port.count) > 1) {
46765 spin_unlock_irqrestore(&hp->port.lock, flags);
46766 hvc_kick();
46767 return 0;
46768@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
46769
46770 spin_lock_irqsave(&hp->port.lock, flags);
46771
46772- if (--hp->port.count == 0) {
46773+ if (atomic_dec_return(&hp->port.count) == 0) {
46774 spin_unlock_irqrestore(&hp->port.lock, flags);
46775 /* We are done with the tty pointer now. */
46776 tty_port_tty_set(&hp->port, NULL);
46777@@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
46778 */
46779 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
46780 } else {
46781- if (hp->port.count < 0)
46782+ if (atomic_read(&hp->port.count) < 0)
46783 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
46784- hp->vtermno, hp->port.count);
46785+ hp->vtermno, atomic_read(&hp->port.count));
46786 spin_unlock_irqrestore(&hp->port.lock, flags);
46787 }
46788 }
46789@@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
46790 * open->hangup case this can be called after the final close so prevent
46791 * that from happening for now.
46792 */
46793- if (hp->port.count <= 0) {
46794+ if (atomic_read(&hp->port.count) <= 0) {
46795 spin_unlock_irqrestore(&hp->port.lock, flags);
46796 return;
46797 }
46798
46799- hp->port.count = 0;
46800+ atomic_set(&hp->port.count, 0);
46801 spin_unlock_irqrestore(&hp->port.lock, flags);
46802 tty_port_tty_set(&hp->port, NULL);
46803
46804@@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
46805 return -EPIPE;
46806
46807 /* FIXME what's this (unprotected) check for? */
46808- if (hp->port.count <= 0)
46809+ if (atomic_read(&hp->port.count) <= 0)
46810 return -EIO;
46811
46812 spin_lock_irqsave(&hp->lock, flags);
46813diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
46814index 81e939e..95ead10 100644
46815--- a/drivers/tty/hvc/hvcs.c
46816+++ b/drivers/tty/hvc/hvcs.c
46817@@ -83,6 +83,7 @@
46818 #include <asm/hvcserver.h>
46819 #include <asm/uaccess.h>
46820 #include <asm/vio.h>
46821+#include <asm/local.h>
46822
46823 /*
46824 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
46825@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
46826
46827 spin_lock_irqsave(&hvcsd->lock, flags);
46828
46829- if (hvcsd->port.count > 0) {
46830+ if (atomic_read(&hvcsd->port.count) > 0) {
46831 spin_unlock_irqrestore(&hvcsd->lock, flags);
46832 printk(KERN_INFO "HVCS: vterm state unchanged. "
46833 "The hvcs device node is still in use.\n");
46834@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
46835 }
46836 }
46837
46838- hvcsd->port.count = 0;
46839+ atomic_set(&hvcsd->port.count, 0);
46840 hvcsd->port.tty = tty;
46841 tty->driver_data = hvcsd;
46842
46843@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
46844 unsigned long flags;
46845
46846 spin_lock_irqsave(&hvcsd->lock, flags);
46847- hvcsd->port.count++;
46848+ atomic_inc(&hvcsd->port.count);
46849 hvcsd->todo_mask |= HVCS_SCHED_READ;
46850 spin_unlock_irqrestore(&hvcsd->lock, flags);
46851
46852@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
46853 hvcsd = tty->driver_data;
46854
46855 spin_lock_irqsave(&hvcsd->lock, flags);
46856- if (--hvcsd->port.count == 0) {
46857+ if (atomic_dec_and_test(&hvcsd->port.count)) {
46858
46859 vio_disable_interrupts(hvcsd->vdev);
46860
46861@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
46862
46863 free_irq(irq, hvcsd);
46864 return;
46865- } else if (hvcsd->port.count < 0) {
46866+ } else if (atomic_read(&hvcsd->port.count) < 0) {
46867 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
46868 " is missmanaged.\n",
46869- hvcsd->vdev->unit_address, hvcsd->port.count);
46870+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
46871 }
46872
46873 spin_unlock_irqrestore(&hvcsd->lock, flags);
46874@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
46875
46876 spin_lock_irqsave(&hvcsd->lock, flags);
46877 /* Preserve this so that we know how many kref refs to put */
46878- temp_open_count = hvcsd->port.count;
46879+ temp_open_count = atomic_read(&hvcsd->port.count);
46880
46881 /*
46882 * Don't kref put inside the spinlock because the destruction
46883@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
46884 tty->driver_data = NULL;
46885 hvcsd->port.tty = NULL;
46886
46887- hvcsd->port.count = 0;
46888+ atomic_set(&hvcsd->port.count, 0);
46889
46890 /* This will drop any buffered data on the floor which is OK in a hangup
46891 * scenario. */
46892@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
46893 * the middle of a write operation? This is a crummy place to do this
46894 * but we want to keep it all in the spinlock.
46895 */
46896- if (hvcsd->port.count <= 0) {
46897+ if (atomic_read(&hvcsd->port.count) <= 0) {
46898 spin_unlock_irqrestore(&hvcsd->lock, flags);
46899 return -ENODEV;
46900 }
46901@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
46902 {
46903 struct hvcs_struct *hvcsd = tty->driver_data;
46904
46905- if (!hvcsd || hvcsd->port.count <= 0)
46906+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
46907 return 0;
46908
46909 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
46910diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
46911index 8fd72ff..34a0bed 100644
46912--- a/drivers/tty/ipwireless/tty.c
46913+++ b/drivers/tty/ipwireless/tty.c
46914@@ -29,6 +29,7 @@
46915 #include <linux/tty_driver.h>
46916 #include <linux/tty_flip.h>
46917 #include <linux/uaccess.h>
46918+#include <asm/local.h>
46919
46920 #include "tty.h"
46921 #include "network.h"
46922@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
46923 mutex_unlock(&tty->ipw_tty_mutex);
46924 return -ENODEV;
46925 }
46926- if (tty->port.count == 0)
46927+ if (atomic_read(&tty->port.count) == 0)
46928 tty->tx_bytes_queued = 0;
46929
46930- tty->port.count++;
46931+ atomic_inc(&tty->port.count);
46932
46933 tty->port.tty = linux_tty;
46934 linux_tty->driver_data = tty;
46935@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
46936
46937 static void do_ipw_close(struct ipw_tty *tty)
46938 {
46939- tty->port.count--;
46940-
46941- if (tty->port.count == 0) {
46942+ if (atomic_dec_return(&tty->port.count) == 0) {
46943 struct tty_struct *linux_tty = tty->port.tty;
46944
46945 if (linux_tty != NULL) {
46946@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
46947 return;
46948
46949 mutex_lock(&tty->ipw_tty_mutex);
46950- if (tty->port.count == 0) {
46951+ if (atomic_read(&tty->port.count) == 0) {
46952 mutex_unlock(&tty->ipw_tty_mutex);
46953 return;
46954 }
46955@@ -164,7 +163,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
46956
46957 mutex_lock(&tty->ipw_tty_mutex);
46958
46959- if (!tty->port.count) {
46960+ if (!atomic_read(&tty->port.count)) {
46961 mutex_unlock(&tty->ipw_tty_mutex);
46962 return;
46963 }
46964@@ -206,7 +205,7 @@ static int ipw_write(struct tty_struct *linux_tty,
46965 return -ENODEV;
46966
46967 mutex_lock(&tty->ipw_tty_mutex);
46968- if (!tty->port.count) {
46969+ if (!atomic_read(&tty->port.count)) {
46970 mutex_unlock(&tty->ipw_tty_mutex);
46971 return -EINVAL;
46972 }
46973@@ -246,7 +245,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
46974 if (!tty)
46975 return -ENODEV;
46976
46977- if (!tty->port.count)
46978+ if (!atomic_read(&tty->port.count))
46979 return -EINVAL;
46980
46981 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
46982@@ -288,7 +287,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
46983 if (!tty)
46984 return 0;
46985
46986- if (!tty->port.count)
46987+ if (!atomic_read(&tty->port.count))
46988 return 0;
46989
46990 return tty->tx_bytes_queued;
46991@@ -369,7 +368,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
46992 if (!tty)
46993 return -ENODEV;
46994
46995- if (!tty->port.count)
46996+ if (!atomic_read(&tty->port.count))
46997 return -EINVAL;
46998
46999 return get_control_lines(tty);
47000@@ -385,7 +384,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
47001 if (!tty)
47002 return -ENODEV;
47003
47004- if (!tty->port.count)
47005+ if (!atomic_read(&tty->port.count))
47006 return -EINVAL;
47007
47008 return set_control_lines(tty, set, clear);
47009@@ -399,7 +398,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
47010 if (!tty)
47011 return -ENODEV;
47012
47013- if (!tty->port.count)
47014+ if (!atomic_read(&tty->port.count))
47015 return -EINVAL;
47016
47017 /* FIXME: Exactly how is the tty object locked here .. */
47018@@ -555,7 +554,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
47019 * are gone */
47020 mutex_lock(&ttyj->ipw_tty_mutex);
47021 }
47022- while (ttyj->port.count)
47023+ while (atomic_read(&ttyj->port.count))
47024 do_ipw_close(ttyj);
47025 ipwireless_disassociate_network_ttys(network,
47026 ttyj->channel_idx);
47027diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
47028index 1deaca4..c8582d4 100644
47029--- a/drivers/tty/moxa.c
47030+++ b/drivers/tty/moxa.c
47031@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
47032 }
47033
47034 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
47035- ch->port.count++;
47036+ atomic_inc(&ch->port.count);
47037 tty->driver_data = ch;
47038 tty_port_tty_set(&ch->port, tty);
47039 mutex_lock(&ch->port.mutex);
47040diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
47041index 6422390..49003ac8 100644
47042--- a/drivers/tty/n_gsm.c
47043+++ b/drivers/tty/n_gsm.c
47044@@ -1632,7 +1632,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
47045 spin_lock_init(&dlci->lock);
47046 mutex_init(&dlci->mutex);
47047 dlci->fifo = &dlci->_fifo;
47048- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
47049+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
47050 kfree(dlci);
47051 return NULL;
47052 }
47053@@ -2932,7 +2932,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
47054 struct gsm_dlci *dlci = tty->driver_data;
47055 struct tty_port *port = &dlci->port;
47056
47057- port->count++;
47058+ atomic_inc(&port->count);
47059 dlci_get(dlci);
47060 dlci_get(dlci->gsm->dlci[0]);
47061 mux_get(dlci->gsm);
47062diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
47063index 6c7fe90..9241dab 100644
47064--- a/drivers/tty/n_tty.c
47065+++ b/drivers/tty/n_tty.c
47066@@ -2203,6 +2203,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
47067 {
47068 *ops = tty_ldisc_N_TTY;
47069 ops->owner = NULL;
47070- ops->refcount = ops->flags = 0;
47071+ atomic_set(&ops->refcount, 0);
47072+ ops->flags = 0;
47073 }
47074 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
47075diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
47076index abfd990..5ab5da9 100644
47077--- a/drivers/tty/pty.c
47078+++ b/drivers/tty/pty.c
47079@@ -796,8 +796,10 @@ static void __init unix98_pty_init(void)
47080 panic("Couldn't register Unix98 pts driver");
47081
47082 /* Now create the /dev/ptmx special device */
47083+ pax_open_kernel();
47084 tty_default_fops(&ptmx_fops);
47085- ptmx_fops.open = ptmx_open;
47086+ *(void **)&ptmx_fops.open = ptmx_open;
47087+ pax_close_kernel();
47088
47089 cdev_init(&ptmx_cdev, &ptmx_fops);
47090 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
47091diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
47092index 354564e..fe50d9a 100644
47093--- a/drivers/tty/rocket.c
47094+++ b/drivers/tty/rocket.c
47095@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
47096 tty->driver_data = info;
47097 tty_port_tty_set(port, tty);
47098
47099- if (port->count++ == 0) {
47100+ if (atomic_inc_return(&port->count) == 1) {
47101 atomic_inc(&rp_num_ports_open);
47102
47103 #ifdef ROCKET_DEBUG_OPEN
47104@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
47105 #endif
47106 }
47107 #ifdef ROCKET_DEBUG_OPEN
47108- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
47109+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
47110 #endif
47111
47112 /*
47113@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
47114 spin_unlock_irqrestore(&info->port.lock, flags);
47115 return;
47116 }
47117- if (info->port.count)
47118+ if (atomic_read(&info->port.count))
47119 atomic_dec(&rp_num_ports_open);
47120 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
47121 spin_unlock_irqrestore(&info->port.lock, flags);
47122diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
47123index 1002054..dd644a8 100644
47124--- a/drivers/tty/serial/kgdboc.c
47125+++ b/drivers/tty/serial/kgdboc.c
47126@@ -24,8 +24,9 @@
47127 #define MAX_CONFIG_LEN 40
47128
47129 static struct kgdb_io kgdboc_io_ops;
47130+static struct kgdb_io kgdboc_io_ops_console;
47131
47132-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
47133+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
47134 static int configured = -1;
47135
47136 static char config[MAX_CONFIG_LEN];
47137@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
47138 kgdboc_unregister_kbd();
47139 if (configured == 1)
47140 kgdb_unregister_io_module(&kgdboc_io_ops);
47141+ else if (configured == 2)
47142+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
47143 }
47144
47145 static int configure_kgdboc(void)
47146@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
47147 int err;
47148 char *cptr = config;
47149 struct console *cons;
47150+ int is_console = 0;
47151
47152 err = kgdboc_option_setup(config);
47153 if (err || !strlen(config) || isspace(config[0]))
47154 goto noconfig;
47155
47156 err = -ENODEV;
47157- kgdboc_io_ops.is_console = 0;
47158 kgdb_tty_driver = NULL;
47159
47160 kgdboc_use_kms = 0;
47161@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
47162 int idx;
47163 if (cons->device && cons->device(cons, &idx) == p &&
47164 idx == tty_line) {
47165- kgdboc_io_ops.is_console = 1;
47166+ is_console = 1;
47167 break;
47168 }
47169 cons = cons->next;
47170@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
47171 kgdb_tty_line = tty_line;
47172
47173 do_register:
47174- err = kgdb_register_io_module(&kgdboc_io_ops);
47175+ if (is_console) {
47176+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
47177+ configured = 2;
47178+ } else {
47179+ err = kgdb_register_io_module(&kgdboc_io_ops);
47180+ configured = 1;
47181+ }
47182 if (err)
47183 goto noconfig;
47184
47185@@ -205,8 +214,6 @@ do_register:
47186 if (err)
47187 goto nmi_con_failed;
47188
47189- configured = 1;
47190-
47191 return 0;
47192
47193 nmi_con_failed:
47194@@ -223,7 +230,7 @@ noconfig:
47195 static int __init init_kgdboc(void)
47196 {
47197 /* Already configured? */
47198- if (configured == 1)
47199+ if (configured >= 1)
47200 return 0;
47201
47202 return configure_kgdboc();
47203@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
47204 if (config[len - 1] == '\n')
47205 config[len - 1] = '\0';
47206
47207- if (configured == 1)
47208+ if (configured >= 1)
47209 cleanup_kgdboc();
47210
47211 /* Go and configure with the new params. */
47212@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
47213 .post_exception = kgdboc_post_exp_handler,
47214 };
47215
47216+static struct kgdb_io kgdboc_io_ops_console = {
47217+ .name = "kgdboc",
47218+ .read_char = kgdboc_get_char,
47219+ .write_char = kgdboc_put_char,
47220+ .pre_exception = kgdboc_pre_exp_handler,
47221+ .post_exception = kgdboc_post_exp_handler,
47222+ .is_console = 1
47223+};
47224+
47225 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
47226 /* This is only available if kgdboc is a built in for early debugging */
47227 static int __init kgdboc_early_init(char *opt)
47228diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
47229index 0c8a9fa..234a95f 100644
47230--- a/drivers/tty/serial/samsung.c
47231+++ b/drivers/tty/serial/samsung.c
47232@@ -453,11 +453,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
47233 }
47234 }
47235
47236+static int s3c64xx_serial_startup(struct uart_port *port);
47237 static int s3c24xx_serial_startup(struct uart_port *port)
47238 {
47239 struct s3c24xx_uart_port *ourport = to_ourport(port);
47240 int ret;
47241
47242+ /* Startup sequence is different for s3c64xx and higher SoC's */
47243+ if (s3c24xx_serial_has_interrupt_mask(port))
47244+ return s3c64xx_serial_startup(port);
47245+
47246 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
47247 port->mapbase, port->membase);
47248
47249@@ -1124,10 +1129,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
47250 /* setup info for port */
47251 port->dev = &platdev->dev;
47252
47253- /* Startup sequence is different for s3c64xx and higher SoC's */
47254- if (s3c24xx_serial_has_interrupt_mask(port))
47255- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
47256-
47257 port->uartclk = 1;
47258
47259 if (cfg->uart_flags & UPF_CONS_FLOW) {
47260diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
47261index f87dbfd..42ad4b1 100644
47262--- a/drivers/tty/serial/serial_core.c
47263+++ b/drivers/tty/serial/serial_core.c
47264@@ -1454,7 +1454,7 @@ static void uart_hangup(struct tty_struct *tty)
47265 uart_flush_buffer(tty);
47266 uart_shutdown(tty, state);
47267 spin_lock_irqsave(&port->lock, flags);
47268- port->count = 0;
47269+ atomic_set(&port->count, 0);
47270 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
47271 spin_unlock_irqrestore(&port->lock, flags);
47272 tty_port_tty_set(port, NULL);
47273@@ -1550,7 +1550,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
47274 goto end;
47275 }
47276
47277- port->count++;
47278+ atomic_inc(&port->count);
47279 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
47280 retval = -ENXIO;
47281 goto err_dec_count;
47282@@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
47283 /*
47284 * Make sure the device is in D0 state.
47285 */
47286- if (port->count == 1)
47287+ if (atomic_read(&port->count) == 1)
47288 uart_change_pm(state, UART_PM_STATE_ON);
47289
47290 /*
47291@@ -1596,7 +1596,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
47292 end:
47293 return retval;
47294 err_dec_count:
47295- port->count--;
47296+ atomic_dec(&port->count);
47297 mutex_unlock(&port->mutex);
47298 goto end;
47299 }
47300diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
47301index 8eaf1ab..85c030d 100644
47302--- a/drivers/tty/synclink.c
47303+++ b/drivers/tty/synclink.c
47304@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
47305
47306 if (debug_level >= DEBUG_LEVEL_INFO)
47307 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
47308- __FILE__,__LINE__, info->device_name, info->port.count);
47309+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
47310
47311 if (tty_port_close_start(&info->port, tty, filp) == 0)
47312 goto cleanup;
47313@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
47314 cleanup:
47315 if (debug_level >= DEBUG_LEVEL_INFO)
47316 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
47317- tty->driver->name, info->port.count);
47318+ tty->driver->name, atomic_read(&info->port.count));
47319
47320 } /* end of mgsl_close() */
47321
47322@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
47323
47324 mgsl_flush_buffer(tty);
47325 shutdown(info);
47326-
47327- info->port.count = 0;
47328+
47329+ atomic_set(&info->port.count, 0);
47330 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
47331 info->port.tty = NULL;
47332
47333@@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
47334
47335 if (debug_level >= DEBUG_LEVEL_INFO)
47336 printk("%s(%d):block_til_ready before block on %s count=%d\n",
47337- __FILE__,__LINE__, tty->driver->name, port->count );
47338+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
47339
47340 spin_lock_irqsave(&info->irq_spinlock, flags);
47341 if (!tty_hung_up_p(filp)) {
47342 extra_count = true;
47343- port->count--;
47344+ atomic_dec(&port->count);
47345 }
47346 spin_unlock_irqrestore(&info->irq_spinlock, flags);
47347 port->blocked_open++;
47348@@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
47349
47350 if (debug_level >= DEBUG_LEVEL_INFO)
47351 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
47352- __FILE__,__LINE__, tty->driver->name, port->count );
47353+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
47354
47355 tty_unlock(tty);
47356 schedule();
47357@@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
47358
47359 /* FIXME: Racy on hangup during close wait */
47360 if (extra_count)
47361- port->count++;
47362+ atomic_inc(&port->count);
47363 port->blocked_open--;
47364
47365 if (debug_level >= DEBUG_LEVEL_INFO)
47366 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
47367- __FILE__,__LINE__, tty->driver->name, port->count );
47368+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
47369
47370 if (!retval)
47371 port->flags |= ASYNC_NORMAL_ACTIVE;
47372@@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
47373
47374 if (debug_level >= DEBUG_LEVEL_INFO)
47375 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
47376- __FILE__,__LINE__,tty->driver->name, info->port.count);
47377+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
47378
47379 /* If port is closing, signal caller to try again */
47380 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
47381@@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
47382 spin_unlock_irqrestore(&info->netlock, flags);
47383 goto cleanup;
47384 }
47385- info->port.count++;
47386+ atomic_inc(&info->port.count);
47387 spin_unlock_irqrestore(&info->netlock, flags);
47388
47389- if (info->port.count == 1) {
47390+ if (atomic_read(&info->port.count) == 1) {
47391 /* 1st open on this device, init hardware */
47392 retval = startup(info);
47393 if (retval < 0)
47394@@ -3446,8 +3446,8 @@ cleanup:
47395 if (retval) {
47396 if (tty->count == 1)
47397 info->port.tty = NULL; /* tty layer will release tty struct */
47398- if(info->port.count)
47399- info->port.count--;
47400+ if (atomic_read(&info->port.count))
47401+ atomic_dec(&info->port.count);
47402 }
47403
47404 return retval;
47405@@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
47406 unsigned short new_crctype;
47407
47408 /* return error if TTY interface open */
47409- if (info->port.count)
47410+ if (atomic_read(&info->port.count))
47411 return -EBUSY;
47412
47413 switch (encoding)
47414@@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev)
47415
47416 /* arbitrate between network and tty opens */
47417 spin_lock_irqsave(&info->netlock, flags);
47418- if (info->port.count != 0 || info->netcount != 0) {
47419+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
47420 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
47421 spin_unlock_irqrestore(&info->netlock, flags);
47422 return -EBUSY;
47423@@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
47424 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
47425
47426 /* return error if TTY interface open */
47427- if (info->port.count)
47428+ if (atomic_read(&info->port.count))
47429 return -EBUSY;
47430
47431 if (cmd != SIOCWANDEV)
47432diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
47433index 1abf946..1ee34fc 100644
47434--- a/drivers/tty/synclink_gt.c
47435+++ b/drivers/tty/synclink_gt.c
47436@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
47437 tty->driver_data = info;
47438 info->port.tty = tty;
47439
47440- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
47441+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
47442
47443 /* If port is closing, signal caller to try again */
47444 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
47445@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
47446 mutex_unlock(&info->port.mutex);
47447 goto cleanup;
47448 }
47449- info->port.count++;
47450+ atomic_inc(&info->port.count);
47451 spin_unlock_irqrestore(&info->netlock, flags);
47452
47453- if (info->port.count == 1) {
47454+ if (atomic_read(&info->port.count) == 1) {
47455 /* 1st open on this device, init hardware */
47456 retval = startup(info);
47457 if (retval < 0) {
47458@@ -715,8 +715,8 @@ cleanup:
47459 if (retval) {
47460 if (tty->count == 1)
47461 info->port.tty = NULL; /* tty layer will release tty struct */
47462- if(info->port.count)
47463- info->port.count--;
47464+ if(atomic_read(&info->port.count))
47465+ atomic_dec(&info->port.count);
47466 }
47467
47468 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
47469@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
47470
47471 if (sanity_check(info, tty->name, "close"))
47472 return;
47473- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
47474+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
47475
47476 if (tty_port_close_start(&info->port, tty, filp) == 0)
47477 goto cleanup;
47478@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
47479 tty_port_close_end(&info->port, tty);
47480 info->port.tty = NULL;
47481 cleanup:
47482- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
47483+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
47484 }
47485
47486 static void hangup(struct tty_struct *tty)
47487@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
47488 shutdown(info);
47489
47490 spin_lock_irqsave(&info->port.lock, flags);
47491- info->port.count = 0;
47492+ atomic_set(&info->port.count, 0);
47493 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
47494 info->port.tty = NULL;
47495 spin_unlock_irqrestore(&info->port.lock, flags);
47496@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
47497 unsigned short new_crctype;
47498
47499 /* return error if TTY interface open */
47500- if (info->port.count)
47501+ if (atomic_read(&info->port.count))
47502 return -EBUSY;
47503
47504 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
47505@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
47506
47507 /* arbitrate between network and tty opens */
47508 spin_lock_irqsave(&info->netlock, flags);
47509- if (info->port.count != 0 || info->netcount != 0) {
47510+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
47511 DBGINFO(("%s hdlc_open busy\n", dev->name));
47512 spin_unlock_irqrestore(&info->netlock, flags);
47513 return -EBUSY;
47514@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
47515 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
47516
47517 /* return error if TTY interface open */
47518- if (info->port.count)
47519+ if (atomic_read(&info->port.count))
47520 return -EBUSY;
47521
47522 if (cmd != SIOCWANDEV)
47523@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
47524 if (port == NULL)
47525 continue;
47526 spin_lock(&port->lock);
47527- if ((port->port.count || port->netcount) &&
47528+ if ((atomic_read(&port->port.count) || port->netcount) &&
47529 port->pending_bh && !port->bh_running &&
47530 !port->bh_requested) {
47531 DBGISR(("%s bh queued\n", port->device_name));
47532@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
47533 spin_lock_irqsave(&info->lock, flags);
47534 if (!tty_hung_up_p(filp)) {
47535 extra_count = true;
47536- port->count--;
47537+ atomic_dec(&port->count);
47538 }
47539 spin_unlock_irqrestore(&info->lock, flags);
47540 port->blocked_open++;
47541@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
47542 remove_wait_queue(&port->open_wait, &wait);
47543
47544 if (extra_count)
47545- port->count++;
47546+ atomic_inc(&port->count);
47547 port->blocked_open--;
47548
47549 if (!retval)
47550diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
47551index ff17138..e38b41e 100644
47552--- a/drivers/tty/synclinkmp.c
47553+++ b/drivers/tty/synclinkmp.c
47554@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
47555
47556 if (debug_level >= DEBUG_LEVEL_INFO)
47557 printk("%s(%d):%s open(), old ref count = %d\n",
47558- __FILE__,__LINE__,tty->driver->name, info->port.count);
47559+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
47560
47561 /* If port is closing, signal caller to try again */
47562 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
47563@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
47564 spin_unlock_irqrestore(&info->netlock, flags);
47565 goto cleanup;
47566 }
47567- info->port.count++;
47568+ atomic_inc(&info->port.count);
47569 spin_unlock_irqrestore(&info->netlock, flags);
47570
47571- if (info->port.count == 1) {
47572+ if (atomic_read(&info->port.count) == 1) {
47573 /* 1st open on this device, init hardware */
47574 retval = startup(info);
47575 if (retval < 0)
47576@@ -796,8 +796,8 @@ cleanup:
47577 if (retval) {
47578 if (tty->count == 1)
47579 info->port.tty = NULL; /* tty layer will release tty struct */
47580- if(info->port.count)
47581- info->port.count--;
47582+ if(atomic_read(&info->port.count))
47583+ atomic_dec(&info->port.count);
47584 }
47585
47586 return retval;
47587@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
47588
47589 if (debug_level >= DEBUG_LEVEL_INFO)
47590 printk("%s(%d):%s close() entry, count=%d\n",
47591- __FILE__,__LINE__, info->device_name, info->port.count);
47592+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
47593
47594 if (tty_port_close_start(&info->port, tty, filp) == 0)
47595 goto cleanup;
47596@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
47597 cleanup:
47598 if (debug_level >= DEBUG_LEVEL_INFO)
47599 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
47600- tty->driver->name, info->port.count);
47601+ tty->driver->name, atomic_read(&info->port.count));
47602 }
47603
47604 /* Called by tty_hangup() when a hangup is signaled.
47605@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
47606 shutdown(info);
47607
47608 spin_lock_irqsave(&info->port.lock, flags);
47609- info->port.count = 0;
47610+ atomic_set(&info->port.count, 0);
47611 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
47612 info->port.tty = NULL;
47613 spin_unlock_irqrestore(&info->port.lock, flags);
47614@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
47615 unsigned short new_crctype;
47616
47617 /* return error if TTY interface open */
47618- if (info->port.count)
47619+ if (atomic_read(&info->port.count))
47620 return -EBUSY;
47621
47622 switch (encoding)
47623@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
47624
47625 /* arbitrate between network and tty opens */
47626 spin_lock_irqsave(&info->netlock, flags);
47627- if (info->port.count != 0 || info->netcount != 0) {
47628+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
47629 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
47630 spin_unlock_irqrestore(&info->netlock, flags);
47631 return -EBUSY;
47632@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
47633 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
47634
47635 /* return error if TTY interface open */
47636- if (info->port.count)
47637+ if (atomic_read(&info->port.count))
47638 return -EBUSY;
47639
47640 if (cmd != SIOCWANDEV)
47641@@ -2620,7 +2620,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
47642 * do not request bottom half processing if the
47643 * device is not open in a normal mode.
47644 */
47645- if ( port && (port->port.count || port->netcount) &&
47646+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
47647 port->pending_bh && !port->bh_running &&
47648 !port->bh_requested ) {
47649 if ( debug_level >= DEBUG_LEVEL_ISR )
47650@@ -3318,12 +3318,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
47651
47652 if (debug_level >= DEBUG_LEVEL_INFO)
47653 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
47654- __FILE__,__LINE__, tty->driver->name, port->count );
47655+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
47656
47657 spin_lock_irqsave(&info->lock, flags);
47658 if (!tty_hung_up_p(filp)) {
47659 extra_count = true;
47660- port->count--;
47661+ atomic_dec(&port->count);
47662 }
47663 spin_unlock_irqrestore(&info->lock, flags);
47664 port->blocked_open++;
47665@@ -3352,7 +3352,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
47666
47667 if (debug_level >= DEBUG_LEVEL_INFO)
47668 printk("%s(%d):%s block_til_ready() count=%d\n",
47669- __FILE__,__LINE__, tty->driver->name, port->count );
47670+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
47671
47672 tty_unlock(tty);
47673 schedule();
47674@@ -3363,12 +3363,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
47675 remove_wait_queue(&port->open_wait, &wait);
47676
47677 if (extra_count)
47678- port->count++;
47679+ atomic_inc(&port->count);
47680 port->blocked_open--;
47681
47682 if (debug_level >= DEBUG_LEVEL_INFO)
47683 printk("%s(%d):%s block_til_ready() after, count=%d\n",
47684- __FILE__,__LINE__, tty->driver->name, port->count );
47685+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
47686
47687 if (!retval)
47688 port->flags |= ASYNC_NORMAL_ACTIVE;
47689diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
47690index b51c154..17d55d1 100644
47691--- a/drivers/tty/sysrq.c
47692+++ b/drivers/tty/sysrq.c
47693@@ -1022,7 +1022,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
47694 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
47695 size_t count, loff_t *ppos)
47696 {
47697- if (count) {
47698+ if (count && capable(CAP_SYS_ADMIN)) {
47699 char c;
47700
47701 if (get_user(c, buf))
47702diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
47703index 4476682..d77e748 100644
47704--- a/drivers/tty/tty_io.c
47705+++ b/drivers/tty/tty_io.c
47706@@ -3466,7 +3466,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
47707
47708 void tty_default_fops(struct file_operations *fops)
47709 {
47710- *fops = tty_fops;
47711+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
47712 }
47713
47714 /*
47715diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
47716index 1afe192..73d2c20 100644
47717--- a/drivers/tty/tty_ldisc.c
47718+++ b/drivers/tty/tty_ldisc.c
47719@@ -66,7 +66,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
47720 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
47721 tty_ldiscs[disc] = new_ldisc;
47722 new_ldisc->num = disc;
47723- new_ldisc->refcount = 0;
47724+ atomic_set(&new_ldisc->refcount, 0);
47725 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
47726
47727 return ret;
47728@@ -94,7 +94,7 @@ int tty_unregister_ldisc(int disc)
47729 return -EINVAL;
47730
47731 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
47732- if (tty_ldiscs[disc]->refcount)
47733+ if (atomic_read(&tty_ldiscs[disc]->refcount))
47734 ret = -EBUSY;
47735 else
47736 tty_ldiscs[disc] = NULL;
47737@@ -115,7 +115,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
47738 if (ldops) {
47739 ret = ERR_PTR(-EAGAIN);
47740 if (try_module_get(ldops->owner)) {
47741- ldops->refcount++;
47742+ atomic_inc(&ldops->refcount);
47743 ret = ldops;
47744 }
47745 }
47746@@ -128,7 +128,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
47747 unsigned long flags;
47748
47749 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
47750- ldops->refcount--;
47751+ atomic_dec(&ldops->refcount);
47752 module_put(ldops->owner);
47753 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
47754 }
47755@@ -196,7 +196,7 @@ static inline void tty_ldisc_put(struct tty_ldisc *ld)
47756 /* unreleased reader reference(s) will cause this WARN */
47757 WARN_ON(!atomic_dec_and_test(&ld->users));
47758
47759- ld->ops->refcount--;
47760+ atomic_dec(&ld->ops->refcount);
47761 module_put(ld->ops->owner);
47762 kfree(ld);
47763 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
47764diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
47765index f597e88..b7f68ed 100644
47766--- a/drivers/tty/tty_port.c
47767+++ b/drivers/tty/tty_port.c
47768@@ -232,7 +232,7 @@ void tty_port_hangup(struct tty_port *port)
47769 unsigned long flags;
47770
47771 spin_lock_irqsave(&port->lock, flags);
47772- port->count = 0;
47773+ atomic_set(&port->count, 0);
47774 port->flags &= ~ASYNC_NORMAL_ACTIVE;
47775 tty = port->tty;
47776 if (tty)
47777@@ -390,7 +390,7 @@ int tty_port_block_til_ready(struct tty_port *port,
47778 /* The port lock protects the port counts */
47779 spin_lock_irqsave(&port->lock, flags);
47780 if (!tty_hung_up_p(filp))
47781- port->count--;
47782+ atomic_dec(&port->count);
47783 port->blocked_open++;
47784 spin_unlock_irqrestore(&port->lock, flags);
47785
47786@@ -432,7 +432,7 @@ int tty_port_block_til_ready(struct tty_port *port,
47787 we must not mess that up further */
47788 spin_lock_irqsave(&port->lock, flags);
47789 if (!tty_hung_up_p(filp))
47790- port->count++;
47791+ atomic_inc(&port->count);
47792 port->blocked_open--;
47793 if (retval == 0)
47794 port->flags |= ASYNC_NORMAL_ACTIVE;
47795@@ -466,19 +466,19 @@ int tty_port_close_start(struct tty_port *port,
47796 return 0;
47797 }
47798
47799- if (tty->count == 1 && port->count != 1) {
47800+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
47801 printk(KERN_WARNING
47802 "tty_port_close_start: tty->count = 1 port count = %d.\n",
47803- port->count);
47804- port->count = 1;
47805+ atomic_read(&port->count));
47806+ atomic_set(&port->count, 1);
47807 }
47808- if (--port->count < 0) {
47809+ if (atomic_dec_return(&port->count) < 0) {
47810 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
47811- port->count);
47812- port->count = 0;
47813+ atomic_read(&port->count));
47814+ atomic_set(&port->count, 0);
47815 }
47816
47817- if (port->count) {
47818+ if (atomic_read(&port->count)) {
47819 spin_unlock_irqrestore(&port->lock, flags);
47820 if (port->ops->drop)
47821 port->ops->drop(port);
47822@@ -564,7 +564,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
47823 {
47824 spin_lock_irq(&port->lock);
47825 if (!tty_hung_up_p(filp))
47826- ++port->count;
47827+ atomic_inc(&port->count);
47828 spin_unlock_irq(&port->lock);
47829 tty_port_tty_set(port, tty);
47830
47831diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
47832index a9af1b9a..1e08e7f 100644
47833--- a/drivers/tty/vt/keyboard.c
47834+++ b/drivers/tty/vt/keyboard.c
47835@@ -647,6 +647,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
47836 kbd->kbdmode == VC_OFF) &&
47837 value != KVAL(K_SAK))
47838 return; /* SAK is allowed even in raw mode */
47839+
47840+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47841+ {
47842+ void *func = fn_handler[value];
47843+ if (func == fn_show_state || func == fn_show_ptregs ||
47844+ func == fn_show_mem)
47845+ return;
47846+ }
47847+#endif
47848+
47849 fn_handler[value](vc);
47850 }
47851
47852@@ -1795,9 +1805,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
47853 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
47854 return -EFAULT;
47855
47856- if (!capable(CAP_SYS_TTY_CONFIG))
47857- perm = 0;
47858-
47859 switch (cmd) {
47860 case KDGKBENT:
47861 /* Ensure another thread doesn't free it under us */
47862@@ -1812,6 +1819,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
47863 spin_unlock_irqrestore(&kbd_event_lock, flags);
47864 return put_user(val, &user_kbe->kb_value);
47865 case KDSKBENT:
47866+ if (!capable(CAP_SYS_TTY_CONFIG))
47867+ perm = 0;
47868+
47869 if (!perm)
47870 return -EPERM;
47871 if (!i && v == K_NOSUCHMAP) {
47872@@ -1902,9 +1912,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
47873 int i, j, k;
47874 int ret;
47875
47876- if (!capable(CAP_SYS_TTY_CONFIG))
47877- perm = 0;
47878-
47879 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
47880 if (!kbs) {
47881 ret = -ENOMEM;
47882@@ -1938,6 +1945,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
47883 kfree(kbs);
47884 return ((p && *p) ? -EOVERFLOW : 0);
47885 case KDSKBSENT:
47886+ if (!capable(CAP_SYS_TTY_CONFIG))
47887+ perm = 0;
47888+
47889 if (!perm) {
47890 ret = -EPERM;
47891 goto reterr;
47892diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
47893index b645c47..a55c182 100644
47894--- a/drivers/uio/uio.c
47895+++ b/drivers/uio/uio.c
47896@@ -25,6 +25,7 @@
47897 #include <linux/kobject.h>
47898 #include <linux/cdev.h>
47899 #include <linux/uio_driver.h>
47900+#include <asm/local.h>
47901
47902 #define UIO_MAX_DEVICES (1U << MINORBITS)
47903
47904@@ -32,10 +33,10 @@ struct uio_device {
47905 struct module *owner;
47906 struct device *dev;
47907 int minor;
47908- atomic_t event;
47909+ atomic_unchecked_t event;
47910 struct fasync_struct *async_queue;
47911 wait_queue_head_t wait;
47912- int vma_count;
47913+ local_t vma_count;
47914 struct uio_info *info;
47915 struct kobject *map_dir;
47916 struct kobject *portio_dir;
47917@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
47918 struct device_attribute *attr, char *buf)
47919 {
47920 struct uio_device *idev = dev_get_drvdata(dev);
47921- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
47922+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
47923 }
47924
47925 static struct device_attribute uio_class_attributes[] = {
47926@@ -398,7 +399,7 @@ void uio_event_notify(struct uio_info *info)
47927 {
47928 struct uio_device *idev = info->uio_dev;
47929
47930- atomic_inc(&idev->event);
47931+ atomic_inc_unchecked(&idev->event);
47932 wake_up_interruptible(&idev->wait);
47933 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
47934 }
47935@@ -451,7 +452,7 @@ static int uio_open(struct inode *inode, struct file *filep)
47936 }
47937
47938 listener->dev = idev;
47939- listener->event_count = atomic_read(&idev->event);
47940+ listener->event_count = atomic_read_unchecked(&idev->event);
47941 filep->private_data = listener;
47942
47943 if (idev->info->open) {
47944@@ -502,7 +503,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
47945 return -EIO;
47946
47947 poll_wait(filep, &idev->wait, wait);
47948- if (listener->event_count != atomic_read(&idev->event))
47949+ if (listener->event_count != atomic_read_unchecked(&idev->event))
47950 return POLLIN | POLLRDNORM;
47951 return 0;
47952 }
47953@@ -527,7 +528,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
47954 do {
47955 set_current_state(TASK_INTERRUPTIBLE);
47956
47957- event_count = atomic_read(&idev->event);
47958+ event_count = atomic_read_unchecked(&idev->event);
47959 if (event_count != listener->event_count) {
47960 if (copy_to_user(buf, &event_count, count))
47961 retval = -EFAULT;
47962@@ -596,13 +597,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
47963 static void uio_vma_open(struct vm_area_struct *vma)
47964 {
47965 struct uio_device *idev = vma->vm_private_data;
47966- idev->vma_count++;
47967+ local_inc(&idev->vma_count);
47968 }
47969
47970 static void uio_vma_close(struct vm_area_struct *vma)
47971 {
47972 struct uio_device *idev = vma->vm_private_data;
47973- idev->vma_count--;
47974+ local_dec(&idev->vma_count);
47975 }
47976
47977 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
47978@@ -809,7 +810,7 @@ int __uio_register_device(struct module *owner,
47979 idev->owner = owner;
47980 idev->info = info;
47981 init_waitqueue_head(&idev->wait);
47982- atomic_set(&idev->event, 0);
47983+ atomic_set_unchecked(&idev->event, 0);
47984
47985 ret = uio_get_minor(idev);
47986 if (ret)
47987diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
47988index 8a7eb77..c00402f 100644
47989--- a/drivers/usb/atm/cxacru.c
47990+++ b/drivers/usb/atm/cxacru.c
47991@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
47992 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
47993 if (ret < 2)
47994 return -EINVAL;
47995- if (index < 0 || index > 0x7f)
47996+ if (index > 0x7f)
47997 return -EINVAL;
47998 pos += tmp;
47999
48000diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
48001index d3527dd..26effa2 100644
48002--- a/drivers/usb/atm/usbatm.c
48003+++ b/drivers/usb/atm/usbatm.c
48004@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
48005 if (printk_ratelimit())
48006 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
48007 __func__, vpi, vci);
48008- atomic_inc(&vcc->stats->rx_err);
48009+ atomic_inc_unchecked(&vcc->stats->rx_err);
48010 return;
48011 }
48012
48013@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
48014 if (length > ATM_MAX_AAL5_PDU) {
48015 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
48016 __func__, length, vcc);
48017- atomic_inc(&vcc->stats->rx_err);
48018+ atomic_inc_unchecked(&vcc->stats->rx_err);
48019 goto out;
48020 }
48021
48022@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
48023 if (sarb->len < pdu_length) {
48024 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
48025 __func__, pdu_length, sarb->len, vcc);
48026- atomic_inc(&vcc->stats->rx_err);
48027+ atomic_inc_unchecked(&vcc->stats->rx_err);
48028 goto out;
48029 }
48030
48031 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
48032 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
48033 __func__, vcc);
48034- atomic_inc(&vcc->stats->rx_err);
48035+ atomic_inc_unchecked(&vcc->stats->rx_err);
48036 goto out;
48037 }
48038
48039@@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
48040 if (printk_ratelimit())
48041 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
48042 __func__, length);
48043- atomic_inc(&vcc->stats->rx_drop);
48044+ atomic_inc_unchecked(&vcc->stats->rx_drop);
48045 goto out;
48046 }
48047
48048@@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
48049
48050 vcc->push(vcc, skb);
48051
48052- atomic_inc(&vcc->stats->rx);
48053+ atomic_inc_unchecked(&vcc->stats->rx);
48054 out:
48055 skb_trim(sarb, 0);
48056 }
48057@@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
48058 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
48059
48060 usbatm_pop(vcc, skb);
48061- atomic_inc(&vcc->stats->tx);
48062+ atomic_inc_unchecked(&vcc->stats->tx);
48063
48064 skb = skb_dequeue(&instance->sndqueue);
48065 }
48066@@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
48067 if (!left--)
48068 return sprintf(page,
48069 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
48070- atomic_read(&atm_dev->stats.aal5.tx),
48071- atomic_read(&atm_dev->stats.aal5.tx_err),
48072- atomic_read(&atm_dev->stats.aal5.rx),
48073- atomic_read(&atm_dev->stats.aal5.rx_err),
48074- atomic_read(&atm_dev->stats.aal5.rx_drop));
48075+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
48076+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
48077+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
48078+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
48079+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
48080
48081 if (!left--) {
48082 if (instance->disconnected)
48083diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
48084index 2a3bbdf..91d72cf 100644
48085--- a/drivers/usb/core/devices.c
48086+++ b/drivers/usb/core/devices.c
48087@@ -126,7 +126,7 @@ static const char format_endpt[] =
48088 * time it gets called.
48089 */
48090 static struct device_connect_event {
48091- atomic_t count;
48092+ atomic_unchecked_t count;
48093 wait_queue_head_t wait;
48094 } device_event = {
48095 .count = ATOMIC_INIT(1),
48096@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
48097
48098 void usbfs_conn_disc_event(void)
48099 {
48100- atomic_add(2, &device_event.count);
48101+ atomic_add_unchecked(2, &device_event.count);
48102 wake_up(&device_event.wait);
48103 }
48104
48105@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
48106
48107 poll_wait(file, &device_event.wait, wait);
48108
48109- event_count = atomic_read(&device_event.count);
48110+ event_count = atomic_read_unchecked(&device_event.count);
48111 if (file->f_version != event_count) {
48112 file->f_version = event_count;
48113 return POLLIN | POLLRDNORM;
48114diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
48115index d53547d..6a22d02 100644
48116--- a/drivers/usb/core/hcd.c
48117+++ b/drivers/usb/core/hcd.c
48118@@ -1526,7 +1526,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
48119 */
48120 usb_get_urb(urb);
48121 atomic_inc(&urb->use_count);
48122- atomic_inc(&urb->dev->urbnum);
48123+ atomic_inc_unchecked(&urb->dev->urbnum);
48124 usbmon_urb_submit(&hcd->self, urb);
48125
48126 /* NOTE requirements on root-hub callers (usbfs and the hub
48127@@ -1553,7 +1553,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
48128 urb->hcpriv = NULL;
48129 INIT_LIST_HEAD(&urb->urb_list);
48130 atomic_dec(&urb->use_count);
48131- atomic_dec(&urb->dev->urbnum);
48132+ atomic_dec_unchecked(&urb->dev->urbnum);
48133 if (atomic_read(&urb->reject))
48134 wake_up(&usb_kill_urb_queue);
48135 usb_put_urb(urb);
48136diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
48137index da2905a..834a569 100644
48138--- a/drivers/usb/core/hub.c
48139+++ b/drivers/usb/core/hub.c
48140@@ -27,6 +27,7 @@
48141 #include <linux/freezer.h>
48142 #include <linux/random.h>
48143 #include <linux/pm_qos.h>
48144+#include <linux/grsecurity.h>
48145
48146 #include <asm/uaccess.h>
48147 #include <asm/byteorder.h>
48148@@ -4424,6 +4425,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
48149 goto done;
48150 return;
48151 }
48152+
48153+ if (gr_handle_new_usb())
48154+ goto done;
48155+
48156 if (hub_is_superspeed(hub->hdev))
48157 unit_load = 150;
48158 else
48159diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
48160index 444d30e..f15c850 100644
48161--- a/drivers/usb/core/message.c
48162+++ b/drivers/usb/core/message.c
48163@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
48164 * method can wait for it to complete. Since you don't have a handle on the
48165 * URB used, you can't cancel the request.
48166 */
48167-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
48168+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
48169 __u8 requesttype, __u16 value, __u16 index, void *data,
48170 __u16 size, int timeout)
48171 {
48172diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
48173index aa38db4..0a08682 100644
48174--- a/drivers/usb/core/sysfs.c
48175+++ b/drivers/usb/core/sysfs.c
48176@@ -239,7 +239,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
48177 struct usb_device *udev;
48178
48179 udev = to_usb_device(dev);
48180- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
48181+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
48182 }
48183 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
48184
48185diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
48186index b10da72..43aa0b2 100644
48187--- a/drivers/usb/core/usb.c
48188+++ b/drivers/usb/core/usb.c
48189@@ -389,7 +389,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
48190 set_dev_node(&dev->dev, dev_to_node(bus->controller));
48191 dev->state = USB_STATE_ATTACHED;
48192 dev->lpm_disable_count = 1;
48193- atomic_set(&dev->urbnum, 0);
48194+ atomic_set_unchecked(&dev->urbnum, 0);
48195
48196 INIT_LIST_HEAD(&dev->ep0.urb_list);
48197 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
48198diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
48199index f77083f..f3e2e34 100644
48200--- a/drivers/usb/dwc3/gadget.c
48201+++ b/drivers/usb/dwc3/gadget.c
48202@@ -550,8 +550,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
48203 if (!usb_endpoint_xfer_isoc(desc))
48204 return 0;
48205
48206- memset(&trb_link, 0, sizeof(trb_link));
48207-
48208 /* Link TRB for ISOC. The HWO bit is never reset */
48209 trb_st_hw = &dep->trb_pool[0];
48210
48211diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
48212index 5e29dde..eca992f 100644
48213--- a/drivers/usb/early/ehci-dbgp.c
48214+++ b/drivers/usb/early/ehci-dbgp.c
48215@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
48216
48217 #ifdef CONFIG_KGDB
48218 static struct kgdb_io kgdbdbgp_io_ops;
48219-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
48220+static struct kgdb_io kgdbdbgp_io_ops_console;
48221+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
48222 #else
48223 #define dbgp_kgdb_mode (0)
48224 #endif
48225@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
48226 .write_char = kgdbdbgp_write_char,
48227 };
48228
48229+static struct kgdb_io kgdbdbgp_io_ops_console = {
48230+ .name = "kgdbdbgp",
48231+ .read_char = kgdbdbgp_read_char,
48232+ .write_char = kgdbdbgp_write_char,
48233+ .is_console = 1
48234+};
48235+
48236 static int kgdbdbgp_wait_time;
48237
48238 static int __init kgdbdbgp_parse_config(char *str)
48239@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
48240 ptr++;
48241 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
48242 }
48243- kgdb_register_io_module(&kgdbdbgp_io_ops);
48244- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
48245+ if (early_dbgp_console.index != -1)
48246+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
48247+ else
48248+ kgdb_register_io_module(&kgdbdbgp_io_ops);
48249
48250 return 0;
48251 }
48252diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
48253index b369292..9f3ba40 100644
48254--- a/drivers/usb/gadget/u_serial.c
48255+++ b/drivers/usb/gadget/u_serial.c
48256@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
48257 spin_lock_irq(&port->port_lock);
48258
48259 /* already open? Great. */
48260- if (port->port.count) {
48261+ if (atomic_read(&port->port.count)) {
48262 status = 0;
48263- port->port.count++;
48264+ atomic_inc(&port->port.count);
48265
48266 /* currently opening/closing? wait ... */
48267 } else if (port->openclose) {
48268@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
48269 tty->driver_data = port;
48270 port->port.tty = tty;
48271
48272- port->port.count = 1;
48273+ atomic_set(&port->port.count, 1);
48274 port->openclose = false;
48275
48276 /* if connected, start the I/O stream */
48277@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
48278
48279 spin_lock_irq(&port->port_lock);
48280
48281- if (port->port.count != 1) {
48282- if (port->port.count == 0)
48283+ if (atomic_read(&port->port.count) != 1) {
48284+ if (atomic_read(&port->port.count) == 0)
48285 WARN_ON(1);
48286 else
48287- --port->port.count;
48288+ atomic_dec(&port->port.count);
48289 goto exit;
48290 }
48291
48292@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
48293 * and sleep if necessary
48294 */
48295 port->openclose = true;
48296- port->port.count = 0;
48297+ atomic_set(&port->port.count, 0);
48298
48299 gser = port->port_usb;
48300 if (gser && gser->disconnect)
48301@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
48302 int cond;
48303
48304 spin_lock_irq(&port->port_lock);
48305- cond = (port->port.count == 0) && !port->openclose;
48306+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
48307 spin_unlock_irq(&port->port_lock);
48308 return cond;
48309 }
48310@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
48311 /* if it's already open, start I/O ... and notify the serial
48312 * protocol about open/close status (connect/disconnect).
48313 */
48314- if (port->port.count) {
48315+ if (atomic_read(&port->port.count)) {
48316 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
48317 gs_start_io(port);
48318 if (gser->connect)
48319@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
48320
48321 port->port_usb = NULL;
48322 gser->ioport = NULL;
48323- if (port->port.count > 0 || port->openclose) {
48324+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
48325 wake_up_interruptible(&port->drain_wait);
48326 if (port->port.tty)
48327 tty_hangup(port->port.tty);
48328@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
48329
48330 /* finally, free any unused/unusable I/O buffers */
48331 spin_lock_irqsave(&port->port_lock, flags);
48332- if (port->port.count == 0 && !port->openclose)
48333+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
48334 gs_buf_free(&port->port_write_buf);
48335 gs_free_requests(gser->out, &port->read_pool, NULL);
48336 gs_free_requests(gser->out, &port->read_queue, NULL);
48337diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
48338index 5f3bcd3..bfca43f 100644
48339--- a/drivers/usb/serial/console.c
48340+++ b/drivers/usb/serial/console.c
48341@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
48342
48343 info->port = port;
48344
48345- ++port->port.count;
48346+ atomic_inc(&port->port.count);
48347 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
48348 if (serial->type->set_termios) {
48349 /*
48350@@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
48351 }
48352 /* Now that any required fake tty operations are completed restore
48353 * the tty port count */
48354- --port->port.count;
48355+ atomic_dec(&port->port.count);
48356 /* The console is special in terms of closing the device so
48357 * indicate this port is now acting as a system console. */
48358 port->port.console = 1;
48359@@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
48360 free_tty:
48361 kfree(tty);
48362 reset_open_count:
48363- port->port.count = 0;
48364+ atomic_set(&port->port.count, 0);
48365 usb_autopm_put_interface(serial->interface);
48366 error_get_interface:
48367 usb_serial_put(serial);
48368diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
48369index 75f70f0..d467e1a 100644
48370--- a/drivers/usb/storage/usb.h
48371+++ b/drivers/usb/storage/usb.h
48372@@ -63,7 +63,7 @@ struct us_unusual_dev {
48373 __u8 useProtocol;
48374 __u8 useTransport;
48375 int (*initFunction)(struct us_data *);
48376-};
48377+} __do_const;
48378
48379
48380 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
48381diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
48382index d6bea3e..60b250e 100644
48383--- a/drivers/usb/wusbcore/wa-hc.h
48384+++ b/drivers/usb/wusbcore/wa-hc.h
48385@@ -192,7 +192,7 @@ struct wahc {
48386 struct list_head xfer_delayed_list;
48387 spinlock_t xfer_list_lock;
48388 struct work_struct xfer_work;
48389- atomic_t xfer_id_count;
48390+ atomic_unchecked_t xfer_id_count;
48391 };
48392
48393
48394@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
48395 INIT_LIST_HEAD(&wa->xfer_delayed_list);
48396 spin_lock_init(&wa->xfer_list_lock);
48397 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
48398- atomic_set(&wa->xfer_id_count, 1);
48399+ atomic_set_unchecked(&wa->xfer_id_count, 1);
48400 }
48401
48402 /**
48403diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
48404index 028fc83..65bb105 100644
48405--- a/drivers/usb/wusbcore/wa-xfer.c
48406+++ b/drivers/usb/wusbcore/wa-xfer.c
48407@@ -296,7 +296,7 @@ out:
48408 */
48409 static void wa_xfer_id_init(struct wa_xfer *xfer)
48410 {
48411- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
48412+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
48413 }
48414
48415 /*
48416diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
48417index 6d78736..65be90e 100644
48418--- a/drivers/vfio/vfio.c
48419+++ b/drivers/vfio/vfio.c
48420@@ -486,7 +486,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
48421 return 0;
48422
48423 /* TODO Prevent device auto probing */
48424- WARN("Device %s added to live group %d!\n", dev_name(dev),
48425+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
48426 iommu_group_id(group->iommu_group));
48427
48428 return 0;
48429@@ -506,7 +506,7 @@ static int vfio_group_nb_del_dev(struct vfio_group *group, struct device *dev)
48430 if (likely(!device))
48431 return 0;
48432
48433- WARN("Device %s removed from live group %d!\n", dev_name(dev),
48434+ WARN(1, "Device %s removed from live group %d!\n", dev_name(dev),
48435 iommu_group_id(group->iommu_group));
48436
48437 vfio_device_put(device);
48438diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
48439index 5174eba..86e764a 100644
48440--- a/drivers/vhost/vringh.c
48441+++ b/drivers/vhost/vringh.c
48442@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
48443
48444 static inline int putu16_kern(u16 *p, u16 val)
48445 {
48446- ACCESS_ONCE(*p) = val;
48447+ ACCESS_ONCE_RW(*p) = val;
48448 return 0;
48449 }
48450
48451diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
48452index 8c55011..eed4ae1a 100644
48453--- a/drivers/video/aty/aty128fb.c
48454+++ b/drivers/video/aty/aty128fb.c
48455@@ -149,7 +149,7 @@ enum {
48456 };
48457
48458 /* Must match above enum */
48459-static char * const r128_family[] = {
48460+static const char * const r128_family[] = {
48461 "AGP",
48462 "PCI",
48463 "PRO AGP",
48464diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
48465index 4f27fdc..d3537e6 100644
48466--- a/drivers/video/aty/atyfb_base.c
48467+++ b/drivers/video/aty/atyfb_base.c
48468@@ -1325,10 +1325,14 @@ static int atyfb_set_par(struct fb_info *info)
48469 par->accel_flags = var->accel_flags; /* hack */
48470
48471 if (var->accel_flags) {
48472- info->fbops->fb_sync = atyfb_sync;
48473+ pax_open_kernel();
48474+ *(void **)&info->fbops->fb_sync = atyfb_sync;
48475+ pax_close_kernel();
48476 info->flags &= ~FBINFO_HWACCEL_DISABLED;
48477 } else {
48478- info->fbops->fb_sync = NULL;
48479+ pax_open_kernel();
48480+ *(void **)&info->fbops->fb_sync = NULL;
48481+ pax_close_kernel();
48482 info->flags |= FBINFO_HWACCEL_DISABLED;
48483 }
48484
48485diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
48486index 95ec042..e6affdd 100644
48487--- a/drivers/video/aty/mach64_cursor.c
48488+++ b/drivers/video/aty/mach64_cursor.c
48489@@ -7,6 +7,7 @@
48490 #include <linux/string.h>
48491
48492 #include <asm/io.h>
48493+#include <asm/pgtable.h>
48494
48495 #ifdef __sparc__
48496 #include <asm/fbio.h>
48497@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
48498 info->sprite.buf_align = 16; /* and 64 lines tall. */
48499 info->sprite.flags = FB_PIXMAP_IO;
48500
48501- info->fbops->fb_cursor = atyfb_cursor;
48502+ pax_open_kernel();
48503+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
48504+ pax_close_kernel();
48505
48506 return 0;
48507 }
48508diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
48509index c74e7aa..e3c2790 100644
48510--- a/drivers/video/backlight/backlight.c
48511+++ b/drivers/video/backlight/backlight.c
48512@@ -304,7 +304,7 @@ struct backlight_device *backlight_device_register(const char *name,
48513 new_bd->dev.class = backlight_class;
48514 new_bd->dev.parent = parent;
48515 new_bd->dev.release = bl_device_release;
48516- dev_set_name(&new_bd->dev, name);
48517+ dev_set_name(&new_bd->dev, "%s", name);
48518 dev_set_drvdata(&new_bd->dev, devdata);
48519
48520 /* Set default properties */
48521diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
48522index bca6ccc..252107e 100644
48523--- a/drivers/video/backlight/kb3886_bl.c
48524+++ b/drivers/video/backlight/kb3886_bl.c
48525@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
48526 static unsigned long kb3886bl_flags;
48527 #define KB3886BL_SUSPENDED 0x01
48528
48529-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
48530+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
48531 {
48532 .ident = "Sahara Touch-iT",
48533 .matches = {
48534diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
48535index 34fb6bd..3649fd9 100644
48536--- a/drivers/video/backlight/lcd.c
48537+++ b/drivers/video/backlight/lcd.c
48538@@ -219,7 +219,7 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
48539 new_ld->dev.class = lcd_class;
48540 new_ld->dev.parent = parent;
48541 new_ld->dev.release = lcd_device_release;
48542- dev_set_name(&new_ld->dev, name);
48543+ dev_set_name(&new_ld->dev, "%s", name);
48544 dev_set_drvdata(&new_ld->dev, devdata);
48545
48546 rc = device_register(&new_ld->dev);
48547diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
48548index 900aa4e..6d49418 100644
48549--- a/drivers/video/fb_defio.c
48550+++ b/drivers/video/fb_defio.c
48551@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
48552
48553 BUG_ON(!fbdefio);
48554 mutex_init(&fbdefio->lock);
48555- info->fbops->fb_mmap = fb_deferred_io_mmap;
48556+ pax_open_kernel();
48557+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
48558+ pax_close_kernel();
48559 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
48560 INIT_LIST_HEAD(&fbdefio->pagelist);
48561 if (fbdefio->delay == 0) /* set a default of 1 s */
48562@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
48563 page->mapping = NULL;
48564 }
48565
48566- info->fbops->fb_mmap = NULL;
48567+ *(void **)&info->fbops->fb_mmap = NULL;
48568 mutex_destroy(&fbdefio->lock);
48569 }
48570 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
48571diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
48572index 5c3960d..15cf8fc 100644
48573--- a/drivers/video/fbcmap.c
48574+++ b/drivers/video/fbcmap.c
48575@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
48576 rc = -ENODEV;
48577 goto out;
48578 }
48579- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
48580- !info->fbops->fb_setcmap)) {
48581+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
48582 rc = -EINVAL;
48583 goto out1;
48584 }
48585diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
48586index 098bfc6..796841d 100644
48587--- a/drivers/video/fbmem.c
48588+++ b/drivers/video/fbmem.c
48589@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
48590 image->dx += image->width + 8;
48591 }
48592 } else if (rotate == FB_ROTATE_UD) {
48593- for (x = 0; x < num && image->dx >= 0; x++) {
48594+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
48595 info->fbops->fb_imageblit(info, image);
48596 image->dx -= image->width + 8;
48597 }
48598@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
48599 image->dy += image->height + 8;
48600 }
48601 } else if (rotate == FB_ROTATE_CCW) {
48602- for (x = 0; x < num && image->dy >= 0; x++) {
48603+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
48604 info->fbops->fb_imageblit(info, image);
48605 image->dy -= image->height + 8;
48606 }
48607@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
48608 return -EFAULT;
48609 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
48610 return -EINVAL;
48611- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
48612+ if (con2fb.framebuffer >= FB_MAX)
48613 return -EINVAL;
48614 if (!registered_fb[con2fb.framebuffer])
48615 request_module("fb%d", con2fb.framebuffer);
48616diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
48617index 7672d2e..b56437f 100644
48618--- a/drivers/video/i810/i810_accel.c
48619+++ b/drivers/video/i810/i810_accel.c
48620@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
48621 }
48622 }
48623 printk("ringbuffer lockup!!!\n");
48624+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
48625 i810_report_error(mmio);
48626 par->dev_flags |= LOCKUP;
48627 info->pixmap.scan_align = 1;
48628diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
48629index 3c14e43..eafa544 100644
48630--- a/drivers/video/logo/logo_linux_clut224.ppm
48631+++ b/drivers/video/logo/logo_linux_clut224.ppm
48632@@ -1,1604 +1,1123 @@
48633 P3
48634-# Standard 224-color Linux logo
48635 80 80
48636 255
48637- 0 0 0 0 0 0 0 0 0 0 0 0
48638- 0 0 0 0 0 0 0 0 0 0 0 0
48639- 0 0 0 0 0 0 0 0 0 0 0 0
48640- 0 0 0 0 0 0 0 0 0 0 0 0
48641- 0 0 0 0 0 0 0 0 0 0 0 0
48642- 0 0 0 0 0 0 0 0 0 0 0 0
48643- 0 0 0 0 0 0 0 0 0 0 0 0
48644- 0 0 0 0 0 0 0 0 0 0 0 0
48645- 0 0 0 0 0 0 0 0 0 0 0 0
48646- 6 6 6 6 6 6 10 10 10 10 10 10
48647- 10 10 10 6 6 6 6 6 6 6 6 6
48648- 0 0 0 0 0 0 0 0 0 0 0 0
48649- 0 0 0 0 0 0 0 0 0 0 0 0
48650- 0 0 0 0 0 0 0 0 0 0 0 0
48651- 0 0 0 0 0 0 0 0 0 0 0 0
48652- 0 0 0 0 0 0 0 0 0 0 0 0
48653- 0 0 0 0 0 0 0 0 0 0 0 0
48654- 0 0 0 0 0 0 0 0 0 0 0 0
48655- 0 0 0 0 0 0 0 0 0 0 0 0
48656- 0 0 0 0 0 0 0 0 0 0 0 0
48657- 0 0 0 0 0 0 0 0 0 0 0 0
48658- 0 0 0 0 0 0 0 0 0 0 0 0
48659- 0 0 0 0 0 0 0 0 0 0 0 0
48660- 0 0 0 0 0 0 0 0 0 0 0 0
48661- 0 0 0 0 0 0 0 0 0 0 0 0
48662- 0 0 0 0 0 0 0 0 0 0 0 0
48663- 0 0 0 0 0 0 0 0 0 0 0 0
48664- 0 0 0 0 0 0 0 0 0 0 0 0
48665- 0 0 0 6 6 6 10 10 10 14 14 14
48666- 22 22 22 26 26 26 30 30 30 34 34 34
48667- 30 30 30 30 30 30 26 26 26 18 18 18
48668- 14 14 14 10 10 10 6 6 6 0 0 0
48669- 0 0 0 0 0 0 0 0 0 0 0 0
48670- 0 0 0 0 0 0 0 0 0 0 0 0
48671- 0 0 0 0 0 0 0 0 0 0 0 0
48672- 0 0 0 0 0 0 0 0 0 0 0 0
48673- 0 0 0 0 0 0 0 0 0 0 0 0
48674- 0 0 0 0 0 0 0 0 0 0 0 0
48675- 0 0 0 0 0 0 0 0 0 0 0 0
48676- 0 0 0 0 0 0 0 0 0 0 0 0
48677- 0 0 0 0 0 0 0 0 0 0 0 0
48678- 0 0 0 0 0 1 0 0 1 0 0 0
48679- 0 0 0 0 0 0 0 0 0 0 0 0
48680- 0 0 0 0 0 0 0 0 0 0 0 0
48681- 0 0 0 0 0 0 0 0 0 0 0 0
48682- 0 0 0 0 0 0 0 0 0 0 0 0
48683- 0 0 0 0 0 0 0 0 0 0 0 0
48684- 0 0 0 0 0 0 0 0 0 0 0 0
48685- 6 6 6 14 14 14 26 26 26 42 42 42
48686- 54 54 54 66 66 66 78 78 78 78 78 78
48687- 78 78 78 74 74 74 66 66 66 54 54 54
48688- 42 42 42 26 26 26 18 18 18 10 10 10
48689- 6 6 6 0 0 0 0 0 0 0 0 0
48690- 0 0 0 0 0 0 0 0 0 0 0 0
48691- 0 0 0 0 0 0 0 0 0 0 0 0
48692- 0 0 0 0 0 0 0 0 0 0 0 0
48693- 0 0 0 0 0 0 0 0 0 0 0 0
48694- 0 0 0 0 0 0 0 0 0 0 0 0
48695- 0 0 0 0 0 0 0 0 0 0 0 0
48696- 0 0 0 0 0 0 0 0 0 0 0 0
48697- 0 0 0 0 0 0 0 0 0 0 0 0
48698- 0 0 1 0 0 0 0 0 0 0 0 0
48699- 0 0 0 0 0 0 0 0 0 0 0 0
48700- 0 0 0 0 0 0 0 0 0 0 0 0
48701- 0 0 0 0 0 0 0 0 0 0 0 0
48702- 0 0 0 0 0 0 0 0 0 0 0 0
48703- 0 0 0 0 0 0 0 0 0 0 0 0
48704- 0 0 0 0 0 0 0 0 0 10 10 10
48705- 22 22 22 42 42 42 66 66 66 86 86 86
48706- 66 66 66 38 38 38 38 38 38 22 22 22
48707- 26 26 26 34 34 34 54 54 54 66 66 66
48708- 86 86 86 70 70 70 46 46 46 26 26 26
48709- 14 14 14 6 6 6 0 0 0 0 0 0
48710- 0 0 0 0 0 0 0 0 0 0 0 0
48711- 0 0 0 0 0 0 0 0 0 0 0 0
48712- 0 0 0 0 0 0 0 0 0 0 0 0
48713- 0 0 0 0 0 0 0 0 0 0 0 0
48714- 0 0 0 0 0 0 0 0 0 0 0 0
48715- 0 0 0 0 0 0 0 0 0 0 0 0
48716- 0 0 0 0 0 0 0 0 0 0 0 0
48717- 0 0 0 0 0 0 0 0 0 0 0 0
48718- 0 0 1 0 0 1 0 0 1 0 0 0
48719- 0 0 0 0 0 0 0 0 0 0 0 0
48720- 0 0 0 0 0 0 0 0 0 0 0 0
48721- 0 0 0 0 0 0 0 0 0 0 0 0
48722- 0 0 0 0 0 0 0 0 0 0 0 0
48723- 0 0 0 0 0 0 0 0 0 0 0 0
48724- 0 0 0 0 0 0 10 10 10 26 26 26
48725- 50 50 50 82 82 82 58 58 58 6 6 6
48726- 2 2 6 2 2 6 2 2 6 2 2 6
48727- 2 2 6 2 2 6 2 2 6 2 2 6
48728- 6 6 6 54 54 54 86 86 86 66 66 66
48729- 38 38 38 18 18 18 6 6 6 0 0 0
48730- 0 0 0 0 0 0 0 0 0 0 0 0
48731- 0 0 0 0 0 0 0 0 0 0 0 0
48732- 0 0 0 0 0 0 0 0 0 0 0 0
48733- 0 0 0 0 0 0 0 0 0 0 0 0
48734- 0 0 0 0 0 0 0 0 0 0 0 0
48735- 0 0 0 0 0 0 0 0 0 0 0 0
48736- 0 0 0 0 0 0 0 0 0 0 0 0
48737- 0 0 0 0 0 0 0 0 0 0 0 0
48738- 0 0 0 0 0 0 0 0 0 0 0 0
48739- 0 0 0 0 0 0 0 0 0 0 0 0
48740- 0 0 0 0 0 0 0 0 0 0 0 0
48741- 0 0 0 0 0 0 0 0 0 0 0 0
48742- 0 0 0 0 0 0 0 0 0 0 0 0
48743- 0 0 0 0 0 0 0 0 0 0 0 0
48744- 0 0 0 6 6 6 22 22 22 50 50 50
48745- 78 78 78 34 34 34 2 2 6 2 2 6
48746- 2 2 6 2 2 6 2 2 6 2 2 6
48747- 2 2 6 2 2 6 2 2 6 2 2 6
48748- 2 2 6 2 2 6 6 6 6 70 70 70
48749- 78 78 78 46 46 46 22 22 22 6 6 6
48750- 0 0 0 0 0 0 0 0 0 0 0 0
48751- 0 0 0 0 0 0 0 0 0 0 0 0
48752- 0 0 0 0 0 0 0 0 0 0 0 0
48753- 0 0 0 0 0 0 0 0 0 0 0 0
48754- 0 0 0 0 0 0 0 0 0 0 0 0
48755- 0 0 0 0 0 0 0 0 0 0 0 0
48756- 0 0 0 0 0 0 0 0 0 0 0 0
48757- 0 0 0 0 0 0 0 0 0 0 0 0
48758- 0 0 1 0 0 1 0 0 1 0 0 0
48759- 0 0 0 0 0 0 0 0 0 0 0 0
48760- 0 0 0 0 0 0 0 0 0 0 0 0
48761- 0 0 0 0 0 0 0 0 0 0 0 0
48762- 0 0 0 0 0 0 0 0 0 0 0 0
48763- 0 0 0 0 0 0 0 0 0 0 0 0
48764- 6 6 6 18 18 18 42 42 42 82 82 82
48765- 26 26 26 2 2 6 2 2 6 2 2 6
48766- 2 2 6 2 2 6 2 2 6 2 2 6
48767- 2 2 6 2 2 6 2 2 6 14 14 14
48768- 46 46 46 34 34 34 6 6 6 2 2 6
48769- 42 42 42 78 78 78 42 42 42 18 18 18
48770- 6 6 6 0 0 0 0 0 0 0 0 0
48771- 0 0 0 0 0 0 0 0 0 0 0 0
48772- 0 0 0 0 0 0 0 0 0 0 0 0
48773- 0 0 0 0 0 0 0 0 0 0 0 0
48774- 0 0 0 0 0 0 0 0 0 0 0 0
48775- 0 0 0 0 0 0 0 0 0 0 0 0
48776- 0 0 0 0 0 0 0 0 0 0 0 0
48777- 0 0 0 0 0 0 0 0 0 0 0 0
48778- 0 0 1 0 0 0 0 0 1 0 0 0
48779- 0 0 0 0 0 0 0 0 0 0 0 0
48780- 0 0 0 0 0 0 0 0 0 0 0 0
48781- 0 0 0 0 0 0 0 0 0 0 0 0
48782- 0 0 0 0 0 0 0 0 0 0 0 0
48783- 0 0 0 0 0 0 0 0 0 0 0 0
48784- 10 10 10 30 30 30 66 66 66 58 58 58
48785- 2 2 6 2 2 6 2 2 6 2 2 6
48786- 2 2 6 2 2 6 2 2 6 2 2 6
48787- 2 2 6 2 2 6 2 2 6 26 26 26
48788- 86 86 86 101 101 101 46 46 46 10 10 10
48789- 2 2 6 58 58 58 70 70 70 34 34 34
48790- 10 10 10 0 0 0 0 0 0 0 0 0
48791- 0 0 0 0 0 0 0 0 0 0 0 0
48792- 0 0 0 0 0 0 0 0 0 0 0 0
48793- 0 0 0 0 0 0 0 0 0 0 0 0
48794- 0 0 0 0 0 0 0 0 0 0 0 0
48795- 0 0 0 0 0 0 0 0 0 0 0 0
48796- 0 0 0 0 0 0 0 0 0 0 0 0
48797- 0 0 0 0 0 0 0 0 0 0 0 0
48798- 0 0 1 0 0 1 0 0 1 0 0 0
48799- 0 0 0 0 0 0 0 0 0 0 0 0
48800- 0 0 0 0 0 0 0 0 0 0 0 0
48801- 0 0 0 0 0 0 0 0 0 0 0 0
48802- 0 0 0 0 0 0 0 0 0 0 0 0
48803- 0 0 0 0 0 0 0 0 0 0 0 0
48804- 14 14 14 42 42 42 86 86 86 10 10 10
48805- 2 2 6 2 2 6 2 2 6 2 2 6
48806- 2 2 6 2 2 6 2 2 6 2 2 6
48807- 2 2 6 2 2 6 2 2 6 30 30 30
48808- 94 94 94 94 94 94 58 58 58 26 26 26
48809- 2 2 6 6 6 6 78 78 78 54 54 54
48810- 22 22 22 6 6 6 0 0 0 0 0 0
48811- 0 0 0 0 0 0 0 0 0 0 0 0
48812- 0 0 0 0 0 0 0 0 0 0 0 0
48813- 0 0 0 0 0 0 0 0 0 0 0 0
48814- 0 0 0 0 0 0 0 0 0 0 0 0
48815- 0 0 0 0 0 0 0 0 0 0 0 0
48816- 0 0 0 0 0 0 0 0 0 0 0 0
48817- 0 0 0 0 0 0 0 0 0 0 0 0
48818- 0 0 0 0 0 0 0 0 0 0 0 0
48819- 0 0 0 0 0 0 0 0 0 0 0 0
48820- 0 0 0 0 0 0 0 0 0 0 0 0
48821- 0 0 0 0 0 0 0 0 0 0 0 0
48822- 0 0 0 0 0 0 0 0 0 0 0 0
48823- 0 0 0 0 0 0 0 0 0 6 6 6
48824- 22 22 22 62 62 62 62 62 62 2 2 6
48825- 2 2 6 2 2 6 2 2 6 2 2 6
48826- 2 2 6 2 2 6 2 2 6 2 2 6
48827- 2 2 6 2 2 6 2 2 6 26 26 26
48828- 54 54 54 38 38 38 18 18 18 10 10 10
48829- 2 2 6 2 2 6 34 34 34 82 82 82
48830- 38 38 38 14 14 14 0 0 0 0 0 0
48831- 0 0 0 0 0 0 0 0 0 0 0 0
48832- 0 0 0 0 0 0 0 0 0 0 0 0
48833- 0 0 0 0 0 0 0 0 0 0 0 0
48834- 0 0 0 0 0 0 0 0 0 0 0 0
48835- 0 0 0 0 0 0 0 0 0 0 0 0
48836- 0 0 0 0 0 0 0 0 0 0 0 0
48837- 0 0 0 0 0 0 0 0 0 0 0 0
48838- 0 0 0 0 0 1 0 0 1 0 0 0
48839- 0 0 0 0 0 0 0 0 0 0 0 0
48840- 0 0 0 0 0 0 0 0 0 0 0 0
48841- 0 0 0 0 0 0 0 0 0 0 0 0
48842- 0 0 0 0 0 0 0 0 0 0 0 0
48843- 0 0 0 0 0 0 0 0 0 6 6 6
48844- 30 30 30 78 78 78 30 30 30 2 2 6
48845- 2 2 6 2 2 6 2 2 6 2 2 6
48846- 2 2 6 2 2 6 2 2 6 2 2 6
48847- 2 2 6 2 2 6 2 2 6 10 10 10
48848- 10 10 10 2 2 6 2 2 6 2 2 6
48849- 2 2 6 2 2 6 2 2 6 78 78 78
48850- 50 50 50 18 18 18 6 6 6 0 0 0
48851- 0 0 0 0 0 0 0 0 0 0 0 0
48852- 0 0 0 0 0 0 0 0 0 0 0 0
48853- 0 0 0 0 0 0 0 0 0 0 0 0
48854- 0 0 0 0 0 0 0 0 0 0 0 0
48855- 0 0 0 0 0 0 0 0 0 0 0 0
48856- 0 0 0 0 0 0 0 0 0 0 0 0
48857- 0 0 0 0 0 0 0 0 0 0 0 0
48858- 0 0 1 0 0 0 0 0 0 0 0 0
48859- 0 0 0 0 0 0 0 0 0 0 0 0
48860- 0 0 0 0 0 0 0 0 0 0 0 0
48861- 0 0 0 0 0 0 0 0 0 0 0 0
48862- 0 0 0 0 0 0 0 0 0 0 0 0
48863- 0 0 0 0 0 0 0 0 0 10 10 10
48864- 38 38 38 86 86 86 14 14 14 2 2 6
48865- 2 2 6 2 2 6 2 2 6 2 2 6
48866- 2 2 6 2 2 6 2 2 6 2 2 6
48867- 2 2 6 2 2 6 2 2 6 2 2 6
48868- 2 2 6 2 2 6 2 2 6 2 2 6
48869- 2 2 6 2 2 6 2 2 6 54 54 54
48870- 66 66 66 26 26 26 6 6 6 0 0 0
48871- 0 0 0 0 0 0 0 0 0 0 0 0
48872- 0 0 0 0 0 0 0 0 0 0 0 0
48873- 0 0 0 0 0 0 0 0 0 0 0 0
48874- 0 0 0 0 0 0 0 0 0 0 0 0
48875- 0 0 0 0 0 0 0 0 0 0 0 0
48876- 0 0 0 0 0 0 0 0 0 0 0 0
48877- 0 0 0 0 0 0 0 0 0 0 0 0
48878- 0 0 0 0 0 1 0 0 1 0 0 0
48879- 0 0 0 0 0 0 0 0 0 0 0 0
48880- 0 0 0 0 0 0 0 0 0 0 0 0
48881- 0 0 0 0 0 0 0 0 0 0 0 0
48882- 0 0 0 0 0 0 0 0 0 0 0 0
48883- 0 0 0 0 0 0 0 0 0 14 14 14
48884- 42 42 42 82 82 82 2 2 6 2 2 6
48885- 2 2 6 6 6 6 10 10 10 2 2 6
48886- 2 2 6 2 2 6 2 2 6 2 2 6
48887- 2 2 6 2 2 6 2 2 6 6 6 6
48888- 14 14 14 10 10 10 2 2 6 2 2 6
48889- 2 2 6 2 2 6 2 2 6 18 18 18
48890- 82 82 82 34 34 34 10 10 10 0 0 0
48891- 0 0 0 0 0 0 0 0 0 0 0 0
48892- 0 0 0 0 0 0 0 0 0 0 0 0
48893- 0 0 0 0 0 0 0 0 0 0 0 0
48894- 0 0 0 0 0 0 0 0 0 0 0 0
48895- 0 0 0 0 0 0 0 0 0 0 0 0
48896- 0 0 0 0 0 0 0 0 0 0 0 0
48897- 0 0 0 0 0 0 0 0 0 0 0 0
48898- 0 0 1 0 0 0 0 0 0 0 0 0
48899- 0 0 0 0 0 0 0 0 0 0 0 0
48900- 0 0 0 0 0 0 0 0 0 0 0 0
48901- 0 0 0 0 0 0 0 0 0 0 0 0
48902- 0 0 0 0 0 0 0 0 0 0 0 0
48903- 0 0 0 0 0 0 0 0 0 14 14 14
48904- 46 46 46 86 86 86 2 2 6 2 2 6
48905- 6 6 6 6 6 6 22 22 22 34 34 34
48906- 6 6 6 2 2 6 2 2 6 2 2 6
48907- 2 2 6 2 2 6 18 18 18 34 34 34
48908- 10 10 10 50 50 50 22 22 22 2 2 6
48909- 2 2 6 2 2 6 2 2 6 10 10 10
48910- 86 86 86 42 42 42 14 14 14 0 0 0
48911- 0 0 0 0 0 0 0 0 0 0 0 0
48912- 0 0 0 0 0 0 0 0 0 0 0 0
48913- 0 0 0 0 0 0 0 0 0 0 0 0
48914- 0 0 0 0 0 0 0 0 0 0 0 0
48915- 0 0 0 0 0 0 0 0 0 0 0 0
48916- 0 0 0 0 0 0 0 0 0 0 0 0
48917- 0 0 0 0 0 0 0 0 0 0 0 0
48918- 0 0 1 0 0 1 0 0 1 0 0 0
48919- 0 0 0 0 0 0 0 0 0 0 0 0
48920- 0 0 0 0 0 0 0 0 0 0 0 0
48921- 0 0 0 0 0 0 0 0 0 0 0 0
48922- 0 0 0 0 0 0 0 0 0 0 0 0
48923- 0 0 0 0 0 0 0 0 0 14 14 14
48924- 46 46 46 86 86 86 2 2 6 2 2 6
48925- 38 38 38 116 116 116 94 94 94 22 22 22
48926- 22 22 22 2 2 6 2 2 6 2 2 6
48927- 14 14 14 86 86 86 138 138 138 162 162 162
48928-154 154 154 38 38 38 26 26 26 6 6 6
48929- 2 2 6 2 2 6 2 2 6 2 2 6
48930- 86 86 86 46 46 46 14 14 14 0 0 0
48931- 0 0 0 0 0 0 0 0 0 0 0 0
48932- 0 0 0 0 0 0 0 0 0 0 0 0
48933- 0 0 0 0 0 0 0 0 0 0 0 0
48934- 0 0 0 0 0 0 0 0 0 0 0 0
48935- 0 0 0 0 0 0 0 0 0 0 0 0
48936- 0 0 0 0 0 0 0 0 0 0 0 0
48937- 0 0 0 0 0 0 0 0 0 0 0 0
48938- 0 0 0 0 0 0 0 0 0 0 0 0
48939- 0 0 0 0 0 0 0 0 0 0 0 0
48940- 0 0 0 0 0 0 0 0 0 0 0 0
48941- 0 0 0 0 0 0 0 0 0 0 0 0
48942- 0 0 0 0 0 0 0 0 0 0 0 0
48943- 0 0 0 0 0 0 0 0 0 14 14 14
48944- 46 46 46 86 86 86 2 2 6 14 14 14
48945-134 134 134 198 198 198 195 195 195 116 116 116
48946- 10 10 10 2 2 6 2 2 6 6 6 6
48947-101 98 89 187 187 187 210 210 210 218 218 218
48948-214 214 214 134 134 134 14 14 14 6 6 6
48949- 2 2 6 2 2 6 2 2 6 2 2 6
48950- 86 86 86 50 50 50 18 18 18 6 6 6
48951- 0 0 0 0 0 0 0 0 0 0 0 0
48952- 0 0 0 0 0 0 0 0 0 0 0 0
48953- 0 0 0 0 0 0 0 0 0 0 0 0
48954- 0 0 0 0 0 0 0 0 0 0 0 0
48955- 0 0 0 0 0 0 0 0 0 0 0 0
48956- 0 0 0 0 0 0 0 0 0 0 0 0
48957- 0 0 0 0 0 0 0 0 1 0 0 0
48958- 0 0 1 0 0 1 0 0 1 0 0 0
48959- 0 0 0 0 0 0 0 0 0 0 0 0
48960- 0 0 0 0 0 0 0 0 0 0 0 0
48961- 0 0 0 0 0 0 0 0 0 0 0 0
48962- 0 0 0 0 0 0 0 0 0 0 0 0
48963- 0 0 0 0 0 0 0 0 0 14 14 14
48964- 46 46 46 86 86 86 2 2 6 54 54 54
48965-218 218 218 195 195 195 226 226 226 246 246 246
48966- 58 58 58 2 2 6 2 2 6 30 30 30
48967-210 210 210 253 253 253 174 174 174 123 123 123
48968-221 221 221 234 234 234 74 74 74 2 2 6
48969- 2 2 6 2 2 6 2 2 6 2 2 6
48970- 70 70 70 58 58 58 22 22 22 6 6 6
48971- 0 0 0 0 0 0 0 0 0 0 0 0
48972- 0 0 0 0 0 0 0 0 0 0 0 0
48973- 0 0 0 0 0 0 0 0 0 0 0 0
48974- 0 0 0 0 0 0 0 0 0 0 0 0
48975- 0 0 0 0 0 0 0 0 0 0 0 0
48976- 0 0 0 0 0 0 0 0 0 0 0 0
48977- 0 0 0 0 0 0 0 0 0 0 0 0
48978- 0 0 0 0 0 0 0 0 0 0 0 0
48979- 0 0 0 0 0 0 0 0 0 0 0 0
48980- 0 0 0 0 0 0 0 0 0 0 0 0
48981- 0 0 0 0 0 0 0 0 0 0 0 0
48982- 0 0 0 0 0 0 0 0 0 0 0 0
48983- 0 0 0 0 0 0 0 0 0 14 14 14
48984- 46 46 46 82 82 82 2 2 6 106 106 106
48985-170 170 170 26 26 26 86 86 86 226 226 226
48986-123 123 123 10 10 10 14 14 14 46 46 46
48987-231 231 231 190 190 190 6 6 6 70 70 70
48988- 90 90 90 238 238 238 158 158 158 2 2 6
48989- 2 2 6 2 2 6 2 2 6 2 2 6
48990- 70 70 70 58 58 58 22 22 22 6 6 6
48991- 0 0 0 0 0 0 0 0 0 0 0 0
48992- 0 0 0 0 0 0 0 0 0 0 0 0
48993- 0 0 0 0 0 0 0 0 0 0 0 0
48994- 0 0 0 0 0 0 0 0 0 0 0 0
48995- 0 0 0 0 0 0 0 0 0 0 0 0
48996- 0 0 0 0 0 0 0 0 0 0 0 0
48997- 0 0 0 0 0 0 0 0 1 0 0 0
48998- 0 0 1 0 0 1 0 0 1 0 0 0
48999- 0 0 0 0 0 0 0 0 0 0 0 0
49000- 0 0 0 0 0 0 0 0 0 0 0 0
49001- 0 0 0 0 0 0 0 0 0 0 0 0
49002- 0 0 0 0 0 0 0 0 0 0 0 0
49003- 0 0 0 0 0 0 0 0 0 14 14 14
49004- 42 42 42 86 86 86 6 6 6 116 116 116
49005-106 106 106 6 6 6 70 70 70 149 149 149
49006-128 128 128 18 18 18 38 38 38 54 54 54
49007-221 221 221 106 106 106 2 2 6 14 14 14
49008- 46 46 46 190 190 190 198 198 198 2 2 6
49009- 2 2 6 2 2 6 2 2 6 2 2 6
49010- 74 74 74 62 62 62 22 22 22 6 6 6
49011- 0 0 0 0 0 0 0 0 0 0 0 0
49012- 0 0 0 0 0 0 0 0 0 0 0 0
49013- 0 0 0 0 0 0 0 0 0 0 0 0
49014- 0 0 0 0 0 0 0 0 0 0 0 0
49015- 0 0 0 0 0 0 0 0 0 0 0 0
49016- 0 0 0 0 0 0 0 0 0 0 0 0
49017- 0 0 0 0 0 0 0 0 1 0 0 0
49018- 0 0 1 0 0 0 0 0 1 0 0 0
49019- 0 0 0 0 0 0 0 0 0 0 0 0
49020- 0 0 0 0 0 0 0 0 0 0 0 0
49021- 0 0 0 0 0 0 0 0 0 0 0 0
49022- 0 0 0 0 0 0 0 0 0 0 0 0
49023- 0 0 0 0 0 0 0 0 0 14 14 14
49024- 42 42 42 94 94 94 14 14 14 101 101 101
49025-128 128 128 2 2 6 18 18 18 116 116 116
49026-118 98 46 121 92 8 121 92 8 98 78 10
49027-162 162 162 106 106 106 2 2 6 2 2 6
49028- 2 2 6 195 195 195 195 195 195 6 6 6
49029- 2 2 6 2 2 6 2 2 6 2 2 6
49030- 74 74 74 62 62 62 22 22 22 6 6 6
49031- 0 0 0 0 0 0 0 0 0 0 0 0
49032- 0 0 0 0 0 0 0 0 0 0 0 0
49033- 0 0 0 0 0 0 0 0 0 0 0 0
49034- 0 0 0 0 0 0 0 0 0 0 0 0
49035- 0 0 0 0 0 0 0 0 0 0 0 0
49036- 0 0 0 0 0 0 0 0 0 0 0 0
49037- 0 0 0 0 0 0 0 0 1 0 0 1
49038- 0 0 1 0 0 0 0 0 1 0 0 0
49039- 0 0 0 0 0 0 0 0 0 0 0 0
49040- 0 0 0 0 0 0 0 0 0 0 0 0
49041- 0 0 0 0 0 0 0 0 0 0 0 0
49042- 0 0 0 0 0 0 0 0 0 0 0 0
49043- 0 0 0 0 0 0 0 0 0 10 10 10
49044- 38 38 38 90 90 90 14 14 14 58 58 58
49045-210 210 210 26 26 26 54 38 6 154 114 10
49046-226 170 11 236 186 11 225 175 15 184 144 12
49047-215 174 15 175 146 61 37 26 9 2 2 6
49048- 70 70 70 246 246 246 138 138 138 2 2 6
49049- 2 2 6 2 2 6 2 2 6 2 2 6
49050- 70 70 70 66 66 66 26 26 26 6 6 6
49051- 0 0 0 0 0 0 0 0 0 0 0 0
49052- 0 0 0 0 0 0 0 0 0 0 0 0
49053- 0 0 0 0 0 0 0 0 0 0 0 0
49054- 0 0 0 0 0 0 0 0 0 0 0 0
49055- 0 0 0 0 0 0 0 0 0 0 0 0
49056- 0 0 0 0 0 0 0 0 0 0 0 0
49057- 0 0 0 0 0 0 0 0 0 0 0 0
49058- 0 0 0 0 0 0 0 0 0 0 0 0
49059- 0 0 0 0 0 0 0 0 0 0 0 0
49060- 0 0 0 0 0 0 0 0 0 0 0 0
49061- 0 0 0 0 0 0 0 0 0 0 0 0
49062- 0 0 0 0 0 0 0 0 0 0 0 0
49063- 0 0 0 0 0 0 0 0 0 10 10 10
49064- 38 38 38 86 86 86 14 14 14 10 10 10
49065-195 195 195 188 164 115 192 133 9 225 175 15
49066-239 182 13 234 190 10 232 195 16 232 200 30
49067-245 207 45 241 208 19 232 195 16 184 144 12
49068-218 194 134 211 206 186 42 42 42 2 2 6
49069- 2 2 6 2 2 6 2 2 6 2 2 6
49070- 50 50 50 74 74 74 30 30 30 6 6 6
49071- 0 0 0 0 0 0 0 0 0 0 0 0
49072- 0 0 0 0 0 0 0 0 0 0 0 0
49073- 0 0 0 0 0 0 0 0 0 0 0 0
49074- 0 0 0 0 0 0 0 0 0 0 0 0
49075- 0 0 0 0 0 0 0 0 0 0 0 0
49076- 0 0 0 0 0 0 0 0 0 0 0 0
49077- 0 0 0 0 0 0 0 0 0 0 0 0
49078- 0 0 0 0 0 0 0 0 0 0 0 0
49079- 0 0 0 0 0 0 0 0 0 0 0 0
49080- 0 0 0 0 0 0 0 0 0 0 0 0
49081- 0 0 0 0 0 0 0 0 0 0 0 0
49082- 0 0 0 0 0 0 0 0 0 0 0 0
49083- 0 0 0 0 0 0 0 0 0 10 10 10
49084- 34 34 34 86 86 86 14 14 14 2 2 6
49085-121 87 25 192 133 9 219 162 10 239 182 13
49086-236 186 11 232 195 16 241 208 19 244 214 54
49087-246 218 60 246 218 38 246 215 20 241 208 19
49088-241 208 19 226 184 13 121 87 25 2 2 6
49089- 2 2 6 2 2 6 2 2 6 2 2 6
49090- 50 50 50 82 82 82 34 34 34 10 10 10
49091- 0 0 0 0 0 0 0 0 0 0 0 0
49092- 0 0 0 0 0 0 0 0 0 0 0 0
49093- 0 0 0 0 0 0 0 0 0 0 0 0
49094- 0 0 0 0 0 0 0 0 0 0 0 0
49095- 0 0 0 0 0 0 0 0 0 0 0 0
49096- 0 0 0 0 0 0 0 0 0 0 0 0
49097- 0 0 0 0 0 0 0 0 0 0 0 0
49098- 0 0 0 0 0 0 0 0 0 0 0 0
49099- 0 0 0 0 0 0 0 0 0 0 0 0
49100- 0 0 0 0 0 0 0 0 0 0 0 0
49101- 0 0 0 0 0 0 0 0 0 0 0 0
49102- 0 0 0 0 0 0 0 0 0 0 0 0
49103- 0 0 0 0 0 0 0 0 0 10 10 10
49104- 34 34 34 82 82 82 30 30 30 61 42 6
49105-180 123 7 206 145 10 230 174 11 239 182 13
49106-234 190 10 238 202 15 241 208 19 246 218 74
49107-246 218 38 246 215 20 246 215 20 246 215 20
49108-226 184 13 215 174 15 184 144 12 6 6 6
49109- 2 2 6 2 2 6 2 2 6 2 2 6
49110- 26 26 26 94 94 94 42 42 42 14 14 14
49111- 0 0 0 0 0 0 0 0 0 0 0 0
49112- 0 0 0 0 0 0 0 0 0 0 0 0
49113- 0 0 0 0 0 0 0 0 0 0 0 0
49114- 0 0 0 0 0 0 0 0 0 0 0 0
49115- 0 0 0 0 0 0 0 0 0 0 0 0
49116- 0 0 0 0 0 0 0 0 0 0 0 0
49117- 0 0 0 0 0 0 0 0 0 0 0 0
49118- 0 0 0 0 0 0 0 0 0 0 0 0
49119- 0 0 0 0 0 0 0 0 0 0 0 0
49120- 0 0 0 0 0 0 0 0 0 0 0 0
49121- 0 0 0 0 0 0 0 0 0 0 0 0
49122- 0 0 0 0 0 0 0 0 0 0 0 0
49123- 0 0 0 0 0 0 0 0 0 10 10 10
49124- 30 30 30 78 78 78 50 50 50 104 69 6
49125-192 133 9 216 158 10 236 178 12 236 186 11
49126-232 195 16 241 208 19 244 214 54 245 215 43
49127-246 215 20 246 215 20 241 208 19 198 155 10
49128-200 144 11 216 158 10 156 118 10 2 2 6
49129- 2 2 6 2 2 6 2 2 6 2 2 6
49130- 6 6 6 90 90 90 54 54 54 18 18 18
49131- 6 6 6 0 0 0 0 0 0 0 0 0
49132- 0 0 0 0 0 0 0 0 0 0 0 0
49133- 0 0 0 0 0 0 0 0 0 0 0 0
49134- 0 0 0 0 0 0 0 0 0 0 0 0
49135- 0 0 0 0 0 0 0 0 0 0 0 0
49136- 0 0 0 0 0 0 0 0 0 0 0 0
49137- 0 0 0 0 0 0 0 0 0 0 0 0
49138- 0 0 0 0 0 0 0 0 0 0 0 0
49139- 0 0 0 0 0 0 0 0 0 0 0 0
49140- 0 0 0 0 0 0 0 0 0 0 0 0
49141- 0 0 0 0 0 0 0 0 0 0 0 0
49142- 0 0 0 0 0 0 0 0 0 0 0 0
49143- 0 0 0 0 0 0 0 0 0 10 10 10
49144- 30 30 30 78 78 78 46 46 46 22 22 22
49145-137 92 6 210 162 10 239 182 13 238 190 10
49146-238 202 15 241 208 19 246 215 20 246 215 20
49147-241 208 19 203 166 17 185 133 11 210 150 10
49148-216 158 10 210 150 10 102 78 10 2 2 6
49149- 6 6 6 54 54 54 14 14 14 2 2 6
49150- 2 2 6 62 62 62 74 74 74 30 30 30
49151- 10 10 10 0 0 0 0 0 0 0 0 0
49152- 0 0 0 0 0 0 0 0 0 0 0 0
49153- 0 0 0 0 0 0 0 0 0 0 0 0
49154- 0 0 0 0 0 0 0 0 0 0 0 0
49155- 0 0 0 0 0 0 0 0 0 0 0 0
49156- 0 0 0 0 0 0 0 0 0 0 0 0
49157- 0 0 0 0 0 0 0 0 0 0 0 0
49158- 0 0 0 0 0 0 0 0 0 0 0 0
49159- 0 0 0 0 0 0 0 0 0 0 0 0
49160- 0 0 0 0 0 0 0 0 0 0 0 0
49161- 0 0 0 0 0 0 0 0 0 0 0 0
49162- 0 0 0 0 0 0 0 0 0 0 0 0
49163- 0 0 0 0 0 0 0 0 0 10 10 10
49164- 34 34 34 78 78 78 50 50 50 6 6 6
49165- 94 70 30 139 102 15 190 146 13 226 184 13
49166-232 200 30 232 195 16 215 174 15 190 146 13
49167-168 122 10 192 133 9 210 150 10 213 154 11
49168-202 150 34 182 157 106 101 98 89 2 2 6
49169- 2 2 6 78 78 78 116 116 116 58 58 58
49170- 2 2 6 22 22 22 90 90 90 46 46 46
49171- 18 18 18 6 6 6 0 0 0 0 0 0
49172- 0 0 0 0 0 0 0 0 0 0 0 0
49173- 0 0 0 0 0 0 0 0 0 0 0 0
49174- 0 0 0 0 0 0 0 0 0 0 0 0
49175- 0 0 0 0 0 0 0 0 0 0 0 0
49176- 0 0 0 0 0 0 0 0 0 0 0 0
49177- 0 0 0 0 0 0 0 0 0 0 0 0
49178- 0 0 0 0 0 0 0 0 0 0 0 0
49179- 0 0 0 0 0 0 0 0 0 0 0 0
49180- 0 0 0 0 0 0 0 0 0 0 0 0
49181- 0 0 0 0 0 0 0 0 0 0 0 0
49182- 0 0 0 0 0 0 0 0 0 0 0 0
49183- 0 0 0 0 0 0 0 0 0 10 10 10
49184- 38 38 38 86 86 86 50 50 50 6 6 6
49185-128 128 128 174 154 114 156 107 11 168 122 10
49186-198 155 10 184 144 12 197 138 11 200 144 11
49187-206 145 10 206 145 10 197 138 11 188 164 115
49188-195 195 195 198 198 198 174 174 174 14 14 14
49189- 2 2 6 22 22 22 116 116 116 116 116 116
49190- 22 22 22 2 2 6 74 74 74 70 70 70
49191- 30 30 30 10 10 10 0 0 0 0 0 0
49192- 0 0 0 0 0 0 0 0 0 0 0 0
49193- 0 0 0 0 0 0 0 0 0 0 0 0
49194- 0 0 0 0 0 0 0 0 0 0 0 0
49195- 0 0 0 0 0 0 0 0 0 0 0 0
49196- 0 0 0 0 0 0 0 0 0 0 0 0
49197- 0 0 0 0 0 0 0 0 0 0 0 0
49198- 0 0 0 0 0 0 0 0 0 0 0 0
49199- 0 0 0 0 0 0 0 0 0 0 0 0
49200- 0 0 0 0 0 0 0 0 0 0 0 0
49201- 0 0 0 0 0 0 0 0 0 0 0 0
49202- 0 0 0 0 0 0 0 0 0 0 0 0
49203- 0 0 0 0 0 0 6 6 6 18 18 18
49204- 50 50 50 101 101 101 26 26 26 10 10 10
49205-138 138 138 190 190 190 174 154 114 156 107 11
49206-197 138 11 200 144 11 197 138 11 192 133 9
49207-180 123 7 190 142 34 190 178 144 187 187 187
49208-202 202 202 221 221 221 214 214 214 66 66 66
49209- 2 2 6 2 2 6 50 50 50 62 62 62
49210- 6 6 6 2 2 6 10 10 10 90 90 90
49211- 50 50 50 18 18 18 6 6 6 0 0 0
49212- 0 0 0 0 0 0 0 0 0 0 0 0
49213- 0 0 0 0 0 0 0 0 0 0 0 0
49214- 0 0 0 0 0 0 0 0 0 0 0 0
49215- 0 0 0 0 0 0 0 0 0 0 0 0
49216- 0 0 0 0 0 0 0 0 0 0 0 0
49217- 0 0 0 0 0 0 0 0 0 0 0 0
49218- 0 0 0 0 0 0 0 0 0 0 0 0
49219- 0 0 0 0 0 0 0 0 0 0 0 0
49220- 0 0 0 0 0 0 0 0 0 0 0 0
49221- 0 0 0 0 0 0 0 0 0 0 0 0
49222- 0 0 0 0 0 0 0 0 0 0 0 0
49223- 0 0 0 0 0 0 10 10 10 34 34 34
49224- 74 74 74 74 74 74 2 2 6 6 6 6
49225-144 144 144 198 198 198 190 190 190 178 166 146
49226-154 121 60 156 107 11 156 107 11 168 124 44
49227-174 154 114 187 187 187 190 190 190 210 210 210
49228-246 246 246 253 253 253 253 253 253 182 182 182
49229- 6 6 6 2 2 6 2 2 6 2 2 6
49230- 2 2 6 2 2 6 2 2 6 62 62 62
49231- 74 74 74 34 34 34 14 14 14 0 0 0
49232- 0 0 0 0 0 0 0 0 0 0 0 0
49233- 0 0 0 0 0 0 0 0 0 0 0 0
49234- 0 0 0 0 0 0 0 0 0 0 0 0
49235- 0 0 0 0 0 0 0 0 0 0 0 0
49236- 0 0 0 0 0 0 0 0 0 0 0 0
49237- 0 0 0 0 0 0 0 0 0 0 0 0
49238- 0 0 0 0 0 0 0 0 0 0 0 0
49239- 0 0 0 0 0 0 0 0 0 0 0 0
49240- 0 0 0 0 0 0 0 0 0 0 0 0
49241- 0 0 0 0 0 0 0 0 0 0 0 0
49242- 0 0 0 0 0 0 0 0 0 0 0 0
49243- 0 0 0 10 10 10 22 22 22 54 54 54
49244- 94 94 94 18 18 18 2 2 6 46 46 46
49245-234 234 234 221 221 221 190 190 190 190 190 190
49246-190 190 190 187 187 187 187 187 187 190 190 190
49247-190 190 190 195 195 195 214 214 214 242 242 242
49248-253 253 253 253 253 253 253 253 253 253 253 253
49249- 82 82 82 2 2 6 2 2 6 2 2 6
49250- 2 2 6 2 2 6 2 2 6 14 14 14
49251- 86 86 86 54 54 54 22 22 22 6 6 6
49252- 0 0 0 0 0 0 0 0 0 0 0 0
49253- 0 0 0 0 0 0 0 0 0 0 0 0
49254- 0 0 0 0 0 0 0 0 0 0 0 0
49255- 0 0 0 0 0 0 0 0 0 0 0 0
49256- 0 0 0 0 0 0 0 0 0 0 0 0
49257- 0 0 0 0 0 0 0 0 0 0 0 0
49258- 0 0 0 0 0 0 0 0 0 0 0 0
49259- 0 0 0 0 0 0 0 0 0 0 0 0
49260- 0 0 0 0 0 0 0 0 0 0 0 0
49261- 0 0 0 0 0 0 0 0 0 0 0 0
49262- 0 0 0 0 0 0 0 0 0 0 0 0
49263- 6 6 6 18 18 18 46 46 46 90 90 90
49264- 46 46 46 18 18 18 6 6 6 182 182 182
49265-253 253 253 246 246 246 206 206 206 190 190 190
49266-190 190 190 190 190 190 190 190 190 190 190 190
49267-206 206 206 231 231 231 250 250 250 253 253 253
49268-253 253 253 253 253 253 253 253 253 253 253 253
49269-202 202 202 14 14 14 2 2 6 2 2 6
49270- 2 2 6 2 2 6 2 2 6 2 2 6
49271- 42 42 42 86 86 86 42 42 42 18 18 18
49272- 6 6 6 0 0 0 0 0 0 0 0 0
49273- 0 0 0 0 0 0 0 0 0 0 0 0
49274- 0 0 0 0 0 0 0 0 0 0 0 0
49275- 0 0 0 0 0 0 0 0 0 0 0 0
49276- 0 0 0 0 0 0 0 0 0 0 0 0
49277- 0 0 0 0 0 0 0 0 0 0 0 0
49278- 0 0 0 0 0 0 0 0 0 0 0 0
49279- 0 0 0 0 0 0 0 0 0 0 0 0
49280- 0 0 0 0 0 0 0 0 0 0 0 0
49281- 0 0 0 0 0 0 0 0 0 0 0 0
49282- 0 0 0 0 0 0 0 0 0 6 6 6
49283- 14 14 14 38 38 38 74 74 74 66 66 66
49284- 2 2 6 6 6 6 90 90 90 250 250 250
49285-253 253 253 253 253 253 238 238 238 198 198 198
49286-190 190 190 190 190 190 195 195 195 221 221 221
49287-246 246 246 253 253 253 253 253 253 253 253 253
49288-253 253 253 253 253 253 253 253 253 253 253 253
49289-253 253 253 82 82 82 2 2 6 2 2 6
49290- 2 2 6 2 2 6 2 2 6 2 2 6
49291- 2 2 6 78 78 78 70 70 70 34 34 34
49292- 14 14 14 6 6 6 0 0 0 0 0 0
49293- 0 0 0 0 0 0 0 0 0 0 0 0
49294- 0 0 0 0 0 0 0 0 0 0 0 0
49295- 0 0 0 0 0 0 0 0 0 0 0 0
49296- 0 0 0 0 0 0 0 0 0 0 0 0
49297- 0 0 0 0 0 0 0 0 0 0 0 0
49298- 0 0 0 0 0 0 0 0 0 0 0 0
49299- 0 0 0 0 0 0 0 0 0 0 0 0
49300- 0 0 0 0 0 0 0 0 0 0 0 0
49301- 0 0 0 0 0 0 0 0 0 0 0 0
49302- 0 0 0 0 0 0 0 0 0 14 14 14
49303- 34 34 34 66 66 66 78 78 78 6 6 6
49304- 2 2 6 18 18 18 218 218 218 253 253 253
49305-253 253 253 253 253 253 253 253 253 246 246 246
49306-226 226 226 231 231 231 246 246 246 253 253 253
49307-253 253 253 253 253 253 253 253 253 253 253 253
49308-253 253 253 253 253 253 253 253 253 253 253 253
49309-253 253 253 178 178 178 2 2 6 2 2 6
49310- 2 2 6 2 2 6 2 2 6 2 2 6
49311- 2 2 6 18 18 18 90 90 90 62 62 62
49312- 30 30 30 10 10 10 0 0 0 0 0 0
49313- 0 0 0 0 0 0 0 0 0 0 0 0
49314- 0 0 0 0 0 0 0 0 0 0 0 0
49315- 0 0 0 0 0 0 0 0 0 0 0 0
49316- 0 0 0 0 0 0 0 0 0 0 0 0
49317- 0 0 0 0 0 0 0 0 0 0 0 0
49318- 0 0 0 0 0 0 0 0 0 0 0 0
49319- 0 0 0 0 0 0 0 0 0 0 0 0
49320- 0 0 0 0 0 0 0 0 0 0 0 0
49321- 0 0 0 0 0 0 0 0 0 0 0 0
49322- 0 0 0 0 0 0 10 10 10 26 26 26
49323- 58 58 58 90 90 90 18 18 18 2 2 6
49324- 2 2 6 110 110 110 253 253 253 253 253 253
49325-253 253 253 253 253 253 253 253 253 253 253 253
49326-250 250 250 253 253 253 253 253 253 253 253 253
49327-253 253 253 253 253 253 253 253 253 253 253 253
49328-253 253 253 253 253 253 253 253 253 253 253 253
49329-253 253 253 231 231 231 18 18 18 2 2 6
49330- 2 2 6 2 2 6 2 2 6 2 2 6
49331- 2 2 6 2 2 6 18 18 18 94 94 94
49332- 54 54 54 26 26 26 10 10 10 0 0 0
49333- 0 0 0 0 0 0 0 0 0 0 0 0
49334- 0 0 0 0 0 0 0 0 0 0 0 0
49335- 0 0 0 0 0 0 0 0 0 0 0 0
49336- 0 0 0 0 0 0 0 0 0 0 0 0
49337- 0 0 0 0 0 0 0 0 0 0 0 0
49338- 0 0 0 0 0 0 0 0 0 0 0 0
49339- 0 0 0 0 0 0 0 0 0 0 0 0
49340- 0 0 0 0 0 0 0 0 0 0 0 0
49341- 0 0 0 0 0 0 0 0 0 0 0 0
49342- 0 0 0 6 6 6 22 22 22 50 50 50
49343- 90 90 90 26 26 26 2 2 6 2 2 6
49344- 14 14 14 195 195 195 250 250 250 253 253 253
49345-253 253 253 253 253 253 253 253 253 253 253 253
49346-253 253 253 253 253 253 253 253 253 253 253 253
49347-253 253 253 253 253 253 253 253 253 253 253 253
49348-253 253 253 253 253 253 253 253 253 253 253 253
49349-250 250 250 242 242 242 54 54 54 2 2 6
49350- 2 2 6 2 2 6 2 2 6 2 2 6
49351- 2 2 6 2 2 6 2 2 6 38 38 38
49352- 86 86 86 50 50 50 22 22 22 6 6 6
49353- 0 0 0 0 0 0 0 0 0 0 0 0
49354- 0 0 0 0 0 0 0 0 0 0 0 0
49355- 0 0 0 0 0 0 0 0 0 0 0 0
49356- 0 0 0 0 0 0 0 0 0 0 0 0
49357- 0 0 0 0 0 0 0 0 0 0 0 0
49358- 0 0 0 0 0 0 0 0 0 0 0 0
49359- 0 0 0 0 0 0 0 0 0 0 0 0
49360- 0 0 0 0 0 0 0 0 0 0 0 0
49361- 0 0 0 0 0 0 0 0 0 0 0 0
49362- 6 6 6 14 14 14 38 38 38 82 82 82
49363- 34 34 34 2 2 6 2 2 6 2 2 6
49364- 42 42 42 195 195 195 246 246 246 253 253 253
49365-253 253 253 253 253 253 253 253 253 250 250 250
49366-242 242 242 242 242 242 250 250 250 253 253 253
49367-253 253 253 253 253 253 253 253 253 253 253 253
49368-253 253 253 250 250 250 246 246 246 238 238 238
49369-226 226 226 231 231 231 101 101 101 6 6 6
49370- 2 2 6 2 2 6 2 2 6 2 2 6
49371- 2 2 6 2 2 6 2 2 6 2 2 6
49372- 38 38 38 82 82 82 42 42 42 14 14 14
49373- 6 6 6 0 0 0 0 0 0 0 0 0
49374- 0 0 0 0 0 0 0 0 0 0 0 0
49375- 0 0 0 0 0 0 0 0 0 0 0 0
49376- 0 0 0 0 0 0 0 0 0 0 0 0
49377- 0 0 0 0 0 0 0 0 0 0 0 0
49378- 0 0 0 0 0 0 0 0 0 0 0 0
49379- 0 0 0 0 0 0 0 0 0 0 0 0
49380- 0 0 0 0 0 0 0 0 0 0 0 0
49381- 0 0 0 0 0 0 0 0 0 0 0 0
49382- 10 10 10 26 26 26 62 62 62 66 66 66
49383- 2 2 6 2 2 6 2 2 6 6 6 6
49384- 70 70 70 170 170 170 206 206 206 234 234 234
49385-246 246 246 250 250 250 250 250 250 238 238 238
49386-226 226 226 231 231 231 238 238 238 250 250 250
49387-250 250 250 250 250 250 246 246 246 231 231 231
49388-214 214 214 206 206 206 202 202 202 202 202 202
49389-198 198 198 202 202 202 182 182 182 18 18 18
49390- 2 2 6 2 2 6 2 2 6 2 2 6
49391- 2 2 6 2 2 6 2 2 6 2 2 6
49392- 2 2 6 62 62 62 66 66 66 30 30 30
49393- 10 10 10 0 0 0 0 0 0 0 0 0
49394- 0 0 0 0 0 0 0 0 0 0 0 0
49395- 0 0 0 0 0 0 0 0 0 0 0 0
49396- 0 0 0 0 0 0 0 0 0 0 0 0
49397- 0 0 0 0 0 0 0 0 0 0 0 0
49398- 0 0 0 0 0 0 0 0 0 0 0 0
49399- 0 0 0 0 0 0 0 0 0 0 0 0
49400- 0 0 0 0 0 0 0 0 0 0 0 0
49401- 0 0 0 0 0 0 0 0 0 0 0 0
49402- 14 14 14 42 42 42 82 82 82 18 18 18
49403- 2 2 6 2 2 6 2 2 6 10 10 10
49404- 94 94 94 182 182 182 218 218 218 242 242 242
49405-250 250 250 253 253 253 253 253 253 250 250 250
49406-234 234 234 253 253 253 253 253 253 253 253 253
49407-253 253 253 253 253 253 253 253 253 246 246 246
49408-238 238 238 226 226 226 210 210 210 202 202 202
49409-195 195 195 195 195 195 210 210 210 158 158 158
49410- 6 6 6 14 14 14 50 50 50 14 14 14
49411- 2 2 6 2 2 6 2 2 6 2 2 6
49412- 2 2 6 6 6 6 86 86 86 46 46 46
49413- 18 18 18 6 6 6 0 0 0 0 0 0
49414- 0 0 0 0 0 0 0 0 0 0 0 0
49415- 0 0 0 0 0 0 0 0 0 0 0 0
49416- 0 0 0 0 0 0 0 0 0 0 0 0
49417- 0 0 0 0 0 0 0 0 0 0 0 0
49418- 0 0 0 0 0 0 0 0 0 0 0 0
49419- 0 0 0 0 0 0 0 0 0 0 0 0
49420- 0 0 0 0 0 0 0 0 0 0 0 0
49421- 0 0 0 0 0 0 0 0 0 6 6 6
49422- 22 22 22 54 54 54 70 70 70 2 2 6
49423- 2 2 6 10 10 10 2 2 6 22 22 22
49424-166 166 166 231 231 231 250 250 250 253 253 253
49425-253 253 253 253 253 253 253 253 253 250 250 250
49426-242 242 242 253 253 253 253 253 253 253 253 253
49427-253 253 253 253 253 253 253 253 253 253 253 253
49428-253 253 253 253 253 253 253 253 253 246 246 246
49429-231 231 231 206 206 206 198 198 198 226 226 226
49430- 94 94 94 2 2 6 6 6 6 38 38 38
49431- 30 30 30 2 2 6 2 2 6 2 2 6
49432- 2 2 6 2 2 6 62 62 62 66 66 66
49433- 26 26 26 10 10 10 0 0 0 0 0 0
49434- 0 0 0 0 0 0 0 0 0 0 0 0
49435- 0 0 0 0 0 0 0 0 0 0 0 0
49436- 0 0 0 0 0 0 0 0 0 0 0 0
49437- 0 0 0 0 0 0 0 0 0 0 0 0
49438- 0 0 0 0 0 0 0 0 0 0 0 0
49439- 0 0 0 0 0 0 0 0 0 0 0 0
49440- 0 0 0 0 0 0 0 0 0 0 0 0
49441- 0 0 0 0 0 0 0 0 0 10 10 10
49442- 30 30 30 74 74 74 50 50 50 2 2 6
49443- 26 26 26 26 26 26 2 2 6 106 106 106
49444-238 238 238 253 253 253 253 253 253 253 253 253
49445-253 253 253 253 253 253 253 253 253 253 253 253
49446-253 253 253 253 253 253 253 253 253 253 253 253
49447-253 253 253 253 253 253 253 253 253 253 253 253
49448-253 253 253 253 253 253 253 253 253 253 253 253
49449-253 253 253 246 246 246 218 218 218 202 202 202
49450-210 210 210 14 14 14 2 2 6 2 2 6
49451- 30 30 30 22 22 22 2 2 6 2 2 6
49452- 2 2 6 2 2 6 18 18 18 86 86 86
49453- 42 42 42 14 14 14 0 0 0 0 0 0
49454- 0 0 0 0 0 0 0 0 0 0 0 0
49455- 0 0 0 0 0 0 0 0 0 0 0 0
49456- 0 0 0 0 0 0 0 0 0 0 0 0
49457- 0 0 0 0 0 0 0 0 0 0 0 0
49458- 0 0 0 0 0 0 0 0 0 0 0 0
49459- 0 0 0 0 0 0 0 0 0 0 0 0
49460- 0 0 0 0 0 0 0 0 0 0 0 0
49461- 0 0 0 0 0 0 0 0 0 14 14 14
49462- 42 42 42 90 90 90 22 22 22 2 2 6
49463- 42 42 42 2 2 6 18 18 18 218 218 218
49464-253 253 253 253 253 253 253 253 253 253 253 253
49465-253 253 253 253 253 253 253 253 253 253 253 253
49466-253 253 253 253 253 253 253 253 253 253 253 253
49467-253 253 253 253 253 253 253 253 253 253 253 253
49468-253 253 253 253 253 253 253 253 253 253 253 253
49469-253 253 253 253 253 253 250 250 250 221 221 221
49470-218 218 218 101 101 101 2 2 6 14 14 14
49471- 18 18 18 38 38 38 10 10 10 2 2 6
49472- 2 2 6 2 2 6 2 2 6 78 78 78
49473- 58 58 58 22 22 22 6 6 6 0 0 0
49474- 0 0 0 0 0 0 0 0 0 0 0 0
49475- 0 0 0 0 0 0 0 0 0 0 0 0
49476- 0 0 0 0 0 0 0 0 0 0 0 0
49477- 0 0 0 0 0 0 0 0 0 0 0 0
49478- 0 0 0 0 0 0 0 0 0 0 0 0
49479- 0 0 0 0 0 0 0 0 0 0 0 0
49480- 0 0 0 0 0 0 0 0 0 0 0 0
49481- 0 0 0 0 0 0 6 6 6 18 18 18
49482- 54 54 54 82 82 82 2 2 6 26 26 26
49483- 22 22 22 2 2 6 123 123 123 253 253 253
49484-253 253 253 253 253 253 253 253 253 253 253 253
49485-253 253 253 253 253 253 253 253 253 253 253 253
49486-253 253 253 253 253 253 253 253 253 253 253 253
49487-253 253 253 253 253 253 253 253 253 253 253 253
49488-253 253 253 253 253 253 253 253 253 253 253 253
49489-253 253 253 253 253 253 253 253 253 250 250 250
49490-238 238 238 198 198 198 6 6 6 38 38 38
49491- 58 58 58 26 26 26 38 38 38 2 2 6
49492- 2 2 6 2 2 6 2 2 6 46 46 46
49493- 78 78 78 30 30 30 10 10 10 0 0 0
49494- 0 0 0 0 0 0 0 0 0 0 0 0
49495- 0 0 0 0 0 0 0 0 0 0 0 0
49496- 0 0 0 0 0 0 0 0 0 0 0 0
49497- 0 0 0 0 0 0 0 0 0 0 0 0
49498- 0 0 0 0 0 0 0 0 0 0 0 0
49499- 0 0 0 0 0 0 0 0 0 0 0 0
49500- 0 0 0 0 0 0 0 0 0 0 0 0
49501- 0 0 0 0 0 0 10 10 10 30 30 30
49502- 74 74 74 58 58 58 2 2 6 42 42 42
49503- 2 2 6 22 22 22 231 231 231 253 253 253
49504-253 253 253 253 253 253 253 253 253 253 253 253
49505-253 253 253 253 253 253 253 253 253 250 250 250
49506-253 253 253 253 253 253 253 253 253 253 253 253
49507-253 253 253 253 253 253 253 253 253 253 253 253
49508-253 253 253 253 253 253 253 253 253 253 253 253
49509-253 253 253 253 253 253 253 253 253 253 253 253
49510-253 253 253 246 246 246 46 46 46 38 38 38
49511- 42 42 42 14 14 14 38 38 38 14 14 14
49512- 2 2 6 2 2 6 2 2 6 6 6 6
49513- 86 86 86 46 46 46 14 14 14 0 0 0
49514- 0 0 0 0 0 0 0 0 0 0 0 0
49515- 0 0 0 0 0 0 0 0 0 0 0 0
49516- 0 0 0 0 0 0 0 0 0 0 0 0
49517- 0 0 0 0 0 0 0 0 0 0 0 0
49518- 0 0 0 0 0 0 0 0 0 0 0 0
49519- 0 0 0 0 0 0 0 0 0 0 0 0
49520- 0 0 0 0 0 0 0 0 0 0 0 0
49521- 0 0 0 6 6 6 14 14 14 42 42 42
49522- 90 90 90 18 18 18 18 18 18 26 26 26
49523- 2 2 6 116 116 116 253 253 253 253 253 253
49524-253 253 253 253 253 253 253 253 253 253 253 253
49525-253 253 253 253 253 253 250 250 250 238 238 238
49526-253 253 253 253 253 253 253 253 253 253 253 253
49527-253 253 253 253 253 253 253 253 253 253 253 253
49528-253 253 253 253 253 253 253 253 253 253 253 253
49529-253 253 253 253 253 253 253 253 253 253 253 253
49530-253 253 253 253 253 253 94 94 94 6 6 6
49531- 2 2 6 2 2 6 10 10 10 34 34 34
49532- 2 2 6 2 2 6 2 2 6 2 2 6
49533- 74 74 74 58 58 58 22 22 22 6 6 6
49534- 0 0 0 0 0 0 0 0 0 0 0 0
49535- 0 0 0 0 0 0 0 0 0 0 0 0
49536- 0 0 0 0 0 0 0 0 0 0 0 0
49537- 0 0 0 0 0 0 0 0 0 0 0 0
49538- 0 0 0 0 0 0 0 0 0 0 0 0
49539- 0 0 0 0 0 0 0 0 0 0 0 0
49540- 0 0 0 0 0 0 0 0 0 0 0 0
49541- 0 0 0 10 10 10 26 26 26 66 66 66
49542- 82 82 82 2 2 6 38 38 38 6 6 6
49543- 14 14 14 210 210 210 253 253 253 253 253 253
49544-253 253 253 253 253 253 253 253 253 253 253 253
49545-253 253 253 253 253 253 246 246 246 242 242 242
49546-253 253 253 253 253 253 253 253 253 253 253 253
49547-253 253 253 253 253 253 253 253 253 253 253 253
49548-253 253 253 253 253 253 253 253 253 253 253 253
49549-253 253 253 253 253 253 253 253 253 253 253 253
49550-253 253 253 253 253 253 144 144 144 2 2 6
49551- 2 2 6 2 2 6 2 2 6 46 46 46
49552- 2 2 6 2 2 6 2 2 6 2 2 6
49553- 42 42 42 74 74 74 30 30 30 10 10 10
49554- 0 0 0 0 0 0 0 0 0 0 0 0
49555- 0 0 0 0 0 0 0 0 0 0 0 0
49556- 0 0 0 0 0 0 0 0 0 0 0 0
49557- 0 0 0 0 0 0 0 0 0 0 0 0
49558- 0 0 0 0 0 0 0 0 0 0 0 0
49559- 0 0 0 0 0 0 0 0 0 0 0 0
49560- 0 0 0 0 0 0 0 0 0 0 0 0
49561- 6 6 6 14 14 14 42 42 42 90 90 90
49562- 26 26 26 6 6 6 42 42 42 2 2 6
49563- 74 74 74 250 250 250 253 253 253 253 253 253
49564-253 253 253 253 253 253 253 253 253 253 253 253
49565-253 253 253 253 253 253 242 242 242 242 242 242
49566-253 253 253 253 253 253 253 253 253 253 253 253
49567-253 253 253 253 253 253 253 253 253 253 253 253
49568-253 253 253 253 253 253 253 253 253 253 253 253
49569-253 253 253 253 253 253 253 253 253 253 253 253
49570-253 253 253 253 253 253 182 182 182 2 2 6
49571- 2 2 6 2 2 6 2 2 6 46 46 46
49572- 2 2 6 2 2 6 2 2 6 2 2 6
49573- 10 10 10 86 86 86 38 38 38 10 10 10
49574- 0 0 0 0 0 0 0 0 0 0 0 0
49575- 0 0 0 0 0 0 0 0 0 0 0 0
49576- 0 0 0 0 0 0 0 0 0 0 0 0
49577- 0 0 0 0 0 0 0 0 0 0 0 0
49578- 0 0 0 0 0 0 0 0 0 0 0 0
49579- 0 0 0 0 0 0 0 0 0 0 0 0
49580- 0 0 0 0 0 0 0 0 0 0 0 0
49581- 10 10 10 26 26 26 66 66 66 82 82 82
49582- 2 2 6 22 22 22 18 18 18 2 2 6
49583-149 149 149 253 253 253 253 253 253 253 253 253
49584-253 253 253 253 253 253 253 253 253 253 253 253
49585-253 253 253 253 253 253 234 234 234 242 242 242
49586-253 253 253 253 253 253 253 253 253 253 253 253
49587-253 253 253 253 253 253 253 253 253 253 253 253
49588-253 253 253 253 253 253 253 253 253 253 253 253
49589-253 253 253 253 253 253 253 253 253 253 253 253
49590-253 253 253 253 253 253 206 206 206 2 2 6
49591- 2 2 6 2 2 6 2 2 6 38 38 38
49592- 2 2 6 2 2 6 2 2 6 2 2 6
49593- 6 6 6 86 86 86 46 46 46 14 14 14
49594- 0 0 0 0 0 0 0 0 0 0 0 0
49595- 0 0 0 0 0 0 0 0 0 0 0 0
49596- 0 0 0 0 0 0 0 0 0 0 0 0
49597- 0 0 0 0 0 0 0 0 0 0 0 0
49598- 0 0 0 0 0 0 0 0 0 0 0 0
49599- 0 0 0 0 0 0 0 0 0 0 0 0
49600- 0 0 0 0 0 0 0 0 0 6 6 6
49601- 18 18 18 46 46 46 86 86 86 18 18 18
49602- 2 2 6 34 34 34 10 10 10 6 6 6
49603-210 210 210 253 253 253 253 253 253 253 253 253
49604-253 253 253 253 253 253 253 253 253 253 253 253
49605-253 253 253 253 253 253 234 234 234 242 242 242
49606-253 253 253 253 253 253 253 253 253 253 253 253
49607-253 253 253 253 253 253 253 253 253 253 253 253
49608-253 253 253 253 253 253 253 253 253 253 253 253
49609-253 253 253 253 253 253 253 253 253 253 253 253
49610-253 253 253 253 253 253 221 221 221 6 6 6
49611- 2 2 6 2 2 6 6 6 6 30 30 30
49612- 2 2 6 2 2 6 2 2 6 2 2 6
49613- 2 2 6 82 82 82 54 54 54 18 18 18
49614- 6 6 6 0 0 0 0 0 0 0 0 0
49615- 0 0 0 0 0 0 0 0 0 0 0 0
49616- 0 0 0 0 0 0 0 0 0 0 0 0
49617- 0 0 0 0 0 0 0 0 0 0 0 0
49618- 0 0 0 0 0 0 0 0 0 0 0 0
49619- 0 0 0 0 0 0 0 0 0 0 0 0
49620- 0 0 0 0 0 0 0 0 0 10 10 10
49621- 26 26 26 66 66 66 62 62 62 2 2 6
49622- 2 2 6 38 38 38 10 10 10 26 26 26
49623-238 238 238 253 253 253 253 253 253 253 253 253
49624-253 253 253 253 253 253 253 253 253 253 253 253
49625-253 253 253 253 253 253 231 231 231 238 238 238
49626-253 253 253 253 253 253 253 253 253 253 253 253
49627-253 253 253 253 253 253 253 253 253 253 253 253
49628-253 253 253 253 253 253 253 253 253 253 253 253
49629-253 253 253 253 253 253 253 253 253 253 253 253
49630-253 253 253 253 253 253 231 231 231 6 6 6
49631- 2 2 6 2 2 6 10 10 10 30 30 30
49632- 2 2 6 2 2 6 2 2 6 2 2 6
49633- 2 2 6 66 66 66 58 58 58 22 22 22
49634- 6 6 6 0 0 0 0 0 0 0 0 0
49635- 0 0 0 0 0 0 0 0 0 0 0 0
49636- 0 0 0 0 0 0 0 0 0 0 0 0
49637- 0 0 0 0 0 0 0 0 0 0 0 0
49638- 0 0 0 0 0 0 0 0 0 0 0 0
49639- 0 0 0 0 0 0 0 0 0 0 0 0
49640- 0 0 0 0 0 0 0 0 0 10 10 10
49641- 38 38 38 78 78 78 6 6 6 2 2 6
49642- 2 2 6 46 46 46 14 14 14 42 42 42
49643-246 246 246 253 253 253 253 253 253 253 253 253
49644-253 253 253 253 253 253 253 253 253 253 253 253
49645-253 253 253 253 253 253 231 231 231 242 242 242
49646-253 253 253 253 253 253 253 253 253 253 253 253
49647-253 253 253 253 253 253 253 253 253 253 253 253
49648-253 253 253 253 253 253 253 253 253 253 253 253
49649-253 253 253 253 253 253 253 253 253 253 253 253
49650-253 253 253 253 253 253 234 234 234 10 10 10
49651- 2 2 6 2 2 6 22 22 22 14 14 14
49652- 2 2 6 2 2 6 2 2 6 2 2 6
49653- 2 2 6 66 66 66 62 62 62 22 22 22
49654- 6 6 6 0 0 0 0 0 0 0 0 0
49655- 0 0 0 0 0 0 0 0 0 0 0 0
49656- 0 0 0 0 0 0 0 0 0 0 0 0
49657- 0 0 0 0 0 0 0 0 0 0 0 0
49658- 0 0 0 0 0 0 0 0 0 0 0 0
49659- 0 0 0 0 0 0 0 0 0 0 0 0
49660- 0 0 0 0 0 0 6 6 6 18 18 18
49661- 50 50 50 74 74 74 2 2 6 2 2 6
49662- 14 14 14 70 70 70 34 34 34 62 62 62
49663-250 250 250 253 253 253 253 253 253 253 253 253
49664-253 253 253 253 253 253 253 253 253 253 253 253
49665-253 253 253 253 253 253 231 231 231 246 246 246
49666-253 253 253 253 253 253 253 253 253 253 253 253
49667-253 253 253 253 253 253 253 253 253 253 253 253
49668-253 253 253 253 253 253 253 253 253 253 253 253
49669-253 253 253 253 253 253 253 253 253 253 253 253
49670-253 253 253 253 253 253 234 234 234 14 14 14
49671- 2 2 6 2 2 6 30 30 30 2 2 6
49672- 2 2 6 2 2 6 2 2 6 2 2 6
49673- 2 2 6 66 66 66 62 62 62 22 22 22
49674- 6 6 6 0 0 0 0 0 0 0 0 0
49675- 0 0 0 0 0 0 0 0 0 0 0 0
49676- 0 0 0 0 0 0 0 0 0 0 0 0
49677- 0 0 0 0 0 0 0 0 0 0 0 0
49678- 0 0 0 0 0 0 0 0 0 0 0 0
49679- 0 0 0 0 0 0 0 0 0 0 0 0
49680- 0 0 0 0 0 0 6 6 6 18 18 18
49681- 54 54 54 62 62 62 2 2 6 2 2 6
49682- 2 2 6 30 30 30 46 46 46 70 70 70
49683-250 250 250 253 253 253 253 253 253 253 253 253
49684-253 253 253 253 253 253 253 253 253 253 253 253
49685-253 253 253 253 253 253 231 231 231 246 246 246
49686-253 253 253 253 253 253 253 253 253 253 253 253
49687-253 253 253 253 253 253 253 253 253 253 253 253
49688-253 253 253 253 253 253 253 253 253 253 253 253
49689-253 253 253 253 253 253 253 253 253 253 253 253
49690-253 253 253 253 253 253 226 226 226 10 10 10
49691- 2 2 6 6 6 6 30 30 30 2 2 6
49692- 2 2 6 2 2 6 2 2 6 2 2 6
49693- 2 2 6 66 66 66 58 58 58 22 22 22
49694- 6 6 6 0 0 0 0 0 0 0 0 0
49695- 0 0 0 0 0 0 0 0 0 0 0 0
49696- 0 0 0 0 0 0 0 0 0 0 0 0
49697- 0 0 0 0 0 0 0 0 0 0 0 0
49698- 0 0 0 0 0 0 0 0 0 0 0 0
49699- 0 0 0 0 0 0 0 0 0 0 0 0
49700- 0 0 0 0 0 0 6 6 6 22 22 22
49701- 58 58 58 62 62 62 2 2 6 2 2 6
49702- 2 2 6 2 2 6 30 30 30 78 78 78
49703-250 250 250 253 253 253 253 253 253 253 253 253
49704-253 253 253 253 253 253 253 253 253 253 253 253
49705-253 253 253 253 253 253 231 231 231 246 246 246
49706-253 253 253 253 253 253 253 253 253 253 253 253
49707-253 253 253 253 253 253 253 253 253 253 253 253
49708-253 253 253 253 253 253 253 253 253 253 253 253
49709-253 253 253 253 253 253 253 253 253 253 253 253
49710-253 253 253 253 253 253 206 206 206 2 2 6
49711- 22 22 22 34 34 34 18 14 6 22 22 22
49712- 26 26 26 18 18 18 6 6 6 2 2 6
49713- 2 2 6 82 82 82 54 54 54 18 18 18
49714- 6 6 6 0 0 0 0 0 0 0 0 0
49715- 0 0 0 0 0 0 0 0 0 0 0 0
49716- 0 0 0 0 0 0 0 0 0 0 0 0
49717- 0 0 0 0 0 0 0 0 0 0 0 0
49718- 0 0 0 0 0 0 0 0 0 0 0 0
49719- 0 0 0 0 0 0 0 0 0 0 0 0
49720- 0 0 0 0 0 0 6 6 6 26 26 26
49721- 62 62 62 106 106 106 74 54 14 185 133 11
49722-210 162 10 121 92 8 6 6 6 62 62 62
49723-238 238 238 253 253 253 253 253 253 253 253 253
49724-253 253 253 253 253 253 253 253 253 253 253 253
49725-253 253 253 253 253 253 231 231 231 246 246 246
49726-253 253 253 253 253 253 253 253 253 253 253 253
49727-253 253 253 253 253 253 253 253 253 253 253 253
49728-253 253 253 253 253 253 253 253 253 253 253 253
49729-253 253 253 253 253 253 253 253 253 253 253 253
49730-253 253 253 253 253 253 158 158 158 18 18 18
49731- 14 14 14 2 2 6 2 2 6 2 2 6
49732- 6 6 6 18 18 18 66 66 66 38 38 38
49733- 6 6 6 94 94 94 50 50 50 18 18 18
49734- 6 6 6 0 0 0 0 0 0 0 0 0
49735- 0 0 0 0 0 0 0 0 0 0 0 0
49736- 0 0 0 0 0 0 0 0 0 0 0 0
49737- 0 0 0 0 0 0 0 0 0 0 0 0
49738- 0 0 0 0 0 0 0 0 0 0 0 0
49739- 0 0 0 0 0 0 0 0 0 6 6 6
49740- 10 10 10 10 10 10 18 18 18 38 38 38
49741- 78 78 78 142 134 106 216 158 10 242 186 14
49742-246 190 14 246 190 14 156 118 10 10 10 10
49743- 90 90 90 238 238 238 253 253 253 253 253 253
49744-253 253 253 253 253 253 253 253 253 253 253 253
49745-253 253 253 253 253 253 231 231 231 250 250 250
49746-253 253 253 253 253 253 253 253 253 253 253 253
49747-253 253 253 253 253 253 253 253 253 253 253 253
49748-253 253 253 253 253 253 253 253 253 253 253 253
49749-253 253 253 253 253 253 253 253 253 246 230 190
49750-238 204 91 238 204 91 181 142 44 37 26 9
49751- 2 2 6 2 2 6 2 2 6 2 2 6
49752- 2 2 6 2 2 6 38 38 38 46 46 46
49753- 26 26 26 106 106 106 54 54 54 18 18 18
49754- 6 6 6 0 0 0 0 0 0 0 0 0
49755- 0 0 0 0 0 0 0 0 0 0 0 0
49756- 0 0 0 0 0 0 0 0 0 0 0 0
49757- 0 0 0 0 0 0 0 0 0 0 0 0
49758- 0 0 0 0 0 0 0 0 0 0 0 0
49759- 0 0 0 6 6 6 14 14 14 22 22 22
49760- 30 30 30 38 38 38 50 50 50 70 70 70
49761-106 106 106 190 142 34 226 170 11 242 186 14
49762-246 190 14 246 190 14 246 190 14 154 114 10
49763- 6 6 6 74 74 74 226 226 226 253 253 253
49764-253 253 253 253 253 253 253 253 253 253 253 253
49765-253 253 253 253 253 253 231 231 231 250 250 250
49766-253 253 253 253 253 253 253 253 253 253 253 253
49767-253 253 253 253 253 253 253 253 253 253 253 253
49768-253 253 253 253 253 253 253 253 253 253 253 253
49769-253 253 253 253 253 253 253 253 253 228 184 62
49770-241 196 14 241 208 19 232 195 16 38 30 10
49771- 2 2 6 2 2 6 2 2 6 2 2 6
49772- 2 2 6 6 6 6 30 30 30 26 26 26
49773-203 166 17 154 142 90 66 66 66 26 26 26
49774- 6 6 6 0 0 0 0 0 0 0 0 0
49775- 0 0 0 0 0 0 0 0 0 0 0 0
49776- 0 0 0 0 0 0 0 0 0 0 0 0
49777- 0 0 0 0 0 0 0 0 0 0 0 0
49778- 0 0 0 0 0 0 0 0 0 0 0 0
49779- 6 6 6 18 18 18 38 38 38 58 58 58
49780- 78 78 78 86 86 86 101 101 101 123 123 123
49781-175 146 61 210 150 10 234 174 13 246 186 14
49782-246 190 14 246 190 14 246 190 14 238 190 10
49783-102 78 10 2 2 6 46 46 46 198 198 198
49784-253 253 253 253 253 253 253 253 253 253 253 253
49785-253 253 253 253 253 253 234 234 234 242 242 242
49786-253 253 253 253 253 253 253 253 253 253 253 253
49787-253 253 253 253 253 253 253 253 253 253 253 253
49788-253 253 253 253 253 253 253 253 253 253 253 253
49789-253 253 253 253 253 253 253 253 253 224 178 62
49790-242 186 14 241 196 14 210 166 10 22 18 6
49791- 2 2 6 2 2 6 2 2 6 2 2 6
49792- 2 2 6 2 2 6 6 6 6 121 92 8
49793-238 202 15 232 195 16 82 82 82 34 34 34
49794- 10 10 10 0 0 0 0 0 0 0 0 0
49795- 0 0 0 0 0 0 0 0 0 0 0 0
49796- 0 0 0 0 0 0 0 0 0 0 0 0
49797- 0 0 0 0 0 0 0 0 0 0 0 0
49798- 0 0 0 0 0 0 0 0 0 0 0 0
49799- 14 14 14 38 38 38 70 70 70 154 122 46
49800-190 142 34 200 144 11 197 138 11 197 138 11
49801-213 154 11 226 170 11 242 186 14 246 190 14
49802-246 190 14 246 190 14 246 190 14 246 190 14
49803-225 175 15 46 32 6 2 2 6 22 22 22
49804-158 158 158 250 250 250 253 253 253 253 253 253
49805-253 253 253 253 253 253 253 253 253 253 253 253
49806-253 253 253 253 253 253 253 253 253 253 253 253
49807-253 253 253 253 253 253 253 253 253 253 253 253
49808-253 253 253 253 253 253 253 253 253 253 253 253
49809-253 253 253 250 250 250 242 242 242 224 178 62
49810-239 182 13 236 186 11 213 154 11 46 32 6
49811- 2 2 6 2 2 6 2 2 6 2 2 6
49812- 2 2 6 2 2 6 61 42 6 225 175 15
49813-238 190 10 236 186 11 112 100 78 42 42 42
49814- 14 14 14 0 0 0 0 0 0 0 0 0
49815- 0 0 0 0 0 0 0 0 0 0 0 0
49816- 0 0 0 0 0 0 0 0 0 0 0 0
49817- 0 0 0 0 0 0 0 0 0 0 0 0
49818- 0 0 0 0 0 0 0 0 0 6 6 6
49819- 22 22 22 54 54 54 154 122 46 213 154 11
49820-226 170 11 230 174 11 226 170 11 226 170 11
49821-236 178 12 242 186 14 246 190 14 246 190 14
49822-246 190 14 246 190 14 246 190 14 246 190 14
49823-241 196 14 184 144 12 10 10 10 2 2 6
49824- 6 6 6 116 116 116 242 242 242 253 253 253
49825-253 253 253 253 253 253 253 253 253 253 253 253
49826-253 253 253 253 253 253 253 253 253 253 253 253
49827-253 253 253 253 253 253 253 253 253 253 253 253
49828-253 253 253 253 253 253 253 253 253 253 253 253
49829-253 253 253 231 231 231 198 198 198 214 170 54
49830-236 178 12 236 178 12 210 150 10 137 92 6
49831- 18 14 6 2 2 6 2 2 6 2 2 6
49832- 6 6 6 70 47 6 200 144 11 236 178 12
49833-239 182 13 239 182 13 124 112 88 58 58 58
49834- 22 22 22 6 6 6 0 0 0 0 0 0
49835- 0 0 0 0 0 0 0 0 0 0 0 0
49836- 0 0 0 0 0 0 0 0 0 0 0 0
49837- 0 0 0 0 0 0 0 0 0 0 0 0
49838- 0 0 0 0 0 0 0 0 0 10 10 10
49839- 30 30 30 70 70 70 180 133 36 226 170 11
49840-239 182 13 242 186 14 242 186 14 246 186 14
49841-246 190 14 246 190 14 246 190 14 246 190 14
49842-246 190 14 246 190 14 246 190 14 246 190 14
49843-246 190 14 232 195 16 98 70 6 2 2 6
49844- 2 2 6 2 2 6 66 66 66 221 221 221
49845-253 253 253 253 253 253 253 253 253 253 253 253
49846-253 253 253 253 253 253 253 253 253 253 253 253
49847-253 253 253 253 253 253 253 253 253 253 253 253
49848-253 253 253 253 253 253 253 253 253 253 253 253
49849-253 253 253 206 206 206 198 198 198 214 166 58
49850-230 174 11 230 174 11 216 158 10 192 133 9
49851-163 110 8 116 81 8 102 78 10 116 81 8
49852-167 114 7 197 138 11 226 170 11 239 182 13
49853-242 186 14 242 186 14 162 146 94 78 78 78
49854- 34 34 34 14 14 14 6 6 6 0 0 0
49855- 0 0 0 0 0 0 0 0 0 0 0 0
49856- 0 0 0 0 0 0 0 0 0 0 0 0
49857- 0 0 0 0 0 0 0 0 0 0 0 0
49858- 0 0 0 0 0 0 0 0 0 6 6 6
49859- 30 30 30 78 78 78 190 142 34 226 170 11
49860-239 182 13 246 190 14 246 190 14 246 190 14
49861-246 190 14 246 190 14 246 190 14 246 190 14
49862-246 190 14 246 190 14 246 190 14 246 190 14
49863-246 190 14 241 196 14 203 166 17 22 18 6
49864- 2 2 6 2 2 6 2 2 6 38 38 38
49865-218 218 218 253 253 253 253 253 253 253 253 253
49866-253 253 253 253 253 253 253 253 253 253 253 253
49867-253 253 253 253 253 253 253 253 253 253 253 253
49868-253 253 253 253 253 253 253 253 253 253 253 253
49869-250 250 250 206 206 206 198 198 198 202 162 69
49870-226 170 11 236 178 12 224 166 10 210 150 10
49871-200 144 11 197 138 11 192 133 9 197 138 11
49872-210 150 10 226 170 11 242 186 14 246 190 14
49873-246 190 14 246 186 14 225 175 15 124 112 88
49874- 62 62 62 30 30 30 14 14 14 6 6 6
49875- 0 0 0 0 0 0 0 0 0 0 0 0
49876- 0 0 0 0 0 0 0 0 0 0 0 0
49877- 0 0 0 0 0 0 0 0 0 0 0 0
49878- 0 0 0 0 0 0 0 0 0 10 10 10
49879- 30 30 30 78 78 78 174 135 50 224 166 10
49880-239 182 13 246 190 14 246 190 14 246 190 14
49881-246 190 14 246 190 14 246 190 14 246 190 14
49882-246 190 14 246 190 14 246 190 14 246 190 14
49883-246 190 14 246 190 14 241 196 14 139 102 15
49884- 2 2 6 2 2 6 2 2 6 2 2 6
49885- 78 78 78 250 250 250 253 253 253 253 253 253
49886-253 253 253 253 253 253 253 253 253 253 253 253
49887-253 253 253 253 253 253 253 253 253 253 253 253
49888-253 253 253 253 253 253 253 253 253 253 253 253
49889-250 250 250 214 214 214 198 198 198 190 150 46
49890-219 162 10 236 178 12 234 174 13 224 166 10
49891-216 158 10 213 154 11 213 154 11 216 158 10
49892-226 170 11 239 182 13 246 190 14 246 190 14
49893-246 190 14 246 190 14 242 186 14 206 162 42
49894-101 101 101 58 58 58 30 30 30 14 14 14
49895- 6 6 6 0 0 0 0 0 0 0 0 0
49896- 0 0 0 0 0 0 0 0 0 0 0 0
49897- 0 0 0 0 0 0 0 0 0 0 0 0
49898- 0 0 0 0 0 0 0 0 0 10 10 10
49899- 30 30 30 74 74 74 174 135 50 216 158 10
49900-236 178 12 246 190 14 246 190 14 246 190 14
49901-246 190 14 246 190 14 246 190 14 246 190 14
49902-246 190 14 246 190 14 246 190 14 246 190 14
49903-246 190 14 246 190 14 241 196 14 226 184 13
49904- 61 42 6 2 2 6 2 2 6 2 2 6
49905- 22 22 22 238 238 238 253 253 253 253 253 253
49906-253 253 253 253 253 253 253 253 253 253 253 253
49907-253 253 253 253 253 253 253 253 253 253 253 253
49908-253 253 253 253 253 253 253 253 253 253 253 253
49909-253 253 253 226 226 226 187 187 187 180 133 36
49910-216 158 10 236 178 12 239 182 13 236 178 12
49911-230 174 11 226 170 11 226 170 11 230 174 11
49912-236 178 12 242 186 14 246 190 14 246 190 14
49913-246 190 14 246 190 14 246 186 14 239 182 13
49914-206 162 42 106 106 106 66 66 66 34 34 34
49915- 14 14 14 6 6 6 0 0 0 0 0 0
49916- 0 0 0 0 0 0 0 0 0 0 0 0
49917- 0 0 0 0 0 0 0 0 0 0 0 0
49918- 0 0 0 0 0 0 0 0 0 6 6 6
49919- 26 26 26 70 70 70 163 133 67 213 154 11
49920-236 178 12 246 190 14 246 190 14 246 190 14
49921-246 190 14 246 190 14 246 190 14 246 190 14
49922-246 190 14 246 190 14 246 190 14 246 190 14
49923-246 190 14 246 190 14 246 190 14 241 196 14
49924-190 146 13 18 14 6 2 2 6 2 2 6
49925- 46 46 46 246 246 246 253 253 253 253 253 253
49926-253 253 253 253 253 253 253 253 253 253 253 253
49927-253 253 253 253 253 253 253 253 253 253 253 253
49928-253 253 253 253 253 253 253 253 253 253 253 253
49929-253 253 253 221 221 221 86 86 86 156 107 11
49930-216 158 10 236 178 12 242 186 14 246 186 14
49931-242 186 14 239 182 13 239 182 13 242 186 14
49932-242 186 14 246 186 14 246 190 14 246 190 14
49933-246 190 14 246 190 14 246 190 14 246 190 14
49934-242 186 14 225 175 15 142 122 72 66 66 66
49935- 30 30 30 10 10 10 0 0 0 0 0 0
49936- 0 0 0 0 0 0 0 0 0 0 0 0
49937- 0 0 0 0 0 0 0 0 0 0 0 0
49938- 0 0 0 0 0 0 0 0 0 6 6 6
49939- 26 26 26 70 70 70 163 133 67 210 150 10
49940-236 178 12 246 190 14 246 190 14 246 190 14
49941-246 190 14 246 190 14 246 190 14 246 190 14
49942-246 190 14 246 190 14 246 190 14 246 190 14
49943-246 190 14 246 190 14 246 190 14 246 190 14
49944-232 195 16 121 92 8 34 34 34 106 106 106
49945-221 221 221 253 253 253 253 253 253 253 253 253
49946-253 253 253 253 253 253 253 253 253 253 253 253
49947-253 253 253 253 253 253 253 253 253 253 253 253
49948-253 253 253 253 253 253 253 253 253 253 253 253
49949-242 242 242 82 82 82 18 14 6 163 110 8
49950-216 158 10 236 178 12 242 186 14 246 190 14
49951-246 190 14 246 190 14 246 190 14 246 190 14
49952-246 190 14 246 190 14 246 190 14 246 190 14
49953-246 190 14 246 190 14 246 190 14 246 190 14
49954-246 190 14 246 190 14 242 186 14 163 133 67
49955- 46 46 46 18 18 18 6 6 6 0 0 0
49956- 0 0 0 0 0 0 0 0 0 0 0 0
49957- 0 0 0 0 0 0 0 0 0 0 0 0
49958- 0 0 0 0 0 0 0 0 0 10 10 10
49959- 30 30 30 78 78 78 163 133 67 210 150 10
49960-236 178 12 246 186 14 246 190 14 246 190 14
49961-246 190 14 246 190 14 246 190 14 246 190 14
49962-246 190 14 246 190 14 246 190 14 246 190 14
49963-246 190 14 246 190 14 246 190 14 246 190 14
49964-241 196 14 215 174 15 190 178 144 253 253 253
49965-253 253 253 253 253 253 253 253 253 253 253 253
49966-253 253 253 253 253 253 253 253 253 253 253 253
49967-253 253 253 253 253 253 253 253 253 253 253 253
49968-253 253 253 253 253 253 253 253 253 218 218 218
49969- 58 58 58 2 2 6 22 18 6 167 114 7
49970-216 158 10 236 178 12 246 186 14 246 190 14
49971-246 190 14 246 190 14 246 190 14 246 190 14
49972-246 190 14 246 190 14 246 190 14 246 190 14
49973-246 190 14 246 190 14 246 190 14 246 190 14
49974-246 190 14 246 186 14 242 186 14 190 150 46
49975- 54 54 54 22 22 22 6 6 6 0 0 0
49976- 0 0 0 0 0 0 0 0 0 0 0 0
49977- 0 0 0 0 0 0 0 0 0 0 0 0
49978- 0 0 0 0 0 0 0 0 0 14 14 14
49979- 38 38 38 86 86 86 180 133 36 213 154 11
49980-236 178 12 246 186 14 246 190 14 246 190 14
49981-246 190 14 246 190 14 246 190 14 246 190 14
49982-246 190 14 246 190 14 246 190 14 246 190 14
49983-246 190 14 246 190 14 246 190 14 246 190 14
49984-246 190 14 232 195 16 190 146 13 214 214 214
49985-253 253 253 253 253 253 253 253 253 253 253 253
49986-253 253 253 253 253 253 253 253 253 253 253 253
49987-253 253 253 253 253 253 253 253 253 253 253 253
49988-253 253 253 250 250 250 170 170 170 26 26 26
49989- 2 2 6 2 2 6 37 26 9 163 110 8
49990-219 162 10 239 182 13 246 186 14 246 190 14
49991-246 190 14 246 190 14 246 190 14 246 190 14
49992-246 190 14 246 190 14 246 190 14 246 190 14
49993-246 190 14 246 190 14 246 190 14 246 190 14
49994-246 186 14 236 178 12 224 166 10 142 122 72
49995- 46 46 46 18 18 18 6 6 6 0 0 0
49996- 0 0 0 0 0 0 0 0 0 0 0 0
49997- 0 0 0 0 0 0 0 0 0 0 0 0
49998- 0 0 0 0 0 0 6 6 6 18 18 18
49999- 50 50 50 109 106 95 192 133 9 224 166 10
50000-242 186 14 246 190 14 246 190 14 246 190 14
50001-246 190 14 246 190 14 246 190 14 246 190 14
50002-246 190 14 246 190 14 246 190 14 246 190 14
50003-246 190 14 246 190 14 246 190 14 246 190 14
50004-242 186 14 226 184 13 210 162 10 142 110 46
50005-226 226 226 253 253 253 253 253 253 253 253 253
50006-253 253 253 253 253 253 253 253 253 253 253 253
50007-253 253 253 253 253 253 253 253 253 253 253 253
50008-198 198 198 66 66 66 2 2 6 2 2 6
50009- 2 2 6 2 2 6 50 34 6 156 107 11
50010-219 162 10 239 182 13 246 186 14 246 190 14
50011-246 190 14 246 190 14 246 190 14 246 190 14
50012-246 190 14 246 190 14 246 190 14 246 190 14
50013-246 190 14 246 190 14 246 190 14 242 186 14
50014-234 174 13 213 154 11 154 122 46 66 66 66
50015- 30 30 30 10 10 10 0 0 0 0 0 0
50016- 0 0 0 0 0 0 0 0 0 0 0 0
50017- 0 0 0 0 0 0 0 0 0 0 0 0
50018- 0 0 0 0 0 0 6 6 6 22 22 22
50019- 58 58 58 154 121 60 206 145 10 234 174 13
50020-242 186 14 246 186 14 246 190 14 246 190 14
50021-246 190 14 246 190 14 246 190 14 246 190 14
50022-246 190 14 246 190 14 246 190 14 246 190 14
50023-246 190 14 246 190 14 246 190 14 246 190 14
50024-246 186 14 236 178 12 210 162 10 163 110 8
50025- 61 42 6 138 138 138 218 218 218 250 250 250
50026-253 253 253 253 253 253 253 253 253 250 250 250
50027-242 242 242 210 210 210 144 144 144 66 66 66
50028- 6 6 6 2 2 6 2 2 6 2 2 6
50029- 2 2 6 2 2 6 61 42 6 163 110 8
50030-216 158 10 236 178 12 246 190 14 246 190 14
50031-246 190 14 246 190 14 246 190 14 246 190 14
50032-246 190 14 246 190 14 246 190 14 246 190 14
50033-246 190 14 239 182 13 230 174 11 216 158 10
50034-190 142 34 124 112 88 70 70 70 38 38 38
50035- 18 18 18 6 6 6 0 0 0 0 0 0
50036- 0 0 0 0 0 0 0 0 0 0 0 0
50037- 0 0 0 0 0 0 0 0 0 0 0 0
50038- 0 0 0 0 0 0 6 6 6 22 22 22
50039- 62 62 62 168 124 44 206 145 10 224 166 10
50040-236 178 12 239 182 13 242 186 14 242 186 14
50041-246 186 14 246 190 14 246 190 14 246 190 14
50042-246 190 14 246 190 14 246 190 14 246 190 14
50043-246 190 14 246 190 14 246 190 14 246 190 14
50044-246 190 14 236 178 12 216 158 10 175 118 6
50045- 80 54 7 2 2 6 6 6 6 30 30 30
50046- 54 54 54 62 62 62 50 50 50 38 38 38
50047- 14 14 14 2 2 6 2 2 6 2 2 6
50048- 2 2 6 2 2 6 2 2 6 2 2 6
50049- 2 2 6 6 6 6 80 54 7 167 114 7
50050-213 154 11 236 178 12 246 190 14 246 190 14
50051-246 190 14 246 190 14 246 190 14 246 190 14
50052-246 190 14 242 186 14 239 182 13 239 182 13
50053-230 174 11 210 150 10 174 135 50 124 112 88
50054- 82 82 82 54 54 54 34 34 34 18 18 18
50055- 6 6 6 0 0 0 0 0 0 0 0 0
50056- 0 0 0 0 0 0 0 0 0 0 0 0
50057- 0 0 0 0 0 0 0 0 0 0 0 0
50058- 0 0 0 0 0 0 6 6 6 18 18 18
50059- 50 50 50 158 118 36 192 133 9 200 144 11
50060-216 158 10 219 162 10 224 166 10 226 170 11
50061-230 174 11 236 178 12 239 182 13 239 182 13
50062-242 186 14 246 186 14 246 190 14 246 190 14
50063-246 190 14 246 190 14 246 190 14 246 190 14
50064-246 186 14 230 174 11 210 150 10 163 110 8
50065-104 69 6 10 10 10 2 2 6 2 2 6
50066- 2 2 6 2 2 6 2 2 6 2 2 6
50067- 2 2 6 2 2 6 2 2 6 2 2 6
50068- 2 2 6 2 2 6 2 2 6 2 2 6
50069- 2 2 6 6 6 6 91 60 6 167 114 7
50070-206 145 10 230 174 11 242 186 14 246 190 14
50071-246 190 14 246 190 14 246 186 14 242 186 14
50072-239 182 13 230 174 11 224 166 10 213 154 11
50073-180 133 36 124 112 88 86 86 86 58 58 58
50074- 38 38 38 22 22 22 10 10 10 6 6 6
50075- 0 0 0 0 0 0 0 0 0 0 0 0
50076- 0 0 0 0 0 0 0 0 0 0 0 0
50077- 0 0 0 0 0 0 0 0 0 0 0 0
50078- 0 0 0 0 0 0 0 0 0 14 14 14
50079- 34 34 34 70 70 70 138 110 50 158 118 36
50080-167 114 7 180 123 7 192 133 9 197 138 11
50081-200 144 11 206 145 10 213 154 11 219 162 10
50082-224 166 10 230 174 11 239 182 13 242 186 14
50083-246 186 14 246 186 14 246 186 14 246 186 14
50084-239 182 13 216 158 10 185 133 11 152 99 6
50085-104 69 6 18 14 6 2 2 6 2 2 6
50086- 2 2 6 2 2 6 2 2 6 2 2 6
50087- 2 2 6 2 2 6 2 2 6 2 2 6
50088- 2 2 6 2 2 6 2 2 6 2 2 6
50089- 2 2 6 6 6 6 80 54 7 152 99 6
50090-192 133 9 219 162 10 236 178 12 239 182 13
50091-246 186 14 242 186 14 239 182 13 236 178 12
50092-224 166 10 206 145 10 192 133 9 154 121 60
50093- 94 94 94 62 62 62 42 42 42 22 22 22
50094- 14 14 14 6 6 6 0 0 0 0 0 0
50095- 0 0 0 0 0 0 0 0 0 0 0 0
50096- 0 0 0 0 0 0 0 0 0 0 0 0
50097- 0 0 0 0 0 0 0 0 0 0 0 0
50098- 0 0 0 0 0 0 0 0 0 6 6 6
50099- 18 18 18 34 34 34 58 58 58 78 78 78
50100-101 98 89 124 112 88 142 110 46 156 107 11
50101-163 110 8 167 114 7 175 118 6 180 123 7
50102-185 133 11 197 138 11 210 150 10 219 162 10
50103-226 170 11 236 178 12 236 178 12 234 174 13
50104-219 162 10 197 138 11 163 110 8 130 83 6
50105- 91 60 6 10 10 10 2 2 6 2 2 6
50106- 18 18 18 38 38 38 38 38 38 38 38 38
50107- 38 38 38 38 38 38 38 38 38 38 38 38
50108- 38 38 38 38 38 38 26 26 26 2 2 6
50109- 2 2 6 6 6 6 70 47 6 137 92 6
50110-175 118 6 200 144 11 219 162 10 230 174 11
50111-234 174 13 230 174 11 219 162 10 210 150 10
50112-192 133 9 163 110 8 124 112 88 82 82 82
50113- 50 50 50 30 30 30 14 14 14 6 6 6
50114- 0 0 0 0 0 0 0 0 0 0 0 0
50115- 0 0 0 0 0 0 0 0 0 0 0 0
50116- 0 0 0 0 0 0 0 0 0 0 0 0
50117- 0 0 0 0 0 0 0 0 0 0 0 0
50118- 0 0 0 0 0 0 0 0 0 0 0 0
50119- 6 6 6 14 14 14 22 22 22 34 34 34
50120- 42 42 42 58 58 58 74 74 74 86 86 86
50121-101 98 89 122 102 70 130 98 46 121 87 25
50122-137 92 6 152 99 6 163 110 8 180 123 7
50123-185 133 11 197 138 11 206 145 10 200 144 11
50124-180 123 7 156 107 11 130 83 6 104 69 6
50125- 50 34 6 54 54 54 110 110 110 101 98 89
50126- 86 86 86 82 82 82 78 78 78 78 78 78
50127- 78 78 78 78 78 78 78 78 78 78 78 78
50128- 78 78 78 82 82 82 86 86 86 94 94 94
50129-106 106 106 101 101 101 86 66 34 124 80 6
50130-156 107 11 180 123 7 192 133 9 200 144 11
50131-206 145 10 200 144 11 192 133 9 175 118 6
50132-139 102 15 109 106 95 70 70 70 42 42 42
50133- 22 22 22 10 10 10 0 0 0 0 0 0
50134- 0 0 0 0 0 0 0 0 0 0 0 0
50135- 0 0 0 0 0 0 0 0 0 0 0 0
50136- 0 0 0 0 0 0 0 0 0 0 0 0
50137- 0 0 0 0 0 0 0 0 0 0 0 0
50138- 0 0 0 0 0 0 0 0 0 0 0 0
50139- 0 0 0 0 0 0 6 6 6 10 10 10
50140- 14 14 14 22 22 22 30 30 30 38 38 38
50141- 50 50 50 62 62 62 74 74 74 90 90 90
50142-101 98 89 112 100 78 121 87 25 124 80 6
50143-137 92 6 152 99 6 152 99 6 152 99 6
50144-138 86 6 124 80 6 98 70 6 86 66 30
50145-101 98 89 82 82 82 58 58 58 46 46 46
50146- 38 38 38 34 34 34 34 34 34 34 34 34
50147- 34 34 34 34 34 34 34 34 34 34 34 34
50148- 34 34 34 34 34 34 38 38 38 42 42 42
50149- 54 54 54 82 82 82 94 86 76 91 60 6
50150-134 86 6 156 107 11 167 114 7 175 118 6
50151-175 118 6 167 114 7 152 99 6 121 87 25
50152-101 98 89 62 62 62 34 34 34 18 18 18
50153- 6 6 6 0 0 0 0 0 0 0 0 0
50154- 0 0 0 0 0 0 0 0 0 0 0 0
50155- 0 0 0 0 0 0 0 0 0 0 0 0
50156- 0 0 0 0 0 0 0 0 0 0 0 0
50157- 0 0 0 0 0 0 0 0 0 0 0 0
50158- 0 0 0 0 0 0 0 0 0 0 0 0
50159- 0 0 0 0 0 0 0 0 0 0 0 0
50160- 0 0 0 6 6 6 6 6 6 10 10 10
50161- 18 18 18 22 22 22 30 30 30 42 42 42
50162- 50 50 50 66 66 66 86 86 86 101 98 89
50163-106 86 58 98 70 6 104 69 6 104 69 6
50164-104 69 6 91 60 6 82 62 34 90 90 90
50165- 62 62 62 38 38 38 22 22 22 14 14 14
50166- 10 10 10 10 10 10 10 10 10 10 10 10
50167- 10 10 10 10 10 10 6 6 6 10 10 10
50168- 10 10 10 10 10 10 10 10 10 14 14 14
50169- 22 22 22 42 42 42 70 70 70 89 81 66
50170- 80 54 7 104 69 6 124 80 6 137 92 6
50171-134 86 6 116 81 8 100 82 52 86 86 86
50172- 58 58 58 30 30 30 14 14 14 6 6 6
50173- 0 0 0 0 0 0 0 0 0 0 0 0
50174- 0 0 0 0 0 0 0 0 0 0 0 0
50175- 0 0 0 0 0 0 0 0 0 0 0 0
50176- 0 0 0 0 0 0 0 0 0 0 0 0
50177- 0 0 0 0 0 0 0 0 0 0 0 0
50178- 0 0 0 0 0 0 0 0 0 0 0 0
50179- 0 0 0 0 0 0 0 0 0 0 0 0
50180- 0 0 0 0 0 0 0 0 0 0 0 0
50181- 0 0 0 6 6 6 10 10 10 14 14 14
50182- 18 18 18 26 26 26 38 38 38 54 54 54
50183- 70 70 70 86 86 86 94 86 76 89 81 66
50184- 89 81 66 86 86 86 74 74 74 50 50 50
50185- 30 30 30 14 14 14 6 6 6 0 0 0
50186- 0 0 0 0 0 0 0 0 0 0 0 0
50187- 0 0 0 0 0 0 0 0 0 0 0 0
50188- 0 0 0 0 0 0 0 0 0 0 0 0
50189- 6 6 6 18 18 18 34 34 34 58 58 58
50190- 82 82 82 89 81 66 89 81 66 89 81 66
50191- 94 86 66 94 86 76 74 74 74 50 50 50
50192- 26 26 26 14 14 14 6 6 6 0 0 0
50193- 0 0 0 0 0 0 0 0 0 0 0 0
50194- 0 0 0 0 0 0 0 0 0 0 0 0
50195- 0 0 0 0 0 0 0 0 0 0 0 0
50196- 0 0 0 0 0 0 0 0 0 0 0 0
50197- 0 0 0 0 0 0 0 0 0 0 0 0
50198- 0 0 0 0 0 0 0 0 0 0 0 0
50199- 0 0 0 0 0 0 0 0 0 0 0 0
50200- 0 0 0 0 0 0 0 0 0 0 0 0
50201- 0 0 0 0 0 0 0 0 0 0 0 0
50202- 6 6 6 6 6 6 14 14 14 18 18 18
50203- 30 30 30 38 38 38 46 46 46 54 54 54
50204- 50 50 50 42 42 42 30 30 30 18 18 18
50205- 10 10 10 0 0 0 0 0 0 0 0 0
50206- 0 0 0 0 0 0 0 0 0 0 0 0
50207- 0 0 0 0 0 0 0 0 0 0 0 0
50208- 0 0 0 0 0 0 0 0 0 0 0 0
50209- 0 0 0 6 6 6 14 14 14 26 26 26
50210- 38 38 38 50 50 50 58 58 58 58 58 58
50211- 54 54 54 42 42 42 30 30 30 18 18 18
50212- 10 10 10 0 0 0 0 0 0 0 0 0
50213- 0 0 0 0 0 0 0 0 0 0 0 0
50214- 0 0 0 0 0 0 0 0 0 0 0 0
50215- 0 0 0 0 0 0 0 0 0 0 0 0
50216- 0 0 0 0 0 0 0 0 0 0 0 0
50217- 0 0 0 0 0 0 0 0 0 0 0 0
50218- 0 0 0 0 0 0 0 0 0 0 0 0
50219- 0 0 0 0 0 0 0 0 0 0 0 0
50220- 0 0 0 0 0 0 0 0 0 0 0 0
50221- 0 0 0 0 0 0 0 0 0 0 0 0
50222- 0 0 0 0 0 0 0 0 0 6 6 6
50223- 6 6 6 10 10 10 14 14 14 18 18 18
50224- 18 18 18 14 14 14 10 10 10 6 6 6
50225- 0 0 0 0 0 0 0 0 0 0 0 0
50226- 0 0 0 0 0 0 0 0 0 0 0 0
50227- 0 0 0 0 0 0 0 0 0 0 0 0
50228- 0 0 0 0 0 0 0 0 0 0 0 0
50229- 0 0 0 0 0 0 0 0 0 6 6 6
50230- 14 14 14 18 18 18 22 22 22 22 22 22
50231- 18 18 18 14 14 14 10 10 10 6 6 6
50232- 0 0 0 0 0 0 0 0 0 0 0 0
50233- 0 0 0 0 0 0 0 0 0 0 0 0
50234- 0 0 0 0 0 0 0 0 0 0 0 0
50235- 0 0 0 0 0 0 0 0 0 0 0 0
50236- 0 0 0 0 0 0 0 0 0 0 0 0
50237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50244+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50250+4 4 4 4 4 4
50251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50258+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50264+4 4 4 4 4 4
50265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50272+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50278+4 4 4 4 4 4
50279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50286+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50292+4 4 4 4 4 4
50293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50300+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50306+4 4 4 4 4 4
50307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50313+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50314+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50319+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50320+4 4 4 4 4 4
50321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50325+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
50326+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
50327+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50328+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50330+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
50331+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
50332+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
50333+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50334+4 4 4 4 4 4
50335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50339+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
50340+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
50341+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50342+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50344+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
50345+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
50346+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
50347+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50348+4 4 4 4 4 4
50349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50353+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
50354+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
50355+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
50356+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50357+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50358+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
50359+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
50360+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
50361+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
50362+4 4 4 4 4 4
50363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50366+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
50367+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
50368+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
50369+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
50370+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50371+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
50372+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
50373+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
50374+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
50375+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
50376+4 4 4 4 4 4
50377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50380+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
50381+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
50382+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
50383+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
50384+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
50385+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
50386+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
50387+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
50388+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
50389+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
50390+4 4 4 4 4 4
50391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50392+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
50394+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
50395+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
50396+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
50397+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
50398+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
50399+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
50400+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
50401+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
50402+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
50403+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
50404+4 4 4 4 4 4
50405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50407+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
50408+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
50409+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
50410+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
50411+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
50412+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
50413+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
50414+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
50415+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
50416+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
50417+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
50418+4 4 4 4 4 4
50419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50421+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
50422+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
50423+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
50424+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
50425+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
50426+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
50427+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
50428+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
50429+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
50430+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
50431+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
50432+4 4 4 4 4 4
50433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50435+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
50436+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
50437+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
50438+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
50439+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
50440+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
50441+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
50442+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
50443+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
50444+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
50445+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
50446+4 4 4 4 4 4
50447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50449+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
50450+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
50451+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
50452+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
50453+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
50454+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
50455+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
50456+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
50457+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
50458+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
50459+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
50460+4 4 4 4 4 4
50461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50462+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
50463+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
50464+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
50465+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
50466+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
50467+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
50468+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
50469+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
50470+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
50471+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
50472+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
50473+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
50474+4 4 4 4 4 4
50475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50476+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
50477+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
50478+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
50479+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
50480+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
50481+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
50482+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
50483+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
50484+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
50485+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
50486+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
50487+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
50488+0 0 0 4 4 4
50489+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
50490+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
50491+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
50492+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
50493+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
50494+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
50495+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
50496+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
50497+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
50498+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
50499+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
50500+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
50501+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
50502+2 0 0 0 0 0
50503+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
50504+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
50505+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
50506+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
50507+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
50508+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
50509+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
50510+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
50511+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
50512+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
50513+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
50514+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
50515+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
50516+37 38 37 0 0 0
50517+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
50518+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
50519+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
50520+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
50521+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
50522+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
50523+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
50524+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
50525+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
50526+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
50527+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
50528+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
50529+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
50530+85 115 134 4 0 0
50531+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
50532+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
50533+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
50534+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
50535+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
50536+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
50537+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
50538+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
50539+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
50540+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
50541+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
50542+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
50543+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
50544+60 73 81 4 0 0
50545+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
50546+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
50547+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
50548+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
50549+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
50550+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
50551+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
50552+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
50553+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
50554+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
50555+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
50556+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
50557+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
50558+16 19 21 4 0 0
50559+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
50560+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
50561+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
50562+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
50563+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
50564+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
50565+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
50566+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
50567+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
50568+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
50569+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
50570+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
50571+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
50572+4 0 0 4 3 3
50573+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
50574+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
50575+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
50576+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
50577+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
50578+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
50579+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
50580+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
50581+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
50582+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
50583+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
50584+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
50585+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
50586+3 2 2 4 4 4
50587+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
50588+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
50589+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
50590+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
50591+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
50592+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
50593+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
50594+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
50595+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
50596+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
50597+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
50598+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
50599+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
50600+4 4 4 4 4 4
50601+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
50602+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
50603+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
50604+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
50605+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
50606+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
50607+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
50608+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
50609+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
50610+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
50611+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
50612+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
50613+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
50614+4 4 4 4 4 4
50615+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
50616+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
50617+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
50618+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
50619+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
50620+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
50621+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
50622+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
50623+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
50624+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
50625+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
50626+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
50627+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
50628+5 5 5 5 5 5
50629+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
50630+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
50631+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
50632+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
50633+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
50634+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
50635+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
50636+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
50637+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
50638+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
50639+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
50640+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
50641+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
50642+5 5 5 4 4 4
50643+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
50644+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
50645+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
50646+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
50647+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
50648+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
50649+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
50650+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
50651+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
50652+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
50653+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
50654+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
50655+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50656+4 4 4 4 4 4
50657+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
50658+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
50659+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
50660+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
50661+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
50662+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
50663+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
50664+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
50665+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
50666+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
50667+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
50668+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
50669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50670+4 4 4 4 4 4
50671+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
50672+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
50673+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
50674+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
50675+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
50676+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
50677+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
50678+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
50679+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
50680+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
50681+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
50682+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50684+4 4 4 4 4 4
50685+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
50686+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
50687+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
50688+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
50689+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
50690+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
50691+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
50692+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
50693+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
50694+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
50695+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
50696+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50698+4 4 4 4 4 4
50699+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
50700+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
50701+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
50702+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
50703+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
50704+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
50705+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
50706+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
50707+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
50708+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
50709+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50710+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50712+4 4 4 4 4 4
50713+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
50714+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
50715+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
50716+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
50717+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
50718+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
50719+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
50720+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
50721+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
50722+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
50723+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
50724+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50726+4 4 4 4 4 4
50727+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
50728+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
50729+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
50730+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
50731+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
50732+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
50733+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
50734+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
50735+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
50736+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
50737+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
50738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50740+4 4 4 4 4 4
50741+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
50742+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
50743+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
50744+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
50745+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
50746+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
50747+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
50748+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
50749+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
50750+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
50751+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50752+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50754+4 4 4 4 4 4
50755+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
50756+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
50757+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
50758+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
50759+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
50760+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
50761+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
50762+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
50763+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
50764+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
50765+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50768+4 4 4 4 4 4
50769+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
50770+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
50771+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
50772+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
50773+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
50774+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
50775+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
50776+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
50777+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
50778+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
50779+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50782+4 4 4 4 4 4
50783+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
50784+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
50785+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
50786+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
50787+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
50788+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
50789+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
50790+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
50791+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
50792+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50793+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50796+4 4 4 4 4 4
50797+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
50798+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
50799+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
50800+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
50801+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
50802+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
50803+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
50804+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
50805+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
50806+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50807+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50810+4 4 4 4 4 4
50811+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
50812+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
50813+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
50814+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
50815+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
50816+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
50817+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
50818+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
50819+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
50820+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50821+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50824+4 4 4 4 4 4
50825+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
50826+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
50827+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
50828+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
50829+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
50830+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
50831+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
50832+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
50833+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
50834+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50835+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50838+4 4 4 4 4 4
50839+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
50840+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
50841+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
50842+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
50843+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
50844+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
50845+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
50846+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
50847+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
50848+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50849+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50852+4 4 4 4 4 4
50853+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
50854+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
50855+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
50856+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
50857+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
50858+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
50859+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
50860+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
50861+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
50862+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50863+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50866+4 4 4 4 4 4
50867+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
50868+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
50869+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
50870+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
50871+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
50872+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
50873+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
50874+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
50875+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
50876+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50877+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50880+4 4 4 4 4 4
50881+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
50882+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
50883+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
50884+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
50885+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
50886+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
50887+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
50888+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
50889+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
50890+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50891+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50894+4 4 4 4 4 4
50895+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
50896+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
50897+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
50898+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
50899+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
50900+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
50901+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
50902+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
50903+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
50904+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50905+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50908+4 4 4 4 4 4
50909+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
50910+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
50911+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
50912+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
50913+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
50914+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
50915+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
50916+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
50917+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
50918+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50919+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50922+4 4 4 4 4 4
50923+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
50924+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
50925+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
50926+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
50927+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
50928+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
50929+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
50930+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
50931+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
50932+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50933+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50936+4 4 4 4 4 4
50937+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
50938+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
50939+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
50940+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
50941+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
50942+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
50943+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
50944+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
50945+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
50946+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50947+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50948+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50949+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50950+4 4 4 4 4 4
50951+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
50952+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
50953+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
50954+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
50955+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
50956+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
50957+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
50958+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
50959+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
50960+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50961+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50964+4 4 4 4 4 4
50965+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
50966+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
50967+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
50968+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
50969+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
50970+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
50971+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
50972+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
50973+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
50974+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50975+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50978+4 4 4 4 4 4
50979+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
50980+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
50981+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
50982+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
50983+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
50984+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
50985+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
50986+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
50987+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
50988+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50989+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50992+4 4 4 4 4 4
50993+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
50994+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
50995+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
50996+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
50997+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
50998+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
50999+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
51000+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
51001+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
51002+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
51003+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
51004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51006+4 4 4 4 4 4
51007+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
51008+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
51009+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
51010+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
51011+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
51012+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
51013+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
51014+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
51015+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
51016+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
51017+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51020+4 4 4 4 4 4
51021+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
51022+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
51023+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
51024+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
51025+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
51026+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
51027+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51028+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
51029+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
51030+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
51031+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
51032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51034+4 4 4 4 4 4
51035+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
51036+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
51037+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
51038+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
51039+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
51040+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
51041+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
51042+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
51043+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
51044+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
51045+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51048+4 4 4 4 4 4
51049+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
51050+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
51051+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
51052+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
51053+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
51054+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
51055+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
51056+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
51057+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
51058+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
51059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51062+4 4 4 4 4 4
51063+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
51064+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
51065+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
51066+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
51067+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
51068+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
51069+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
51070+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
51071+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
51072+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
51073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51076+4 4 4 4 4 4
51077+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
51078+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
51079+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
51080+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
51081+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
51082+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
51083+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
51084+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
51085+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
51086+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
51087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51090+4 4 4 4 4 4
51091+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
51092+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
51093+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
51094+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
51095+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
51096+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
51097+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
51098+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
51099+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
51100+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
51101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51104+4 4 4 4 4 4
51105+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
51106+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
51107+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
51108+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
51109+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
51110+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
51111+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
51112+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
51113+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
51114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51118+4 4 4 4 4 4
51119+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
51120+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
51121+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
51122+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
51123+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
51124+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
51125+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
51126+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
51127+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
51128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51131+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51132+4 4 4 4 4 4
51133+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
51134+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
51135+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
51136+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
51137+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
51138+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
51139+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
51140+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
51141+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51145+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51146+4 4 4 4 4 4
51147+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
51148+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
51149+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
51150+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
51151+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
51152+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
51153+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
51154+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
51155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51160+4 4 4 4 4 4
51161+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
51162+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
51163+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
51164+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
51165+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
51166+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
51167+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
51168+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
51169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51173+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51174+4 4 4 4 4 4
51175+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
51176+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
51177+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
51178+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
51179+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
51180+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
51181+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
51182+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
51183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51188+4 4 4 4 4 4
51189+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51190+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
51191+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
51192+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
51193+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
51194+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
51195+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
51196+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
51197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51202+4 4 4 4 4 4
51203+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51204+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
51205+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
51206+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
51207+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
51208+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
51209+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
51210+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
51211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51216+4 4 4 4 4 4
51217+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51219+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
51220+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
51221+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
51222+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
51223+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
51224+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
51225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51230+4 4 4 4 4 4
51231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
51234+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
51235+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
51236+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
51237+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
51238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51244+4 4 4 4 4 4
51245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51248+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
51249+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
51250+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
51251+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
51252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51258+4 4 4 4 4 4
51259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51262+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
51263+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
51264+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
51265+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
51266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51272+4 4 4 4 4 4
51273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51276+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
51277+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
51278+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
51279+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
51280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51286+4 4 4 4 4 4
51287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
51291+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
51292+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
51293+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
51294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51300+4 4 4 4 4 4
51301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51305+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
51306+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
51307+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
51308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51313+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51314+4 4 4 4 4 4
51315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51319+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
51320+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
51321+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51326+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51327+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51328+4 4 4 4 4 4
51329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51333+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
51334+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
51335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51339+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51340+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51341+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51342+4 4 4 4 4 4
51343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51347+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
51348+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
51349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51353+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51354+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51355+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51356+4 4 4 4 4 4
51357diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
51358index fe92eed..106e085 100644
51359--- a/drivers/video/mb862xx/mb862xxfb_accel.c
51360+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
51361@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
51362 struct mb862xxfb_par *par = info->par;
51363
51364 if (info->var.bits_per_pixel == 32) {
51365- info->fbops->fb_fillrect = cfb_fillrect;
51366- info->fbops->fb_copyarea = cfb_copyarea;
51367- info->fbops->fb_imageblit = cfb_imageblit;
51368+ pax_open_kernel();
51369+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
51370+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
51371+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
51372+ pax_close_kernel();
51373 } else {
51374 outreg(disp, GC_L0EM, 3);
51375- info->fbops->fb_fillrect = mb86290fb_fillrect;
51376- info->fbops->fb_copyarea = mb86290fb_copyarea;
51377- info->fbops->fb_imageblit = mb86290fb_imageblit;
51378+ pax_open_kernel();
51379+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
51380+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
51381+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
51382+ pax_close_kernel();
51383 }
51384 outreg(draw, GDC_REG_DRAW_BASE, 0);
51385 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
51386diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
51387index ff22871..b129bed 100644
51388--- a/drivers/video/nvidia/nvidia.c
51389+++ b/drivers/video/nvidia/nvidia.c
51390@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
51391 info->fix.line_length = (info->var.xres_virtual *
51392 info->var.bits_per_pixel) >> 3;
51393 if (info->var.accel_flags) {
51394- info->fbops->fb_imageblit = nvidiafb_imageblit;
51395- info->fbops->fb_fillrect = nvidiafb_fillrect;
51396- info->fbops->fb_copyarea = nvidiafb_copyarea;
51397- info->fbops->fb_sync = nvidiafb_sync;
51398+ pax_open_kernel();
51399+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
51400+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
51401+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
51402+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
51403+ pax_close_kernel();
51404 info->pixmap.scan_align = 4;
51405 info->flags &= ~FBINFO_HWACCEL_DISABLED;
51406 info->flags |= FBINFO_READS_FAST;
51407 NVResetGraphics(info);
51408 } else {
51409- info->fbops->fb_imageblit = cfb_imageblit;
51410- info->fbops->fb_fillrect = cfb_fillrect;
51411- info->fbops->fb_copyarea = cfb_copyarea;
51412- info->fbops->fb_sync = NULL;
51413+ pax_open_kernel();
51414+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
51415+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
51416+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
51417+ *(void **)&info->fbops->fb_sync = NULL;
51418+ pax_close_kernel();
51419 info->pixmap.scan_align = 1;
51420 info->flags |= FBINFO_HWACCEL_DISABLED;
51421 info->flags &= ~FBINFO_READS_FAST;
51422@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
51423 info->pixmap.size = 8 * 1024;
51424 info->pixmap.flags = FB_PIXMAP_SYSTEM;
51425
51426- if (!hwcur)
51427- info->fbops->fb_cursor = NULL;
51428+ if (!hwcur) {
51429+ pax_open_kernel();
51430+ *(void **)&info->fbops->fb_cursor = NULL;
51431+ pax_close_kernel();
51432+ }
51433
51434 info->var.accel_flags = (!noaccel);
51435
51436diff --git a/drivers/video/output.c b/drivers/video/output.c
51437index 0d6f2cd..6285b97 100644
51438--- a/drivers/video/output.c
51439+++ b/drivers/video/output.c
51440@@ -97,7 +97,7 @@ struct output_device *video_output_register(const char *name,
51441 new_dev->props = op;
51442 new_dev->dev.class = &video_output_class;
51443 new_dev->dev.parent = dev;
51444- dev_set_name(&new_dev->dev, name);
51445+ dev_set_name(&new_dev->dev, "%s", name);
51446 dev_set_drvdata(&new_dev->dev, devdata);
51447 ret_code = device_register(&new_dev->dev);
51448 if (ret_code) {
51449diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
51450index 05c2dc3..ea1f391 100644
51451--- a/drivers/video/s1d13xxxfb.c
51452+++ b/drivers/video/s1d13xxxfb.c
51453@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
51454
51455 switch(prod_id) {
51456 case S1D13506_PROD_ID: /* activate acceleration */
51457- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
51458- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
51459+ pax_open_kernel();
51460+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
51461+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
51462+ pax_close_kernel();
51463 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
51464 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
51465 break;
51466diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
51467index b2b33fc..f9f4658 100644
51468--- a/drivers/video/smscufx.c
51469+++ b/drivers/video/smscufx.c
51470@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
51471 fb_deferred_io_cleanup(info);
51472 kfree(info->fbdefio);
51473 info->fbdefio = NULL;
51474- info->fbops->fb_mmap = ufx_ops_mmap;
51475+ pax_open_kernel();
51476+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
51477+ pax_close_kernel();
51478 }
51479
51480 pr_debug("released /dev/fb%d user=%d count=%d",
51481diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
51482index ec03e72..f578436 100644
51483--- a/drivers/video/udlfb.c
51484+++ b/drivers/video/udlfb.c
51485@@ -623,11 +623,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
51486 dlfb_urb_completion(urb);
51487
51488 error:
51489- atomic_add(bytes_sent, &dev->bytes_sent);
51490- atomic_add(bytes_identical, &dev->bytes_identical);
51491- atomic_add(width*height*2, &dev->bytes_rendered);
51492+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
51493+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
51494+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
51495 end_cycles = get_cycles();
51496- atomic_add(((unsigned int) ((end_cycles - start_cycles)
51497+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
51498 >> 10)), /* Kcycles */
51499 &dev->cpu_kcycles_used);
51500
51501@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
51502 dlfb_urb_completion(urb);
51503
51504 error:
51505- atomic_add(bytes_sent, &dev->bytes_sent);
51506- atomic_add(bytes_identical, &dev->bytes_identical);
51507- atomic_add(bytes_rendered, &dev->bytes_rendered);
51508+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
51509+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
51510+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
51511 end_cycles = get_cycles();
51512- atomic_add(((unsigned int) ((end_cycles - start_cycles)
51513+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
51514 >> 10)), /* Kcycles */
51515 &dev->cpu_kcycles_used);
51516 }
51517@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
51518 fb_deferred_io_cleanup(info);
51519 kfree(info->fbdefio);
51520 info->fbdefio = NULL;
51521- info->fbops->fb_mmap = dlfb_ops_mmap;
51522+ pax_open_kernel();
51523+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
51524+ pax_close_kernel();
51525 }
51526
51527 pr_warn("released /dev/fb%d user=%d count=%d\n",
51528@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
51529 struct fb_info *fb_info = dev_get_drvdata(fbdev);
51530 struct dlfb_data *dev = fb_info->par;
51531 return snprintf(buf, PAGE_SIZE, "%u\n",
51532- atomic_read(&dev->bytes_rendered));
51533+ atomic_read_unchecked(&dev->bytes_rendered));
51534 }
51535
51536 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
51537@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
51538 struct fb_info *fb_info = dev_get_drvdata(fbdev);
51539 struct dlfb_data *dev = fb_info->par;
51540 return snprintf(buf, PAGE_SIZE, "%u\n",
51541- atomic_read(&dev->bytes_identical));
51542+ atomic_read_unchecked(&dev->bytes_identical));
51543 }
51544
51545 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
51546@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
51547 struct fb_info *fb_info = dev_get_drvdata(fbdev);
51548 struct dlfb_data *dev = fb_info->par;
51549 return snprintf(buf, PAGE_SIZE, "%u\n",
51550- atomic_read(&dev->bytes_sent));
51551+ atomic_read_unchecked(&dev->bytes_sent));
51552 }
51553
51554 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
51555@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
51556 struct fb_info *fb_info = dev_get_drvdata(fbdev);
51557 struct dlfb_data *dev = fb_info->par;
51558 return snprintf(buf, PAGE_SIZE, "%u\n",
51559- atomic_read(&dev->cpu_kcycles_used));
51560+ atomic_read_unchecked(&dev->cpu_kcycles_used));
51561 }
51562
51563 static ssize_t edid_show(
51564@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
51565 struct fb_info *fb_info = dev_get_drvdata(fbdev);
51566 struct dlfb_data *dev = fb_info->par;
51567
51568- atomic_set(&dev->bytes_rendered, 0);
51569- atomic_set(&dev->bytes_identical, 0);
51570- atomic_set(&dev->bytes_sent, 0);
51571- atomic_set(&dev->cpu_kcycles_used, 0);
51572+ atomic_set_unchecked(&dev->bytes_rendered, 0);
51573+ atomic_set_unchecked(&dev->bytes_identical, 0);
51574+ atomic_set_unchecked(&dev->bytes_sent, 0);
51575+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
51576
51577 return count;
51578 }
51579diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
51580index e328a61..1b08ecb 100644
51581--- a/drivers/video/uvesafb.c
51582+++ b/drivers/video/uvesafb.c
51583@@ -19,6 +19,7 @@
51584 #include <linux/io.h>
51585 #include <linux/mutex.h>
51586 #include <linux/slab.h>
51587+#include <linux/moduleloader.h>
51588 #include <video/edid.h>
51589 #include <video/uvesafb.h>
51590 #ifdef CONFIG_X86
51591@@ -569,10 +570,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
51592 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
51593 par->pmi_setpal = par->ypan = 0;
51594 } else {
51595+
51596+#ifdef CONFIG_PAX_KERNEXEC
51597+#ifdef CONFIG_MODULES
51598+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
51599+#endif
51600+ if (!par->pmi_code) {
51601+ par->pmi_setpal = par->ypan = 0;
51602+ return 0;
51603+ }
51604+#endif
51605+
51606 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
51607 + task->t.regs.edi);
51608+
51609+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51610+ pax_open_kernel();
51611+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
51612+ pax_close_kernel();
51613+
51614+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
51615+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
51616+#else
51617 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
51618 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
51619+#endif
51620+
51621 printk(KERN_INFO "uvesafb: protected mode interface info at "
51622 "%04x:%04x\n",
51623 (u16)task->t.regs.es, (u16)task->t.regs.edi);
51624@@ -817,13 +840,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
51625 par->ypan = ypan;
51626
51627 if (par->pmi_setpal || par->ypan) {
51628+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
51629 if (__supported_pte_mask & _PAGE_NX) {
51630 par->pmi_setpal = par->ypan = 0;
51631 printk(KERN_WARNING "uvesafb: NX protection is actively."
51632 "We have better not to use the PMI.\n");
51633- } else {
51634+ } else
51635+#endif
51636 uvesafb_vbe_getpmi(task, par);
51637- }
51638 }
51639 #else
51640 /* The protected mode interface is not available on non-x86. */
51641@@ -1457,8 +1481,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
51642 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
51643
51644 /* Disable blanking if the user requested so. */
51645- if (!blank)
51646- info->fbops->fb_blank = NULL;
51647+ if (!blank) {
51648+ pax_open_kernel();
51649+ *(void **)&info->fbops->fb_blank = NULL;
51650+ pax_close_kernel();
51651+ }
51652
51653 /*
51654 * Find out how much IO memory is required for the mode with
51655@@ -1534,8 +1561,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
51656 info->flags = FBINFO_FLAG_DEFAULT |
51657 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
51658
51659- if (!par->ypan)
51660- info->fbops->fb_pan_display = NULL;
51661+ if (!par->ypan) {
51662+ pax_open_kernel();
51663+ *(void **)&info->fbops->fb_pan_display = NULL;
51664+ pax_close_kernel();
51665+ }
51666 }
51667
51668 static void uvesafb_init_mtrr(struct fb_info *info)
51669@@ -1836,6 +1866,11 @@ out:
51670 if (par->vbe_modes)
51671 kfree(par->vbe_modes);
51672
51673+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51674+ if (par->pmi_code)
51675+ module_free_exec(NULL, par->pmi_code);
51676+#endif
51677+
51678 framebuffer_release(info);
51679 return err;
51680 }
51681@@ -1862,6 +1897,12 @@ static int uvesafb_remove(struct platform_device *dev)
51682 kfree(par->vbe_state_orig);
51683 if (par->vbe_state_saved)
51684 kfree(par->vbe_state_saved);
51685+
51686+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51687+ if (par->pmi_code)
51688+ module_free_exec(NULL, par->pmi_code);
51689+#endif
51690+
51691 }
51692
51693 framebuffer_release(info);
51694diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
51695index 501b340..d80aa17 100644
51696--- a/drivers/video/vesafb.c
51697+++ b/drivers/video/vesafb.c
51698@@ -9,6 +9,7 @@
51699 */
51700
51701 #include <linux/module.h>
51702+#include <linux/moduleloader.h>
51703 #include <linux/kernel.h>
51704 #include <linux/errno.h>
51705 #include <linux/string.h>
51706@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
51707 static int vram_total __initdata; /* Set total amount of memory */
51708 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
51709 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
51710-static void (*pmi_start)(void) __read_mostly;
51711-static void (*pmi_pal) (void) __read_mostly;
51712+static void (*pmi_start)(void) __read_only;
51713+static void (*pmi_pal) (void) __read_only;
51714 static int depth __read_mostly;
51715 static int vga_compat __read_mostly;
51716 /* --------------------------------------------------------------------- */
51717@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
51718 unsigned int size_vmode;
51719 unsigned int size_remap;
51720 unsigned int size_total;
51721+ void *pmi_code = NULL;
51722
51723 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
51724 return -ENODEV;
51725@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
51726 size_remap = size_total;
51727 vesafb_fix.smem_len = size_remap;
51728
51729-#ifndef __i386__
51730- screen_info.vesapm_seg = 0;
51731-#endif
51732-
51733 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
51734 printk(KERN_WARNING
51735 "vesafb: cannot reserve video memory at 0x%lx\n",
51736@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
51737 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
51738 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
51739
51740+#ifdef __i386__
51741+
51742+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51743+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
51744+ if (!pmi_code)
51745+#elif !defined(CONFIG_PAX_KERNEXEC)
51746+ if (0)
51747+#endif
51748+
51749+#endif
51750+ screen_info.vesapm_seg = 0;
51751+
51752 if (screen_info.vesapm_seg) {
51753- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
51754- screen_info.vesapm_seg,screen_info.vesapm_off);
51755+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
51756+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
51757 }
51758
51759 if (screen_info.vesapm_seg < 0xc000)
51760@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
51761
51762 if (ypan || pmi_setpal) {
51763 unsigned short *pmi_base;
51764+
51765 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
51766- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
51767- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
51768+
51769+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51770+ pax_open_kernel();
51771+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
51772+#else
51773+ pmi_code = pmi_base;
51774+#endif
51775+
51776+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
51777+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
51778+
51779+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51780+ pmi_start = ktva_ktla(pmi_start);
51781+ pmi_pal = ktva_ktla(pmi_pal);
51782+ pax_close_kernel();
51783+#endif
51784+
51785 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
51786 if (pmi_base[3]) {
51787 printk(KERN_INFO "vesafb: pmi: ports = ");
51788@@ -472,8 +498,11 @@ static int __init vesafb_probe(struct platform_device *dev)
51789 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
51790 (ypan ? FBINFO_HWACCEL_YPAN : 0);
51791
51792- if (!ypan)
51793- info->fbops->fb_pan_display = NULL;
51794+ if (!ypan) {
51795+ pax_open_kernel();
51796+ *(void **)&info->fbops->fb_pan_display = NULL;
51797+ pax_close_kernel();
51798+ }
51799
51800 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
51801 err = -ENOMEM;
51802@@ -488,6 +517,11 @@ static int __init vesafb_probe(struct platform_device *dev)
51803 info->node, info->fix.id);
51804 return 0;
51805 err:
51806+
51807+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51808+ module_free_exec(NULL, pmi_code);
51809+#endif
51810+
51811 if (info->screen_base)
51812 iounmap(info->screen_base);
51813 framebuffer_release(info);
51814diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
51815index 88714ae..16c2e11 100644
51816--- a/drivers/video/via/via_clock.h
51817+++ b/drivers/video/via/via_clock.h
51818@@ -56,7 +56,7 @@ struct via_clock {
51819
51820 void (*set_engine_pll_state)(u8 state);
51821 void (*set_engine_pll)(struct via_pll_config config);
51822-};
51823+} __no_const;
51824
51825
51826 static inline u32 get_pll_internal_frequency(u32 ref_freq,
51827diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
51828index fef20db..d28b1ab 100644
51829--- a/drivers/xen/xenfs/xenstored.c
51830+++ b/drivers/xen/xenfs/xenstored.c
51831@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
51832 static int xsd_kva_open(struct inode *inode, struct file *file)
51833 {
51834 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
51835+#ifdef CONFIG_GRKERNSEC_HIDESYM
51836+ NULL);
51837+#else
51838 xen_store_interface);
51839+#endif
51840+
51841 if (!file->private_data)
51842 return -ENOMEM;
51843 return 0;
51844diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
51845index 055562c..fdfb10d 100644
51846--- a/fs/9p/vfs_addr.c
51847+++ b/fs/9p/vfs_addr.c
51848@@ -186,7 +186,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
51849
51850 retval = v9fs_file_write_internal(inode,
51851 v9inode->writeback_fid,
51852- (__force const char __user *)buffer,
51853+ (const char __force_user *)buffer,
51854 len, &offset, 0);
51855 if (retval > 0)
51856 retval = 0;
51857diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
51858index d86edc8..40ff2fb 100644
51859--- a/fs/9p/vfs_inode.c
51860+++ b/fs/9p/vfs_inode.c
51861@@ -1314,7 +1314,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
51862 void
51863 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
51864 {
51865- char *s = nd_get_link(nd);
51866+ const char *s = nd_get_link(nd);
51867
51868 p9_debug(P9_DEBUG_VFS, " %s %s\n",
51869 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
51870diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
51871index 370b24c..ff0be7b 100644
51872--- a/fs/Kconfig.binfmt
51873+++ b/fs/Kconfig.binfmt
51874@@ -103,7 +103,7 @@ config HAVE_AOUT
51875
51876 config BINFMT_AOUT
51877 tristate "Kernel support for a.out and ECOFF binaries"
51878- depends on HAVE_AOUT
51879+ depends on HAVE_AOUT && BROKEN
51880 ---help---
51881 A.out (Assembler.OUTput) is a set of formats for libraries and
51882 executables used in the earliest versions of UNIX. Linux used
51883diff --git a/fs/aio.c b/fs/aio.c
51884index 2bbcacf..8614116 100644
51885--- a/fs/aio.c
51886+++ b/fs/aio.c
51887@@ -160,7 +160,7 @@ static int aio_setup_ring(struct kioctx *ctx)
51888 size += sizeof(struct io_event) * nr_events;
51889 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
51890
51891- if (nr_pages < 0)
51892+ if (nr_pages <= 0)
51893 return -EINVAL;
51894
51895 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
51896@@ -950,6 +950,7 @@ static ssize_t aio_rw_vect_retry(struct kiocb *iocb, int rw, aio_rw_op *rw_op)
51897 static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
51898 {
51899 ssize_t ret;
51900+ struct iovec iovstack;
51901
51902 kiocb->ki_nr_segs = kiocb->ki_nbytes;
51903
51904@@ -957,17 +958,22 @@ static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
51905 if (compat)
51906 ret = compat_rw_copy_check_uvector(rw,
51907 (struct compat_iovec __user *)kiocb->ki_buf,
51908- kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
51909+ kiocb->ki_nr_segs, 1, &iovstack,
51910 &kiocb->ki_iovec);
51911 else
51912 #endif
51913 ret = rw_copy_check_uvector(rw,
51914 (struct iovec __user *)kiocb->ki_buf,
51915- kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
51916+ kiocb->ki_nr_segs, 1, &iovstack,
51917 &kiocb->ki_iovec);
51918 if (ret < 0)
51919 return ret;
51920
51921+ if (kiocb->ki_iovec == &iovstack) {
51922+ kiocb->ki_inline_vec = iovstack;
51923+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
51924+ }
51925+
51926 /* ki_nbytes now reflect bytes instead of segs */
51927 kiocb->ki_nbytes = ret;
51928 return 0;
51929diff --git a/fs/attr.c b/fs/attr.c
51930index 1449adb..a2038c2 100644
51931--- a/fs/attr.c
51932+++ b/fs/attr.c
51933@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
51934 unsigned long limit;
51935
51936 limit = rlimit(RLIMIT_FSIZE);
51937+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
51938 if (limit != RLIM_INFINITY && offset > limit)
51939 goto out_sig;
51940 if (offset > inode->i_sb->s_maxbytes)
51941diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
51942index 3db70da..7aeec5b 100644
51943--- a/fs/autofs4/waitq.c
51944+++ b/fs/autofs4/waitq.c
51945@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
51946 {
51947 unsigned long sigpipe, flags;
51948 mm_segment_t fs;
51949- const char *data = (const char *)addr;
51950+ const char __user *data = (const char __force_user *)addr;
51951 ssize_t wr = 0;
51952
51953 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
51954@@ -346,6 +346,10 @@ static int validate_request(struct autofs_wait_queue **wait,
51955 return 1;
51956 }
51957
51958+#ifdef CONFIG_GRKERNSEC_HIDESYM
51959+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
51960+#endif
51961+
51962 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
51963 enum autofs_notify notify)
51964 {
51965@@ -379,7 +383,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
51966
51967 /* If this is a direct mount request create a dummy name */
51968 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
51969+#ifdef CONFIG_GRKERNSEC_HIDESYM
51970+ /* this name does get written to userland via autofs4_write() */
51971+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
51972+#else
51973 qstr.len = sprintf(name, "%p", dentry);
51974+#endif
51975 else {
51976 qstr.len = autofs4_getpath(sbi, dentry, &name);
51977 if (!qstr.len) {
51978diff --git a/fs/befs/endian.h b/fs/befs/endian.h
51979index 2722387..c8dd2a7 100644
51980--- a/fs/befs/endian.h
51981+++ b/fs/befs/endian.h
51982@@ -11,7 +11,7 @@
51983
51984 #include <asm/byteorder.h>
51985
51986-static inline u64
51987+static inline u64 __intentional_overflow(-1)
51988 fs64_to_cpu(const struct super_block *sb, fs64 n)
51989 {
51990 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
51991@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
51992 return (__force fs64)cpu_to_be64(n);
51993 }
51994
51995-static inline u32
51996+static inline u32 __intentional_overflow(-1)
51997 fs32_to_cpu(const struct super_block *sb, fs32 n)
51998 {
51999 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
52000diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
52001index f95dddc..b1e2c1c 100644
52002--- a/fs/befs/linuxvfs.c
52003+++ b/fs/befs/linuxvfs.c
52004@@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
52005 {
52006 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
52007 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
52008- char *link = nd_get_link(nd);
52009+ const char *link = nd_get_link(nd);
52010 if (!IS_ERR(link))
52011 kfree(link);
52012 }
52013diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
52014index bce8769..7fc7544 100644
52015--- a/fs/binfmt_aout.c
52016+++ b/fs/binfmt_aout.c
52017@@ -16,6 +16,7 @@
52018 #include <linux/string.h>
52019 #include <linux/fs.h>
52020 #include <linux/file.h>
52021+#include <linux/security.h>
52022 #include <linux/stat.h>
52023 #include <linux/fcntl.h>
52024 #include <linux/ptrace.h>
52025@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
52026 #endif
52027 # define START_STACK(u) ((void __user *)u.start_stack)
52028
52029+ memset(&dump, 0, sizeof(dump));
52030+
52031 fs = get_fs();
52032 set_fs(KERNEL_DS);
52033 has_dumped = 1;
52034@@ -69,10 +72,12 @@ static int aout_core_dump(struct coredump_params *cprm)
52035
52036 /* If the size of the dump file exceeds the rlimit, then see what would happen
52037 if we wrote the stack, but not the data area. */
52038+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
52039 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
52040 dump.u_dsize = 0;
52041
52042 /* Make sure we have enough room to write the stack and data areas. */
52043+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
52044 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
52045 dump.u_ssize = 0;
52046
52047@@ -233,6 +238,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
52048 rlim = rlimit(RLIMIT_DATA);
52049 if (rlim >= RLIM_INFINITY)
52050 rlim = ~0;
52051+
52052+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
52053 if (ex.a_data + ex.a_bss > rlim)
52054 return -ENOMEM;
52055
52056@@ -267,6 +274,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
52057
52058 install_exec_creds(bprm);
52059
52060+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52061+ current->mm->pax_flags = 0UL;
52062+#endif
52063+
52064+#ifdef CONFIG_PAX_PAGEEXEC
52065+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
52066+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
52067+
52068+#ifdef CONFIG_PAX_EMUTRAMP
52069+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
52070+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
52071+#endif
52072+
52073+#ifdef CONFIG_PAX_MPROTECT
52074+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
52075+ current->mm->pax_flags |= MF_PAX_MPROTECT;
52076+#endif
52077+
52078+ }
52079+#endif
52080+
52081 if (N_MAGIC(ex) == OMAGIC) {
52082 unsigned long text_addr, map_size;
52083 loff_t pos;
52084@@ -324,7 +352,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
52085 }
52086
52087 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
52088- PROT_READ | PROT_WRITE | PROT_EXEC,
52089+ PROT_READ | PROT_WRITE,
52090 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
52091 fd_offset + ex.a_text);
52092 if (error != N_DATADDR(ex)) {
52093diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
52094index f8a0b0e..6f036ed 100644
52095--- a/fs/binfmt_elf.c
52096+++ b/fs/binfmt_elf.c
52097@@ -34,6 +34,7 @@
52098 #include <linux/utsname.h>
52099 #include <linux/coredump.h>
52100 #include <linux/sched.h>
52101+#include <linux/xattr.h>
52102 #include <asm/uaccess.h>
52103 #include <asm/param.h>
52104 #include <asm/page.h>
52105@@ -60,6 +61,14 @@ static int elf_core_dump(struct coredump_params *cprm);
52106 #define elf_core_dump NULL
52107 #endif
52108
52109+#ifdef CONFIG_PAX_MPROTECT
52110+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
52111+#endif
52112+
52113+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52114+static void elf_handle_mmap(struct file *file);
52115+#endif
52116+
52117 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
52118 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
52119 #else
52120@@ -79,6 +88,15 @@ static struct linux_binfmt elf_format = {
52121 .load_binary = load_elf_binary,
52122 .load_shlib = load_elf_library,
52123 .core_dump = elf_core_dump,
52124+
52125+#ifdef CONFIG_PAX_MPROTECT
52126+ .handle_mprotect= elf_handle_mprotect,
52127+#endif
52128+
52129+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52130+ .handle_mmap = elf_handle_mmap,
52131+#endif
52132+
52133 .min_coredump = ELF_EXEC_PAGESIZE,
52134 };
52135
52136@@ -86,6 +104,8 @@ static struct linux_binfmt elf_format = {
52137
52138 static int set_brk(unsigned long start, unsigned long end)
52139 {
52140+ unsigned long e = end;
52141+
52142 start = ELF_PAGEALIGN(start);
52143 end = ELF_PAGEALIGN(end);
52144 if (end > start) {
52145@@ -94,7 +114,7 @@ static int set_brk(unsigned long start, unsigned long end)
52146 if (BAD_ADDR(addr))
52147 return addr;
52148 }
52149- current->mm->start_brk = current->mm->brk = end;
52150+ current->mm->start_brk = current->mm->brk = e;
52151 return 0;
52152 }
52153
52154@@ -155,12 +175,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
52155 elf_addr_t __user *u_rand_bytes;
52156 const char *k_platform = ELF_PLATFORM;
52157 const char *k_base_platform = ELF_BASE_PLATFORM;
52158- unsigned char k_rand_bytes[16];
52159+ u32 k_rand_bytes[4];
52160 int items;
52161 elf_addr_t *elf_info;
52162 int ei_index = 0;
52163 const struct cred *cred = current_cred();
52164 struct vm_area_struct *vma;
52165+ unsigned long saved_auxv[AT_VECTOR_SIZE];
52166
52167 /*
52168 * In some cases (e.g. Hyper-Threading), we want to avoid L1
52169@@ -202,8 +223,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
52170 * Generate 16 random bytes for userspace PRNG seeding.
52171 */
52172 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
52173- u_rand_bytes = (elf_addr_t __user *)
52174- STACK_ALLOC(p, sizeof(k_rand_bytes));
52175+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
52176+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
52177+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
52178+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
52179+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
52180+ u_rand_bytes = (elf_addr_t __user *) p;
52181 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
52182 return -EFAULT;
52183
52184@@ -318,9 +343,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
52185 return -EFAULT;
52186 current->mm->env_end = p;
52187
52188+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
52189+
52190 /* Put the elf_info on the stack in the right place. */
52191 sp = (elf_addr_t __user *)envp + 1;
52192- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
52193+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
52194 return -EFAULT;
52195 return 0;
52196 }
52197@@ -388,15 +415,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
52198 an ELF header */
52199
52200 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
52201- struct file *interpreter, unsigned long *interp_map_addr,
52202- unsigned long no_base)
52203+ struct file *interpreter, unsigned long no_base)
52204 {
52205 struct elf_phdr *elf_phdata;
52206 struct elf_phdr *eppnt;
52207- unsigned long load_addr = 0;
52208+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
52209 int load_addr_set = 0;
52210 unsigned long last_bss = 0, elf_bss = 0;
52211- unsigned long error = ~0UL;
52212+ unsigned long error = -EINVAL;
52213 unsigned long total_size;
52214 int retval, i, size;
52215
52216@@ -442,6 +468,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
52217 goto out_close;
52218 }
52219
52220+#ifdef CONFIG_PAX_SEGMEXEC
52221+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
52222+ pax_task_size = SEGMEXEC_TASK_SIZE;
52223+#endif
52224+
52225 eppnt = elf_phdata;
52226 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
52227 if (eppnt->p_type == PT_LOAD) {
52228@@ -465,8 +496,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
52229 map_addr = elf_map(interpreter, load_addr + vaddr,
52230 eppnt, elf_prot, elf_type, total_size);
52231 total_size = 0;
52232- if (!*interp_map_addr)
52233- *interp_map_addr = map_addr;
52234 error = map_addr;
52235 if (BAD_ADDR(map_addr))
52236 goto out_close;
52237@@ -485,8 +514,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
52238 k = load_addr + eppnt->p_vaddr;
52239 if (BAD_ADDR(k) ||
52240 eppnt->p_filesz > eppnt->p_memsz ||
52241- eppnt->p_memsz > TASK_SIZE ||
52242- TASK_SIZE - eppnt->p_memsz < k) {
52243+ eppnt->p_memsz > pax_task_size ||
52244+ pax_task_size - eppnt->p_memsz < k) {
52245 error = -ENOMEM;
52246 goto out_close;
52247 }
52248@@ -538,6 +567,315 @@ out:
52249 return error;
52250 }
52251
52252+#ifdef CONFIG_PAX_PT_PAX_FLAGS
52253+#ifdef CONFIG_PAX_SOFTMODE
52254+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
52255+{
52256+ unsigned long pax_flags = 0UL;
52257+
52258+#ifdef CONFIG_PAX_PAGEEXEC
52259+ if (elf_phdata->p_flags & PF_PAGEEXEC)
52260+ pax_flags |= MF_PAX_PAGEEXEC;
52261+#endif
52262+
52263+#ifdef CONFIG_PAX_SEGMEXEC
52264+ if (elf_phdata->p_flags & PF_SEGMEXEC)
52265+ pax_flags |= MF_PAX_SEGMEXEC;
52266+#endif
52267+
52268+#ifdef CONFIG_PAX_EMUTRAMP
52269+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
52270+ pax_flags |= MF_PAX_EMUTRAMP;
52271+#endif
52272+
52273+#ifdef CONFIG_PAX_MPROTECT
52274+ if (elf_phdata->p_flags & PF_MPROTECT)
52275+ pax_flags |= MF_PAX_MPROTECT;
52276+#endif
52277+
52278+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
52279+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
52280+ pax_flags |= MF_PAX_RANDMMAP;
52281+#endif
52282+
52283+ return pax_flags;
52284+}
52285+#endif
52286+
52287+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
52288+{
52289+ unsigned long pax_flags = 0UL;
52290+
52291+#ifdef CONFIG_PAX_PAGEEXEC
52292+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
52293+ pax_flags |= MF_PAX_PAGEEXEC;
52294+#endif
52295+
52296+#ifdef CONFIG_PAX_SEGMEXEC
52297+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
52298+ pax_flags |= MF_PAX_SEGMEXEC;
52299+#endif
52300+
52301+#ifdef CONFIG_PAX_EMUTRAMP
52302+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
52303+ pax_flags |= MF_PAX_EMUTRAMP;
52304+#endif
52305+
52306+#ifdef CONFIG_PAX_MPROTECT
52307+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
52308+ pax_flags |= MF_PAX_MPROTECT;
52309+#endif
52310+
52311+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
52312+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
52313+ pax_flags |= MF_PAX_RANDMMAP;
52314+#endif
52315+
52316+ return pax_flags;
52317+}
52318+#endif
52319+
52320+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
52321+#ifdef CONFIG_PAX_SOFTMODE
52322+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
52323+{
52324+ unsigned long pax_flags = 0UL;
52325+
52326+#ifdef CONFIG_PAX_PAGEEXEC
52327+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
52328+ pax_flags |= MF_PAX_PAGEEXEC;
52329+#endif
52330+
52331+#ifdef CONFIG_PAX_SEGMEXEC
52332+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
52333+ pax_flags |= MF_PAX_SEGMEXEC;
52334+#endif
52335+
52336+#ifdef CONFIG_PAX_EMUTRAMP
52337+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
52338+ pax_flags |= MF_PAX_EMUTRAMP;
52339+#endif
52340+
52341+#ifdef CONFIG_PAX_MPROTECT
52342+ if (pax_flags_softmode & MF_PAX_MPROTECT)
52343+ pax_flags |= MF_PAX_MPROTECT;
52344+#endif
52345+
52346+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
52347+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
52348+ pax_flags |= MF_PAX_RANDMMAP;
52349+#endif
52350+
52351+ return pax_flags;
52352+}
52353+#endif
52354+
52355+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
52356+{
52357+ unsigned long pax_flags = 0UL;
52358+
52359+#ifdef CONFIG_PAX_PAGEEXEC
52360+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
52361+ pax_flags |= MF_PAX_PAGEEXEC;
52362+#endif
52363+
52364+#ifdef CONFIG_PAX_SEGMEXEC
52365+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
52366+ pax_flags |= MF_PAX_SEGMEXEC;
52367+#endif
52368+
52369+#ifdef CONFIG_PAX_EMUTRAMP
52370+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
52371+ pax_flags |= MF_PAX_EMUTRAMP;
52372+#endif
52373+
52374+#ifdef CONFIG_PAX_MPROTECT
52375+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
52376+ pax_flags |= MF_PAX_MPROTECT;
52377+#endif
52378+
52379+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
52380+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
52381+ pax_flags |= MF_PAX_RANDMMAP;
52382+#endif
52383+
52384+ return pax_flags;
52385+}
52386+#endif
52387+
52388+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52389+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
52390+{
52391+ unsigned long pax_flags = 0UL;
52392+
52393+#ifdef CONFIG_PAX_EI_PAX
52394+
52395+#ifdef CONFIG_PAX_PAGEEXEC
52396+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
52397+ pax_flags |= MF_PAX_PAGEEXEC;
52398+#endif
52399+
52400+#ifdef CONFIG_PAX_SEGMEXEC
52401+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
52402+ pax_flags |= MF_PAX_SEGMEXEC;
52403+#endif
52404+
52405+#ifdef CONFIG_PAX_EMUTRAMP
52406+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
52407+ pax_flags |= MF_PAX_EMUTRAMP;
52408+#endif
52409+
52410+#ifdef CONFIG_PAX_MPROTECT
52411+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
52412+ pax_flags |= MF_PAX_MPROTECT;
52413+#endif
52414+
52415+#ifdef CONFIG_PAX_ASLR
52416+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
52417+ pax_flags |= MF_PAX_RANDMMAP;
52418+#endif
52419+
52420+#else
52421+
52422+#ifdef CONFIG_PAX_PAGEEXEC
52423+ pax_flags |= MF_PAX_PAGEEXEC;
52424+#endif
52425+
52426+#ifdef CONFIG_PAX_SEGMEXEC
52427+ pax_flags |= MF_PAX_SEGMEXEC;
52428+#endif
52429+
52430+#ifdef CONFIG_PAX_MPROTECT
52431+ pax_flags |= MF_PAX_MPROTECT;
52432+#endif
52433+
52434+#ifdef CONFIG_PAX_RANDMMAP
52435+ if (randomize_va_space)
52436+ pax_flags |= MF_PAX_RANDMMAP;
52437+#endif
52438+
52439+#endif
52440+
52441+ return pax_flags;
52442+}
52443+
52444+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
52445+{
52446+
52447+#ifdef CONFIG_PAX_PT_PAX_FLAGS
52448+ unsigned long i;
52449+
52450+ for (i = 0UL; i < elf_ex->e_phnum; i++)
52451+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
52452+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
52453+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
52454+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
52455+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
52456+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
52457+ return ~0UL;
52458+
52459+#ifdef CONFIG_PAX_SOFTMODE
52460+ if (pax_softmode)
52461+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
52462+ else
52463+#endif
52464+
52465+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
52466+ break;
52467+ }
52468+#endif
52469+
52470+ return ~0UL;
52471+}
52472+
52473+static unsigned long pax_parse_xattr_pax(struct file * const file)
52474+{
52475+
52476+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
52477+ ssize_t xattr_size, i;
52478+ unsigned char xattr_value[sizeof("pemrs") - 1];
52479+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
52480+
52481+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
52482+ if (xattr_size <= 0 || xattr_size > sizeof xattr_value)
52483+ return ~0UL;
52484+
52485+ for (i = 0; i < xattr_size; i++)
52486+ switch (xattr_value[i]) {
52487+ default:
52488+ return ~0UL;
52489+
52490+#define parse_flag(option1, option2, flag) \
52491+ case option1: \
52492+ if (pax_flags_hardmode & MF_PAX_##flag) \
52493+ return ~0UL; \
52494+ pax_flags_hardmode |= MF_PAX_##flag; \
52495+ break; \
52496+ case option2: \
52497+ if (pax_flags_softmode & MF_PAX_##flag) \
52498+ return ~0UL; \
52499+ pax_flags_softmode |= MF_PAX_##flag; \
52500+ break;
52501+
52502+ parse_flag('p', 'P', PAGEEXEC);
52503+ parse_flag('e', 'E', EMUTRAMP);
52504+ parse_flag('m', 'M', MPROTECT);
52505+ parse_flag('r', 'R', RANDMMAP);
52506+ parse_flag('s', 'S', SEGMEXEC);
52507+
52508+#undef parse_flag
52509+ }
52510+
52511+ if (pax_flags_hardmode & pax_flags_softmode)
52512+ return ~0UL;
52513+
52514+#ifdef CONFIG_PAX_SOFTMODE
52515+ if (pax_softmode)
52516+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
52517+ else
52518+#endif
52519+
52520+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
52521+#else
52522+ return ~0UL;
52523+#endif
52524+
52525+}
52526+
52527+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
52528+{
52529+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
52530+
52531+ pax_flags = pax_parse_ei_pax(elf_ex);
52532+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
52533+ xattr_pax_flags = pax_parse_xattr_pax(file);
52534+
52535+ if (pt_pax_flags == ~0UL)
52536+ pt_pax_flags = xattr_pax_flags;
52537+ else if (xattr_pax_flags == ~0UL)
52538+ xattr_pax_flags = pt_pax_flags;
52539+ if (pt_pax_flags != xattr_pax_flags)
52540+ return -EINVAL;
52541+ if (pt_pax_flags != ~0UL)
52542+ pax_flags = pt_pax_flags;
52543+
52544+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
52545+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
52546+ if ((__supported_pte_mask & _PAGE_NX))
52547+ pax_flags &= ~MF_PAX_SEGMEXEC;
52548+ else
52549+ pax_flags &= ~MF_PAX_PAGEEXEC;
52550+ }
52551+#endif
52552+
52553+ if (0 > pax_check_flags(&pax_flags))
52554+ return -EINVAL;
52555+
52556+ current->mm->pax_flags = pax_flags;
52557+ return 0;
52558+}
52559+#endif
52560+
52561 /*
52562 * These are the functions used to load ELF style executables and shared
52563 * libraries. There is no binary dependent code anywhere else.
52564@@ -554,6 +892,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
52565 {
52566 unsigned int random_variable = 0;
52567
52568+#ifdef CONFIG_PAX_RANDUSTACK
52569+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
52570+ return stack_top - current->mm->delta_stack;
52571+#endif
52572+
52573 if ((current->flags & PF_RANDOMIZE) &&
52574 !(current->personality & ADDR_NO_RANDOMIZE)) {
52575 random_variable = get_random_int() & STACK_RND_MASK;
52576@@ -572,7 +915,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
52577 unsigned long load_addr = 0, load_bias = 0;
52578 int load_addr_set = 0;
52579 char * elf_interpreter = NULL;
52580- unsigned long error;
52581+ unsigned long error = 0;
52582 struct elf_phdr *elf_ppnt, *elf_phdata;
52583 unsigned long elf_bss, elf_brk;
52584 int retval, i;
52585@@ -582,12 +925,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
52586 unsigned long start_code, end_code, start_data, end_data;
52587 unsigned long reloc_func_desc __maybe_unused = 0;
52588 int executable_stack = EXSTACK_DEFAULT;
52589- unsigned long def_flags = 0;
52590 struct pt_regs *regs = current_pt_regs();
52591 struct {
52592 struct elfhdr elf_ex;
52593 struct elfhdr interp_elf_ex;
52594 } *loc;
52595+ unsigned long pax_task_size = TASK_SIZE;
52596
52597 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
52598 if (!loc) {
52599@@ -723,11 +1066,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
52600 goto out_free_dentry;
52601
52602 /* OK, This is the point of no return */
52603- current->mm->def_flags = def_flags;
52604+
52605+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52606+ current->mm->pax_flags = 0UL;
52607+#endif
52608+
52609+#ifdef CONFIG_PAX_DLRESOLVE
52610+ current->mm->call_dl_resolve = 0UL;
52611+#endif
52612+
52613+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
52614+ current->mm->call_syscall = 0UL;
52615+#endif
52616+
52617+#ifdef CONFIG_PAX_ASLR
52618+ current->mm->delta_mmap = 0UL;
52619+ current->mm->delta_stack = 0UL;
52620+#endif
52621+
52622+ current->mm->def_flags = 0;
52623+
52624+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52625+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
52626+ send_sig(SIGKILL, current, 0);
52627+ goto out_free_dentry;
52628+ }
52629+#endif
52630+
52631+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
52632+ pax_set_initial_flags(bprm);
52633+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
52634+ if (pax_set_initial_flags_func)
52635+ (pax_set_initial_flags_func)(bprm);
52636+#endif
52637+
52638+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
52639+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
52640+ current->mm->context.user_cs_limit = PAGE_SIZE;
52641+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
52642+ }
52643+#endif
52644+
52645+#ifdef CONFIG_PAX_SEGMEXEC
52646+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
52647+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
52648+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
52649+ pax_task_size = SEGMEXEC_TASK_SIZE;
52650+ current->mm->def_flags |= VM_NOHUGEPAGE;
52651+ }
52652+#endif
52653+
52654+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
52655+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
52656+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
52657+ put_cpu();
52658+ }
52659+#endif
52660
52661 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
52662 may depend on the personality. */
52663 SET_PERSONALITY(loc->elf_ex);
52664+
52665+#ifdef CONFIG_PAX_ASLR
52666+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
52667+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
52668+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
52669+ }
52670+#endif
52671+
52672+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
52673+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
52674+ executable_stack = EXSTACK_DISABLE_X;
52675+ current->personality &= ~READ_IMPLIES_EXEC;
52676+ } else
52677+#endif
52678+
52679 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
52680 current->personality |= READ_IMPLIES_EXEC;
52681
52682@@ -819,6 +1232,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
52683 #else
52684 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
52685 #endif
52686+
52687+#ifdef CONFIG_PAX_RANDMMAP
52688+ /* PaX: randomize base address at the default exe base if requested */
52689+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
52690+#ifdef CONFIG_SPARC64
52691+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
52692+#else
52693+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
52694+#endif
52695+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
52696+ elf_flags |= MAP_FIXED;
52697+ }
52698+#endif
52699+
52700 }
52701
52702 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
52703@@ -851,9 +1278,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
52704 * allowed task size. Note that p_filesz must always be
52705 * <= p_memsz so it is only necessary to check p_memsz.
52706 */
52707- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
52708- elf_ppnt->p_memsz > TASK_SIZE ||
52709- TASK_SIZE - elf_ppnt->p_memsz < k) {
52710+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
52711+ elf_ppnt->p_memsz > pax_task_size ||
52712+ pax_task_size - elf_ppnt->p_memsz < k) {
52713 /* set_brk can never work. Avoid overflows. */
52714 send_sig(SIGKILL, current, 0);
52715 retval = -EINVAL;
52716@@ -892,17 +1319,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
52717 goto out_free_dentry;
52718 }
52719 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
52720- send_sig(SIGSEGV, current, 0);
52721- retval = -EFAULT; /* Nobody gets to see this, but.. */
52722- goto out_free_dentry;
52723+ /*
52724+ * This bss-zeroing can fail if the ELF
52725+ * file specifies odd protections. So
52726+ * we don't check the return value
52727+ */
52728 }
52729
52730+#ifdef CONFIG_PAX_RANDMMAP
52731+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
52732+ unsigned long start, size, flags;
52733+ vm_flags_t vm_flags;
52734+
52735+ start = ELF_PAGEALIGN(elf_brk);
52736+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
52737+ flags = MAP_FIXED | MAP_PRIVATE;
52738+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
52739+
52740+ down_write(&current->mm->mmap_sem);
52741+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
52742+ retval = -ENOMEM;
52743+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
52744+// if (current->personality & ADDR_NO_RANDOMIZE)
52745+// vm_flags |= VM_READ | VM_MAYREAD;
52746+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
52747+ retval = IS_ERR_VALUE(start) ? start : 0;
52748+ }
52749+ up_write(&current->mm->mmap_sem);
52750+ if (retval == 0)
52751+ retval = set_brk(start + size, start + size + PAGE_SIZE);
52752+ if (retval < 0) {
52753+ send_sig(SIGKILL, current, 0);
52754+ goto out_free_dentry;
52755+ }
52756+ }
52757+#endif
52758+
52759 if (elf_interpreter) {
52760- unsigned long interp_map_addr = 0;
52761-
52762 elf_entry = load_elf_interp(&loc->interp_elf_ex,
52763 interpreter,
52764- &interp_map_addr,
52765 load_bias);
52766 if (!IS_ERR((void *)elf_entry)) {
52767 /*
52768@@ -1124,7 +1579,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
52769 * Decide what to dump of a segment, part, all or none.
52770 */
52771 static unsigned long vma_dump_size(struct vm_area_struct *vma,
52772- unsigned long mm_flags)
52773+ unsigned long mm_flags, long signr)
52774 {
52775 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
52776
52777@@ -1162,7 +1617,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
52778 if (vma->vm_file == NULL)
52779 return 0;
52780
52781- if (FILTER(MAPPED_PRIVATE))
52782+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
52783 goto whole;
52784
52785 /*
52786@@ -1387,9 +1842,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
52787 {
52788 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
52789 int i = 0;
52790- do
52791+ do {
52792 i += 2;
52793- while (auxv[i - 2] != AT_NULL);
52794+ } while (auxv[i - 2] != AT_NULL);
52795 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
52796 }
52797
52798@@ -1398,7 +1853,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
52799 {
52800 mm_segment_t old_fs = get_fs();
52801 set_fs(KERNEL_DS);
52802- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
52803+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
52804 set_fs(old_fs);
52805 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
52806 }
52807@@ -2019,14 +2474,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
52808 }
52809
52810 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
52811- unsigned long mm_flags)
52812+ struct coredump_params *cprm)
52813 {
52814 struct vm_area_struct *vma;
52815 size_t size = 0;
52816
52817 for (vma = first_vma(current, gate_vma); vma != NULL;
52818 vma = next_vma(vma, gate_vma))
52819- size += vma_dump_size(vma, mm_flags);
52820+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
52821 return size;
52822 }
52823
52824@@ -2119,7 +2574,7 @@ static int elf_core_dump(struct coredump_params *cprm)
52825
52826 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
52827
52828- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
52829+ offset += elf_core_vma_data_size(gate_vma, cprm);
52830 offset += elf_core_extra_data_size();
52831 e_shoff = offset;
52832
52833@@ -2133,10 +2588,12 @@ static int elf_core_dump(struct coredump_params *cprm)
52834 offset = dataoff;
52835
52836 size += sizeof(*elf);
52837+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
52838 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
52839 goto end_coredump;
52840
52841 size += sizeof(*phdr4note);
52842+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
52843 if (size > cprm->limit
52844 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
52845 goto end_coredump;
52846@@ -2150,7 +2607,7 @@ static int elf_core_dump(struct coredump_params *cprm)
52847 phdr.p_offset = offset;
52848 phdr.p_vaddr = vma->vm_start;
52849 phdr.p_paddr = 0;
52850- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
52851+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
52852 phdr.p_memsz = vma->vm_end - vma->vm_start;
52853 offset += phdr.p_filesz;
52854 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
52855@@ -2161,6 +2618,7 @@ static int elf_core_dump(struct coredump_params *cprm)
52856 phdr.p_align = ELF_EXEC_PAGESIZE;
52857
52858 size += sizeof(phdr);
52859+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
52860 if (size > cprm->limit
52861 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
52862 goto end_coredump;
52863@@ -2185,7 +2643,7 @@ static int elf_core_dump(struct coredump_params *cprm)
52864 unsigned long addr;
52865 unsigned long end;
52866
52867- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
52868+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
52869
52870 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
52871 struct page *page;
52872@@ -2194,6 +2652,7 @@ static int elf_core_dump(struct coredump_params *cprm)
52873 page = get_dump_page(addr);
52874 if (page) {
52875 void *kaddr = kmap(page);
52876+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
52877 stop = ((size += PAGE_SIZE) > cprm->limit) ||
52878 !dump_write(cprm->file, kaddr,
52879 PAGE_SIZE);
52880@@ -2211,6 +2670,7 @@ static int elf_core_dump(struct coredump_params *cprm)
52881
52882 if (e_phnum == PN_XNUM) {
52883 size += sizeof(*shdr4extnum);
52884+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
52885 if (size > cprm->limit
52886 || !dump_write(cprm->file, shdr4extnum,
52887 sizeof(*shdr4extnum)))
52888@@ -2231,6 +2691,167 @@ out:
52889
52890 #endif /* CONFIG_ELF_CORE */
52891
52892+#ifdef CONFIG_PAX_MPROTECT
52893+/* PaX: non-PIC ELF libraries need relocations on their executable segments
52894+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
52895+ * we'll remove VM_MAYWRITE for good on RELRO segments.
52896+ *
52897+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
52898+ * basis because we want to allow the common case and not the special ones.
52899+ */
52900+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
52901+{
52902+ struct elfhdr elf_h;
52903+ struct elf_phdr elf_p;
52904+ unsigned long i;
52905+ unsigned long oldflags;
52906+ bool is_textrel_rw, is_textrel_rx, is_relro;
52907+
52908+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
52909+ return;
52910+
52911+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
52912+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
52913+
52914+#ifdef CONFIG_PAX_ELFRELOCS
52915+ /* possible TEXTREL */
52916+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
52917+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
52918+#else
52919+ is_textrel_rw = false;
52920+ is_textrel_rx = false;
52921+#endif
52922+
52923+ /* possible RELRO */
52924+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
52925+
52926+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
52927+ return;
52928+
52929+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
52930+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
52931+
52932+#ifdef CONFIG_PAX_ETEXECRELOCS
52933+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
52934+#else
52935+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
52936+#endif
52937+
52938+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
52939+ !elf_check_arch(&elf_h) ||
52940+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
52941+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
52942+ return;
52943+
52944+ for (i = 0UL; i < elf_h.e_phnum; i++) {
52945+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
52946+ return;
52947+ switch (elf_p.p_type) {
52948+ case PT_DYNAMIC:
52949+ if (!is_textrel_rw && !is_textrel_rx)
52950+ continue;
52951+ i = 0UL;
52952+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
52953+ elf_dyn dyn;
52954+
52955+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
52956+ break;
52957+ if (dyn.d_tag == DT_NULL)
52958+ break;
52959+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
52960+ gr_log_textrel(vma);
52961+ if (is_textrel_rw)
52962+ vma->vm_flags |= VM_MAYWRITE;
52963+ else
52964+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
52965+ vma->vm_flags &= ~VM_MAYWRITE;
52966+ break;
52967+ }
52968+ i++;
52969+ }
52970+ is_textrel_rw = false;
52971+ is_textrel_rx = false;
52972+ continue;
52973+
52974+ case PT_GNU_RELRO:
52975+ if (!is_relro)
52976+ continue;
52977+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
52978+ vma->vm_flags &= ~VM_MAYWRITE;
52979+ is_relro = false;
52980+ continue;
52981+
52982+#ifdef CONFIG_PAX_PT_PAX_FLAGS
52983+ case PT_PAX_FLAGS: {
52984+ const char *msg_mprotect = "", *msg_emutramp = "";
52985+ char *buffer_lib, *buffer_exe;
52986+
52987+ if (elf_p.p_flags & PF_NOMPROTECT)
52988+ msg_mprotect = "MPROTECT disabled";
52989+
52990+#ifdef CONFIG_PAX_EMUTRAMP
52991+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
52992+ msg_emutramp = "EMUTRAMP enabled";
52993+#endif
52994+
52995+ if (!msg_mprotect[0] && !msg_emutramp[0])
52996+ continue;
52997+
52998+ if (!printk_ratelimit())
52999+ continue;
53000+
53001+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
53002+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
53003+ if (buffer_lib && buffer_exe) {
53004+ char *path_lib, *path_exe;
53005+
53006+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
53007+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
53008+
53009+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
53010+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
53011+
53012+ }
53013+ free_page((unsigned long)buffer_exe);
53014+ free_page((unsigned long)buffer_lib);
53015+ continue;
53016+ }
53017+#endif
53018+
53019+ }
53020+ }
53021+}
53022+#endif
53023+
53024+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53025+
53026+extern int grsec_enable_log_rwxmaps;
53027+
53028+static void elf_handle_mmap(struct file *file)
53029+{
53030+ struct elfhdr elf_h;
53031+ struct elf_phdr elf_p;
53032+ unsigned long i;
53033+
53034+ if (!grsec_enable_log_rwxmaps)
53035+ return;
53036+
53037+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
53038+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
53039+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
53040+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
53041+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
53042+ return;
53043+
53044+ for (i = 0UL; i < elf_h.e_phnum; i++) {
53045+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
53046+ return;
53047+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
53048+ gr_log_ptgnustack(file);
53049+ }
53050+}
53051+#endif
53052+
53053 static int __init init_elf_binfmt(void)
53054 {
53055 register_binfmt(&elf_format);
53056diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
53057index d50bbe5..af3b649 100644
53058--- a/fs/binfmt_flat.c
53059+++ b/fs/binfmt_flat.c
53060@@ -566,7 +566,9 @@ static int load_flat_file(struct linux_binprm * bprm,
53061 realdatastart = (unsigned long) -ENOMEM;
53062 printk("Unable to allocate RAM for process data, errno %d\n",
53063 (int)-realdatastart);
53064+ down_write(&current->mm->mmap_sem);
53065 vm_munmap(textpos, text_len);
53066+ up_write(&current->mm->mmap_sem);
53067 ret = realdatastart;
53068 goto err;
53069 }
53070@@ -590,8 +592,10 @@ static int load_flat_file(struct linux_binprm * bprm,
53071 }
53072 if (IS_ERR_VALUE(result)) {
53073 printk("Unable to read data+bss, errno %d\n", (int)-result);
53074+ down_write(&current->mm->mmap_sem);
53075 vm_munmap(textpos, text_len);
53076 vm_munmap(realdatastart, len);
53077+ up_write(&current->mm->mmap_sem);
53078 ret = result;
53079 goto err;
53080 }
53081@@ -653,8 +657,10 @@ static int load_flat_file(struct linux_binprm * bprm,
53082 }
53083 if (IS_ERR_VALUE(result)) {
53084 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
53085+ down_write(&current->mm->mmap_sem);
53086 vm_munmap(textpos, text_len + data_len + extra +
53087 MAX_SHARED_LIBS * sizeof(unsigned long));
53088+ up_write(&current->mm->mmap_sem);
53089 ret = result;
53090 goto err;
53091 }
53092diff --git a/fs/bio.c b/fs/bio.c
53093index c5eae72..599e3cf 100644
53094--- a/fs/bio.c
53095+++ b/fs/bio.c
53096@@ -1106,7 +1106,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
53097 /*
53098 * Overflow, abort
53099 */
53100- if (end < start)
53101+ if (end < start || end - start > INT_MAX - nr_pages)
53102 return ERR_PTR(-EINVAL);
53103
53104 nr_pages += end - start;
53105@@ -1240,7 +1240,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
53106 /*
53107 * Overflow, abort
53108 */
53109- if (end < start)
53110+ if (end < start || end - start > INT_MAX - nr_pages)
53111 return ERR_PTR(-EINVAL);
53112
53113 nr_pages += end - start;
53114@@ -1502,7 +1502,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
53115 const int read = bio_data_dir(bio) == READ;
53116 struct bio_map_data *bmd = bio->bi_private;
53117 int i;
53118- char *p = bmd->sgvecs[0].iov_base;
53119+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
53120
53121 bio_for_each_segment_all(bvec, bio, i) {
53122 char *addr = page_address(bvec->bv_page);
53123diff --git a/fs/block_dev.c b/fs/block_dev.c
53124index 85f5c85..d6f0b1a 100644
53125--- a/fs/block_dev.c
53126+++ b/fs/block_dev.c
53127@@ -658,7 +658,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
53128 else if (bdev->bd_contains == bdev)
53129 return true; /* is a whole device which isn't held */
53130
53131- else if (whole->bd_holder == bd_may_claim)
53132+ else if (whole->bd_holder == (void *)bd_may_claim)
53133 return true; /* is a partition of a device that is being partitioned */
53134 else if (whole->bd_holder != NULL)
53135 return false; /* is a partition of a held device */
53136diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
53137index 7fb054b..ad36c67 100644
53138--- a/fs/btrfs/ctree.c
53139+++ b/fs/btrfs/ctree.c
53140@@ -1076,9 +1076,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
53141 free_extent_buffer(buf);
53142 add_root_to_dirty_list(root);
53143 } else {
53144- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
53145- parent_start = parent->start;
53146- else
53147+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
53148+ if (parent)
53149+ parent_start = parent->start;
53150+ else
53151+ parent_start = 0;
53152+ } else
53153 parent_start = 0;
53154
53155 WARN_ON(trans->transid != btrfs_header_generation(parent));
53156diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
53157index 0f81d67..0ad55fe 100644
53158--- a/fs/btrfs/ioctl.c
53159+++ b/fs/btrfs/ioctl.c
53160@@ -3084,9 +3084,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
53161 for (i = 0; i < num_types; i++) {
53162 struct btrfs_space_info *tmp;
53163
53164+ /* Don't copy in more than we allocated */
53165 if (!slot_count)
53166 break;
53167
53168+ slot_count--;
53169+
53170 info = NULL;
53171 rcu_read_lock();
53172 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
53173@@ -3108,10 +3111,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
53174 memcpy(dest, &space, sizeof(space));
53175 dest++;
53176 space_args.total_spaces++;
53177- slot_count--;
53178 }
53179- if (!slot_count)
53180- break;
53181 }
53182 up_read(&info->groups_sem);
53183 }
53184diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
53185index f0857e0..e7023c5 100644
53186--- a/fs/btrfs/super.c
53187+++ b/fs/btrfs/super.c
53188@@ -265,7 +265,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
53189 function, line, errstr);
53190 return;
53191 }
53192- ACCESS_ONCE(trans->transaction->aborted) = errno;
53193+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
53194 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
53195 }
53196 /*
53197diff --git a/fs/buffer.c b/fs/buffer.c
53198index d2a4d1b..df798ca 100644
53199--- a/fs/buffer.c
53200+++ b/fs/buffer.c
53201@@ -3367,7 +3367,7 @@ void __init buffer_init(void)
53202 bh_cachep = kmem_cache_create("buffer_head",
53203 sizeof(struct buffer_head), 0,
53204 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
53205- SLAB_MEM_SPREAD),
53206+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
53207 NULL);
53208
53209 /*
53210diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
53211index 622f469..e8d2d55 100644
53212--- a/fs/cachefiles/bind.c
53213+++ b/fs/cachefiles/bind.c
53214@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
53215 args);
53216
53217 /* start by checking things over */
53218- ASSERT(cache->fstop_percent >= 0 &&
53219- cache->fstop_percent < cache->fcull_percent &&
53220+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
53221 cache->fcull_percent < cache->frun_percent &&
53222 cache->frun_percent < 100);
53223
53224- ASSERT(cache->bstop_percent >= 0 &&
53225- cache->bstop_percent < cache->bcull_percent &&
53226+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
53227 cache->bcull_percent < cache->brun_percent &&
53228 cache->brun_percent < 100);
53229
53230diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
53231index 0a1467b..6a53245 100644
53232--- a/fs/cachefiles/daemon.c
53233+++ b/fs/cachefiles/daemon.c
53234@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
53235 if (n > buflen)
53236 return -EMSGSIZE;
53237
53238- if (copy_to_user(_buffer, buffer, n) != 0)
53239+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
53240 return -EFAULT;
53241
53242 return n;
53243@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
53244 if (test_bit(CACHEFILES_DEAD, &cache->flags))
53245 return -EIO;
53246
53247- if (datalen < 0 || datalen > PAGE_SIZE - 1)
53248+ if (datalen > PAGE_SIZE - 1)
53249 return -EOPNOTSUPP;
53250
53251 /* drag the command string into the kernel so we can parse it */
53252@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
53253 if (args[0] != '%' || args[1] != '\0')
53254 return -EINVAL;
53255
53256- if (fstop < 0 || fstop >= cache->fcull_percent)
53257+ if (fstop >= cache->fcull_percent)
53258 return cachefiles_daemon_range_error(cache, args);
53259
53260 cache->fstop_percent = fstop;
53261@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
53262 if (args[0] != '%' || args[1] != '\0')
53263 return -EINVAL;
53264
53265- if (bstop < 0 || bstop >= cache->bcull_percent)
53266+ if (bstop >= cache->bcull_percent)
53267 return cachefiles_daemon_range_error(cache, args);
53268
53269 cache->bstop_percent = bstop;
53270diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
53271index 4938251..7e01445 100644
53272--- a/fs/cachefiles/internal.h
53273+++ b/fs/cachefiles/internal.h
53274@@ -59,7 +59,7 @@ struct cachefiles_cache {
53275 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
53276 struct rb_root active_nodes; /* active nodes (can't be culled) */
53277 rwlock_t active_lock; /* lock for active_nodes */
53278- atomic_t gravecounter; /* graveyard uniquifier */
53279+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
53280 unsigned frun_percent; /* when to stop culling (% files) */
53281 unsigned fcull_percent; /* when to start culling (% files) */
53282 unsigned fstop_percent; /* when to stop allocating (% files) */
53283@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
53284 * proc.c
53285 */
53286 #ifdef CONFIG_CACHEFILES_HISTOGRAM
53287-extern atomic_t cachefiles_lookup_histogram[HZ];
53288-extern atomic_t cachefiles_mkdir_histogram[HZ];
53289-extern atomic_t cachefiles_create_histogram[HZ];
53290+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
53291+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
53292+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
53293
53294 extern int __init cachefiles_proc_init(void);
53295 extern void cachefiles_proc_cleanup(void);
53296 static inline
53297-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
53298+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
53299 {
53300 unsigned long jif = jiffies - start_jif;
53301 if (jif >= HZ)
53302 jif = HZ - 1;
53303- atomic_inc(&histogram[jif]);
53304+ atomic_inc_unchecked(&histogram[jif]);
53305 }
53306
53307 #else
53308diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
53309index 8c01c5fc..15f982e 100644
53310--- a/fs/cachefiles/namei.c
53311+++ b/fs/cachefiles/namei.c
53312@@ -317,7 +317,7 @@ try_again:
53313 /* first step is to make up a grave dentry in the graveyard */
53314 sprintf(nbuffer, "%08x%08x",
53315 (uint32_t) get_seconds(),
53316- (uint32_t) atomic_inc_return(&cache->gravecounter));
53317+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
53318
53319 /* do the multiway lock magic */
53320 trap = lock_rename(cache->graveyard, dir);
53321diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
53322index eccd339..4c1d995 100644
53323--- a/fs/cachefiles/proc.c
53324+++ b/fs/cachefiles/proc.c
53325@@ -14,9 +14,9 @@
53326 #include <linux/seq_file.h>
53327 #include "internal.h"
53328
53329-atomic_t cachefiles_lookup_histogram[HZ];
53330-atomic_t cachefiles_mkdir_histogram[HZ];
53331-atomic_t cachefiles_create_histogram[HZ];
53332+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
53333+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
53334+atomic_unchecked_t cachefiles_create_histogram[HZ];
53335
53336 /*
53337 * display the latency histogram
53338@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
53339 return 0;
53340 default:
53341 index = (unsigned long) v - 3;
53342- x = atomic_read(&cachefiles_lookup_histogram[index]);
53343- y = atomic_read(&cachefiles_mkdir_histogram[index]);
53344- z = atomic_read(&cachefiles_create_histogram[index]);
53345+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
53346+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
53347+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
53348 if (x == 0 && y == 0 && z == 0)
53349 return 0;
53350
53351diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
53352index 317f9ee..3d24511 100644
53353--- a/fs/cachefiles/rdwr.c
53354+++ b/fs/cachefiles/rdwr.c
53355@@ -966,7 +966,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
53356 old_fs = get_fs();
53357 set_fs(KERNEL_DS);
53358 ret = file->f_op->write(
53359- file, (const void __user *) data, len, &pos);
53360+ file, (const void __force_user *) data, len, &pos);
53361 set_fs(old_fs);
53362 kunmap(page);
53363 file_end_write(file);
53364diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
53365index f02d82b..2632cf86 100644
53366--- a/fs/ceph/dir.c
53367+++ b/fs/ceph/dir.c
53368@@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
53369 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
53370 struct ceph_mds_client *mdsc = fsc->mdsc;
53371 unsigned frag = fpos_frag(filp->f_pos);
53372- int off = fpos_off(filp->f_pos);
53373+ unsigned int off = fpos_off(filp->f_pos);
53374 int err;
53375 u32 ftype;
53376 struct ceph_mds_reply_info_parsed *rinfo;
53377diff --git a/fs/ceph/super.c b/fs/ceph/super.c
53378index 7d377c9..3fb6559 100644
53379--- a/fs/ceph/super.c
53380+++ b/fs/ceph/super.c
53381@@ -839,7 +839,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
53382 /*
53383 * construct our own bdi so we can control readahead, etc.
53384 */
53385-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
53386+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
53387
53388 static int ceph_register_bdi(struct super_block *sb,
53389 struct ceph_fs_client *fsc)
53390@@ -856,7 +856,7 @@ static int ceph_register_bdi(struct super_block *sb,
53391 default_backing_dev_info.ra_pages;
53392
53393 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
53394- atomic_long_inc_return(&bdi_seq));
53395+ atomic_long_inc_return_unchecked(&bdi_seq));
53396 if (!err)
53397 sb->s_bdi = &fsc->backing_dev_info;
53398 return err;
53399diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
53400index d597483..747901b 100644
53401--- a/fs/cifs/cifs_debug.c
53402+++ b/fs/cifs/cifs_debug.c
53403@@ -284,8 +284,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
53404
53405 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
53406 #ifdef CONFIG_CIFS_STATS2
53407- atomic_set(&totBufAllocCount, 0);
53408- atomic_set(&totSmBufAllocCount, 0);
53409+ atomic_set_unchecked(&totBufAllocCount, 0);
53410+ atomic_set_unchecked(&totSmBufAllocCount, 0);
53411 #endif /* CONFIG_CIFS_STATS2 */
53412 spin_lock(&cifs_tcp_ses_lock);
53413 list_for_each(tmp1, &cifs_tcp_ses_list) {
53414@@ -298,7 +298,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
53415 tcon = list_entry(tmp3,
53416 struct cifs_tcon,
53417 tcon_list);
53418- atomic_set(&tcon->num_smbs_sent, 0);
53419+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
53420 if (server->ops->clear_stats)
53421 server->ops->clear_stats(tcon);
53422 }
53423@@ -330,8 +330,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
53424 smBufAllocCount.counter, cifs_min_small);
53425 #ifdef CONFIG_CIFS_STATS2
53426 seq_printf(m, "Total Large %d Small %d Allocations\n",
53427- atomic_read(&totBufAllocCount),
53428- atomic_read(&totSmBufAllocCount));
53429+ atomic_read_unchecked(&totBufAllocCount),
53430+ atomic_read_unchecked(&totSmBufAllocCount));
53431 #endif /* CONFIG_CIFS_STATS2 */
53432
53433 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
53434@@ -360,7 +360,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
53435 if (tcon->need_reconnect)
53436 seq_puts(m, "\tDISCONNECTED ");
53437 seq_printf(m, "\nSMBs: %d",
53438- atomic_read(&tcon->num_smbs_sent));
53439+ atomic_read_unchecked(&tcon->num_smbs_sent));
53440 if (server->ops->print_stats)
53441 server->ops->print_stats(m, tcon);
53442 }
53443diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
53444index 3752b9f..8db5569 100644
53445--- a/fs/cifs/cifsfs.c
53446+++ b/fs/cifs/cifsfs.c
53447@@ -1035,7 +1035,7 @@ cifs_init_request_bufs(void)
53448 */
53449 cifs_req_cachep = kmem_cache_create("cifs_request",
53450 CIFSMaxBufSize + max_hdr_size, 0,
53451- SLAB_HWCACHE_ALIGN, NULL);
53452+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
53453 if (cifs_req_cachep == NULL)
53454 return -ENOMEM;
53455
53456@@ -1062,7 +1062,7 @@ cifs_init_request_bufs(void)
53457 efficient to alloc 1 per page off the slab compared to 17K (5page)
53458 alloc of large cifs buffers even when page debugging is on */
53459 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
53460- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
53461+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
53462 NULL);
53463 if (cifs_sm_req_cachep == NULL) {
53464 mempool_destroy(cifs_req_poolp);
53465@@ -1147,8 +1147,8 @@ init_cifs(void)
53466 atomic_set(&bufAllocCount, 0);
53467 atomic_set(&smBufAllocCount, 0);
53468 #ifdef CONFIG_CIFS_STATS2
53469- atomic_set(&totBufAllocCount, 0);
53470- atomic_set(&totSmBufAllocCount, 0);
53471+ atomic_set_unchecked(&totBufAllocCount, 0);
53472+ atomic_set_unchecked(&totSmBufAllocCount, 0);
53473 #endif /* CONFIG_CIFS_STATS2 */
53474
53475 atomic_set(&midCount, 0);
53476diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
53477index ea3a0b3..0194e39 100644
53478--- a/fs/cifs/cifsglob.h
53479+++ b/fs/cifs/cifsglob.h
53480@@ -752,35 +752,35 @@ struct cifs_tcon {
53481 __u16 Flags; /* optional support bits */
53482 enum statusEnum tidStatus;
53483 #ifdef CONFIG_CIFS_STATS
53484- atomic_t num_smbs_sent;
53485+ atomic_unchecked_t num_smbs_sent;
53486 union {
53487 struct {
53488- atomic_t num_writes;
53489- atomic_t num_reads;
53490- atomic_t num_flushes;
53491- atomic_t num_oplock_brks;
53492- atomic_t num_opens;
53493- atomic_t num_closes;
53494- atomic_t num_deletes;
53495- atomic_t num_mkdirs;
53496- atomic_t num_posixopens;
53497- atomic_t num_posixmkdirs;
53498- atomic_t num_rmdirs;
53499- atomic_t num_renames;
53500- atomic_t num_t2renames;
53501- atomic_t num_ffirst;
53502- atomic_t num_fnext;
53503- atomic_t num_fclose;
53504- atomic_t num_hardlinks;
53505- atomic_t num_symlinks;
53506- atomic_t num_locks;
53507- atomic_t num_acl_get;
53508- atomic_t num_acl_set;
53509+ atomic_unchecked_t num_writes;
53510+ atomic_unchecked_t num_reads;
53511+ atomic_unchecked_t num_flushes;
53512+ atomic_unchecked_t num_oplock_brks;
53513+ atomic_unchecked_t num_opens;
53514+ atomic_unchecked_t num_closes;
53515+ atomic_unchecked_t num_deletes;
53516+ atomic_unchecked_t num_mkdirs;
53517+ atomic_unchecked_t num_posixopens;
53518+ atomic_unchecked_t num_posixmkdirs;
53519+ atomic_unchecked_t num_rmdirs;
53520+ atomic_unchecked_t num_renames;
53521+ atomic_unchecked_t num_t2renames;
53522+ atomic_unchecked_t num_ffirst;
53523+ atomic_unchecked_t num_fnext;
53524+ atomic_unchecked_t num_fclose;
53525+ atomic_unchecked_t num_hardlinks;
53526+ atomic_unchecked_t num_symlinks;
53527+ atomic_unchecked_t num_locks;
53528+ atomic_unchecked_t num_acl_get;
53529+ atomic_unchecked_t num_acl_set;
53530 } cifs_stats;
53531 #ifdef CONFIG_CIFS_SMB2
53532 struct {
53533- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
53534- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
53535+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
53536+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
53537 } smb2_stats;
53538 #endif /* CONFIG_CIFS_SMB2 */
53539 } stats;
53540@@ -1081,7 +1081,7 @@ convert_delimiter(char *path, char delim)
53541 }
53542
53543 #ifdef CONFIG_CIFS_STATS
53544-#define cifs_stats_inc atomic_inc
53545+#define cifs_stats_inc atomic_inc_unchecked
53546
53547 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
53548 unsigned int bytes)
53549@@ -1446,8 +1446,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
53550 /* Various Debug counters */
53551 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
53552 #ifdef CONFIG_CIFS_STATS2
53553-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
53554-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
53555+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
53556+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
53557 #endif
53558 GLOBAL_EXTERN atomic_t smBufAllocCount;
53559 GLOBAL_EXTERN atomic_t midCount;
53560diff --git a/fs/cifs/link.c b/fs/cifs/link.c
53561index b83c3f5..6437caa 100644
53562--- a/fs/cifs/link.c
53563+++ b/fs/cifs/link.c
53564@@ -616,7 +616,7 @@ symlink_exit:
53565
53566 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
53567 {
53568- char *p = nd_get_link(nd);
53569+ const char *p = nd_get_link(nd);
53570 if (!IS_ERR(p))
53571 kfree(p);
53572 }
53573diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
53574index 1bec014..f329411 100644
53575--- a/fs/cifs/misc.c
53576+++ b/fs/cifs/misc.c
53577@@ -169,7 +169,7 @@ cifs_buf_get(void)
53578 memset(ret_buf, 0, buf_size + 3);
53579 atomic_inc(&bufAllocCount);
53580 #ifdef CONFIG_CIFS_STATS2
53581- atomic_inc(&totBufAllocCount);
53582+ atomic_inc_unchecked(&totBufAllocCount);
53583 #endif /* CONFIG_CIFS_STATS2 */
53584 }
53585
53586@@ -204,7 +204,7 @@ cifs_small_buf_get(void)
53587 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
53588 atomic_inc(&smBufAllocCount);
53589 #ifdef CONFIG_CIFS_STATS2
53590- atomic_inc(&totSmBufAllocCount);
53591+ atomic_inc_unchecked(&totSmBufAllocCount);
53592 #endif /* CONFIG_CIFS_STATS2 */
53593
53594 }
53595diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
53596index 3efdb9d..e845a5e 100644
53597--- a/fs/cifs/smb1ops.c
53598+++ b/fs/cifs/smb1ops.c
53599@@ -591,27 +591,27 @@ static void
53600 cifs_clear_stats(struct cifs_tcon *tcon)
53601 {
53602 #ifdef CONFIG_CIFS_STATS
53603- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
53604- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
53605- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
53606- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
53607- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
53608- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
53609- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
53610- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
53611- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
53612- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
53613- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
53614- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
53615- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
53616- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
53617- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
53618- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
53619- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
53620- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
53621- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
53622- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
53623- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
53624+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
53625+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
53626+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
53627+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
53628+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
53629+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
53630+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
53631+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
53632+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
53633+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
53634+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
53635+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
53636+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
53637+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
53638+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
53639+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
53640+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
53641+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
53642+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
53643+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
53644+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
53645 #endif
53646 }
53647
53648@@ -620,36 +620,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
53649 {
53650 #ifdef CONFIG_CIFS_STATS
53651 seq_printf(m, " Oplocks breaks: %d",
53652- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
53653+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
53654 seq_printf(m, "\nReads: %d Bytes: %llu",
53655- atomic_read(&tcon->stats.cifs_stats.num_reads),
53656+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
53657 (long long)(tcon->bytes_read));
53658 seq_printf(m, "\nWrites: %d Bytes: %llu",
53659- atomic_read(&tcon->stats.cifs_stats.num_writes),
53660+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
53661 (long long)(tcon->bytes_written));
53662 seq_printf(m, "\nFlushes: %d",
53663- atomic_read(&tcon->stats.cifs_stats.num_flushes));
53664+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
53665 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
53666- atomic_read(&tcon->stats.cifs_stats.num_locks),
53667- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
53668- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
53669+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
53670+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
53671+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
53672 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
53673- atomic_read(&tcon->stats.cifs_stats.num_opens),
53674- atomic_read(&tcon->stats.cifs_stats.num_closes),
53675- atomic_read(&tcon->stats.cifs_stats.num_deletes));
53676+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
53677+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
53678+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
53679 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
53680- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
53681- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
53682+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
53683+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
53684 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
53685- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
53686- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
53687+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
53688+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
53689 seq_printf(m, "\nRenames: %d T2 Renames %d",
53690- atomic_read(&tcon->stats.cifs_stats.num_renames),
53691- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
53692+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
53693+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
53694 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
53695- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
53696- atomic_read(&tcon->stats.cifs_stats.num_fnext),
53697- atomic_read(&tcon->stats.cifs_stats.num_fclose));
53698+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
53699+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
53700+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
53701 #endif
53702 }
53703
53704diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
53705index f2e76f3..c44fac7 100644
53706--- a/fs/cifs/smb2ops.c
53707+++ b/fs/cifs/smb2ops.c
53708@@ -274,8 +274,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
53709 #ifdef CONFIG_CIFS_STATS
53710 int i;
53711 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
53712- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
53713- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
53714+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
53715+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
53716 }
53717 #endif
53718 }
53719@@ -284,66 +284,66 @@ static void
53720 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
53721 {
53722 #ifdef CONFIG_CIFS_STATS
53723- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
53724- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
53725+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
53726+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
53727 seq_printf(m, "\nNegotiates: %d sent %d failed",
53728- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
53729- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
53730+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
53731+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
53732 seq_printf(m, "\nSessionSetups: %d sent %d failed",
53733- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
53734- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
53735+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
53736+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
53737 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
53738 seq_printf(m, "\nLogoffs: %d sent %d failed",
53739- atomic_read(&sent[SMB2_LOGOFF_HE]),
53740- atomic_read(&failed[SMB2_LOGOFF_HE]));
53741+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
53742+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
53743 seq_printf(m, "\nTreeConnects: %d sent %d failed",
53744- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
53745- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
53746+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
53747+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
53748 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
53749- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
53750- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
53751+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
53752+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
53753 seq_printf(m, "\nCreates: %d sent %d failed",
53754- atomic_read(&sent[SMB2_CREATE_HE]),
53755- atomic_read(&failed[SMB2_CREATE_HE]));
53756+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
53757+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
53758 seq_printf(m, "\nCloses: %d sent %d failed",
53759- atomic_read(&sent[SMB2_CLOSE_HE]),
53760- atomic_read(&failed[SMB2_CLOSE_HE]));
53761+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
53762+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
53763 seq_printf(m, "\nFlushes: %d sent %d failed",
53764- atomic_read(&sent[SMB2_FLUSH_HE]),
53765- atomic_read(&failed[SMB2_FLUSH_HE]));
53766+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
53767+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
53768 seq_printf(m, "\nReads: %d sent %d failed",
53769- atomic_read(&sent[SMB2_READ_HE]),
53770- atomic_read(&failed[SMB2_READ_HE]));
53771+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
53772+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
53773 seq_printf(m, "\nWrites: %d sent %d failed",
53774- atomic_read(&sent[SMB2_WRITE_HE]),
53775- atomic_read(&failed[SMB2_WRITE_HE]));
53776+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
53777+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
53778 seq_printf(m, "\nLocks: %d sent %d failed",
53779- atomic_read(&sent[SMB2_LOCK_HE]),
53780- atomic_read(&failed[SMB2_LOCK_HE]));
53781+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
53782+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
53783 seq_printf(m, "\nIOCTLs: %d sent %d failed",
53784- atomic_read(&sent[SMB2_IOCTL_HE]),
53785- atomic_read(&failed[SMB2_IOCTL_HE]));
53786+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
53787+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
53788 seq_printf(m, "\nCancels: %d sent %d failed",
53789- atomic_read(&sent[SMB2_CANCEL_HE]),
53790- atomic_read(&failed[SMB2_CANCEL_HE]));
53791+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
53792+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
53793 seq_printf(m, "\nEchos: %d sent %d failed",
53794- atomic_read(&sent[SMB2_ECHO_HE]),
53795- atomic_read(&failed[SMB2_ECHO_HE]));
53796+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
53797+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
53798 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
53799- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
53800- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
53801+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
53802+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
53803 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
53804- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
53805- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
53806+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
53807+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
53808 seq_printf(m, "\nQueryInfos: %d sent %d failed",
53809- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
53810- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
53811+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
53812+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
53813 seq_printf(m, "\nSetInfos: %d sent %d failed",
53814- atomic_read(&sent[SMB2_SET_INFO_HE]),
53815- atomic_read(&failed[SMB2_SET_INFO_HE]));
53816+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
53817+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
53818 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
53819- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
53820- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
53821+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
53822+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
53823 #endif
53824 }
53825
53826diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
53827index 2b95ce2..d079d75 100644
53828--- a/fs/cifs/smb2pdu.c
53829+++ b/fs/cifs/smb2pdu.c
53830@@ -1760,8 +1760,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
53831 default:
53832 cifs_dbg(VFS, "info level %u isn't supported\n",
53833 srch_inf->info_level);
53834- rc = -EINVAL;
53835- goto qdir_exit;
53836+ return -EINVAL;
53837 }
53838
53839 req->FileIndex = cpu_to_le32(index);
53840diff --git a/fs/coda/cache.c b/fs/coda/cache.c
53841index 1da168c..8bc7ff6 100644
53842--- a/fs/coda/cache.c
53843+++ b/fs/coda/cache.c
53844@@ -24,7 +24,7 @@
53845 #include "coda_linux.h"
53846 #include "coda_cache.h"
53847
53848-static atomic_t permission_epoch = ATOMIC_INIT(0);
53849+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
53850
53851 /* replace or extend an acl cache hit */
53852 void coda_cache_enter(struct inode *inode, int mask)
53853@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
53854 struct coda_inode_info *cii = ITOC(inode);
53855
53856 spin_lock(&cii->c_lock);
53857- cii->c_cached_epoch = atomic_read(&permission_epoch);
53858+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
53859 if (!uid_eq(cii->c_uid, current_fsuid())) {
53860 cii->c_uid = current_fsuid();
53861 cii->c_cached_perm = mask;
53862@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
53863 {
53864 struct coda_inode_info *cii = ITOC(inode);
53865 spin_lock(&cii->c_lock);
53866- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
53867+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
53868 spin_unlock(&cii->c_lock);
53869 }
53870
53871 /* remove all acl caches */
53872 void coda_cache_clear_all(struct super_block *sb)
53873 {
53874- atomic_inc(&permission_epoch);
53875+ atomic_inc_unchecked(&permission_epoch);
53876 }
53877
53878
53879@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
53880 spin_lock(&cii->c_lock);
53881 hit = (mask & cii->c_cached_perm) == mask &&
53882 uid_eq(cii->c_uid, current_fsuid()) &&
53883- cii->c_cached_epoch == atomic_read(&permission_epoch);
53884+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
53885 spin_unlock(&cii->c_lock);
53886
53887 return hit;
53888diff --git a/fs/compat.c b/fs/compat.c
53889index fc3b55d..7b568ae 100644
53890--- a/fs/compat.c
53891+++ b/fs/compat.c
53892@@ -54,7 +54,7 @@
53893 #include <asm/ioctls.h>
53894 #include "internal.h"
53895
53896-int compat_log = 1;
53897+int compat_log = 0;
53898
53899 int compat_printk(const char *fmt, ...)
53900 {
53901@@ -488,7 +488,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
53902
53903 set_fs(KERNEL_DS);
53904 /* The __user pointer cast is valid because of the set_fs() */
53905- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
53906+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
53907 set_fs(oldfs);
53908 /* truncating is ok because it's a user address */
53909 if (!ret)
53910@@ -546,7 +546,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
53911 goto out;
53912
53913 ret = -EINVAL;
53914- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
53915+ if (nr_segs > UIO_MAXIOV)
53916 goto out;
53917 if (nr_segs > fast_segs) {
53918 ret = -ENOMEM;
53919@@ -833,6 +833,7 @@ struct compat_old_linux_dirent {
53920
53921 struct compat_readdir_callback {
53922 struct compat_old_linux_dirent __user *dirent;
53923+ struct file * file;
53924 int result;
53925 };
53926
53927@@ -850,6 +851,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
53928 buf->result = -EOVERFLOW;
53929 return -EOVERFLOW;
53930 }
53931+
53932+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53933+ return 0;
53934+
53935 buf->result++;
53936 dirent = buf->dirent;
53937 if (!access_ok(VERIFY_WRITE, dirent,
53938@@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
53939
53940 buf.result = 0;
53941 buf.dirent = dirent;
53942+ buf.file = f.file;
53943
53944 error = vfs_readdir(f.file, compat_fillonedir, &buf);
53945 if (buf.result)
53946@@ -899,6 +905,7 @@ struct compat_linux_dirent {
53947 struct compat_getdents_callback {
53948 struct compat_linux_dirent __user *current_dir;
53949 struct compat_linux_dirent __user *previous;
53950+ struct file * file;
53951 int count;
53952 int error;
53953 };
53954@@ -920,6 +927,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
53955 buf->error = -EOVERFLOW;
53956 return -EOVERFLOW;
53957 }
53958+
53959+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53960+ return 0;
53961+
53962 dirent = buf->previous;
53963 if (dirent) {
53964 if (__put_user(offset, &dirent->d_off))
53965@@ -965,6 +976,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
53966 buf.previous = NULL;
53967 buf.count = count;
53968 buf.error = 0;
53969+ buf.file = f.file;
53970
53971 error = vfs_readdir(f.file, compat_filldir, &buf);
53972 if (error >= 0)
53973@@ -985,6 +997,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
53974 struct compat_getdents_callback64 {
53975 struct linux_dirent64 __user *current_dir;
53976 struct linux_dirent64 __user *previous;
53977+ struct file * file;
53978 int count;
53979 int error;
53980 };
53981@@ -1001,6 +1014,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
53982 buf->error = -EINVAL; /* only used if we fail.. */
53983 if (reclen > buf->count)
53984 return -EINVAL;
53985+
53986+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53987+ return 0;
53988+
53989 dirent = buf->previous;
53990
53991 if (dirent) {
53992@@ -1050,13 +1067,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
53993 buf.previous = NULL;
53994 buf.count = count;
53995 buf.error = 0;
53996+ buf.file = f.file;
53997
53998 error = vfs_readdir(f.file, compat_filldir64, &buf);
53999 if (error >= 0)
54000 error = buf.error;
54001 lastdirent = buf.previous;
54002 if (lastdirent) {
54003- typeof(lastdirent->d_off) d_off = f.file->f_pos;
54004+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
54005 if (__put_user_unaligned(d_off, &lastdirent->d_off))
54006 error = -EFAULT;
54007 else
54008diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
54009index a81147e..20bf2b5 100644
54010--- a/fs/compat_binfmt_elf.c
54011+++ b/fs/compat_binfmt_elf.c
54012@@ -30,11 +30,13 @@
54013 #undef elf_phdr
54014 #undef elf_shdr
54015 #undef elf_note
54016+#undef elf_dyn
54017 #undef elf_addr_t
54018 #define elfhdr elf32_hdr
54019 #define elf_phdr elf32_phdr
54020 #define elf_shdr elf32_shdr
54021 #define elf_note elf32_note
54022+#define elf_dyn Elf32_Dyn
54023 #define elf_addr_t Elf32_Addr
54024
54025 /*
54026diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
54027index 996cdc5..15e2f33 100644
54028--- a/fs/compat_ioctl.c
54029+++ b/fs/compat_ioctl.c
54030@@ -622,7 +622,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
54031 return -EFAULT;
54032 if (__get_user(udata, &ss32->iomem_base))
54033 return -EFAULT;
54034- ss.iomem_base = compat_ptr(udata);
54035+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
54036 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
54037 __get_user(ss.port_high, &ss32->port_high))
54038 return -EFAULT;
54039@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
54040 for (i = 0; i < nmsgs; i++) {
54041 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
54042 return -EFAULT;
54043- if (get_user(datap, &umsgs[i].buf) ||
54044- put_user(compat_ptr(datap), &tmsgs[i].buf))
54045+ if (get_user(datap, (u8 __user * __user *)&umsgs[i].buf) ||
54046+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
54047 return -EFAULT;
54048 }
54049 return sys_ioctl(fd, cmd, (unsigned long)tdata);
54050@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
54051 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
54052 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
54053 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
54054- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
54055+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
54056 return -EFAULT;
54057
54058 return ioctl_preallocate(file, p);
54059@@ -1619,8 +1619,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
54060 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
54061 {
54062 unsigned int a, b;
54063- a = *(unsigned int *)p;
54064- b = *(unsigned int *)q;
54065+ a = *(const unsigned int *)p;
54066+ b = *(const unsigned int *)q;
54067 if (a > b)
54068 return 1;
54069 if (a < b)
54070diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
54071index 7aabc6a..34c1197 100644
54072--- a/fs/configfs/dir.c
54073+++ b/fs/configfs/dir.c
54074@@ -1565,7 +1565,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
54075 }
54076 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
54077 struct configfs_dirent *next;
54078- const char * name;
54079+ const unsigned char * name;
54080+ char d_name[sizeof(next->s_dentry->d_iname)];
54081 int len;
54082 struct inode *inode = NULL;
54083
54084@@ -1575,7 +1576,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
54085 continue;
54086
54087 name = configfs_get_name(next);
54088- len = strlen(name);
54089+ if (next->s_dentry && name == next->s_dentry->d_iname) {
54090+ len = next->s_dentry->d_name.len;
54091+ memcpy(d_name, name, len);
54092+ name = d_name;
54093+ } else
54094+ len = strlen(name);
54095
54096 /*
54097 * We'll have a dentry and an inode for
54098diff --git a/fs/coredump.c b/fs/coredump.c
54099index dafafba..10b3b27 100644
54100--- a/fs/coredump.c
54101+++ b/fs/coredump.c
54102@@ -52,7 +52,7 @@ struct core_name {
54103 char *corename;
54104 int used, size;
54105 };
54106-static atomic_t call_count = ATOMIC_INIT(1);
54107+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
54108
54109 /* The maximal length of core_pattern is also specified in sysctl.c */
54110
54111@@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
54112 {
54113 char *old_corename = cn->corename;
54114
54115- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
54116+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
54117 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
54118
54119 if (!cn->corename) {
54120@@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
54121 int pid_in_pattern = 0;
54122 int err = 0;
54123
54124- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
54125+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
54126 cn->corename = kmalloc(cn->size, GFP_KERNEL);
54127 cn->used = 0;
54128
54129@@ -435,8 +435,8 @@ static void wait_for_dump_helpers(struct file *file)
54130 struct pipe_inode_info *pipe = file->private_data;
54131
54132 pipe_lock(pipe);
54133- pipe->readers++;
54134- pipe->writers--;
54135+ atomic_inc(&pipe->readers);
54136+ atomic_dec(&pipe->writers);
54137 wake_up_interruptible_sync(&pipe->wait);
54138 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
54139 pipe_unlock(pipe);
54140@@ -445,11 +445,11 @@ static void wait_for_dump_helpers(struct file *file)
54141 * We actually want wait_event_freezable() but then we need
54142 * to clear TIF_SIGPENDING and improve dump_interrupted().
54143 */
54144- wait_event_interruptible(pipe->wait, pipe->readers == 1);
54145+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
54146
54147 pipe_lock(pipe);
54148- pipe->readers--;
54149- pipe->writers++;
54150+ atomic_dec(&pipe->readers);
54151+ atomic_inc(&pipe->writers);
54152 pipe_unlock(pipe);
54153 }
54154
54155@@ -496,7 +496,8 @@ void do_coredump(siginfo_t *siginfo)
54156 struct files_struct *displaced;
54157 bool need_nonrelative = false;
54158 bool core_dumped = false;
54159- static atomic_t core_dump_count = ATOMIC_INIT(0);
54160+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
54161+ long signr = siginfo->si_signo;
54162 struct coredump_params cprm = {
54163 .siginfo = siginfo,
54164 .regs = signal_pt_regs(),
54165@@ -509,7 +510,10 @@ void do_coredump(siginfo_t *siginfo)
54166 .mm_flags = mm->flags,
54167 };
54168
54169- audit_core_dumps(siginfo->si_signo);
54170+ audit_core_dumps(signr);
54171+
54172+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
54173+ gr_handle_brute_attach(cprm.mm_flags);
54174
54175 binfmt = mm->binfmt;
54176 if (!binfmt || !binfmt->core_dump)
54177@@ -533,7 +537,7 @@ void do_coredump(siginfo_t *siginfo)
54178 need_nonrelative = true;
54179 }
54180
54181- retval = coredump_wait(siginfo->si_signo, &core_state);
54182+ retval = coredump_wait(signr, &core_state);
54183 if (retval < 0)
54184 goto fail_creds;
54185
54186@@ -576,7 +580,7 @@ void do_coredump(siginfo_t *siginfo)
54187 }
54188 cprm.limit = RLIM_INFINITY;
54189
54190- dump_count = atomic_inc_return(&core_dump_count);
54191+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
54192 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
54193 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
54194 task_tgid_vnr(current), current->comm);
54195@@ -608,6 +612,8 @@ void do_coredump(siginfo_t *siginfo)
54196 } else {
54197 struct inode *inode;
54198
54199+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
54200+
54201 if (cprm.limit < binfmt->min_coredump)
54202 goto fail_unlock;
54203
54204@@ -666,7 +672,7 @@ close_fail:
54205 filp_close(cprm.file, NULL);
54206 fail_dropcount:
54207 if (ispipe)
54208- atomic_dec(&core_dump_count);
54209+ atomic_dec_unchecked(&core_dump_count);
54210 fail_unlock:
54211 kfree(cn.corename);
54212 fail_corename:
54213@@ -687,7 +693,7 @@ int dump_write(struct file *file, const void *addr, int nr)
54214 {
54215 return !dump_interrupted() &&
54216 access_ok(VERIFY_READ, addr, nr) &&
54217- file->f_op->write(file, addr, nr, &file->f_pos) == nr;
54218+ file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
54219 }
54220 EXPORT_SYMBOL(dump_write);
54221
54222diff --git a/fs/dcache.c b/fs/dcache.c
54223index f09b908..04b9690 100644
54224--- a/fs/dcache.c
54225+++ b/fs/dcache.c
54226@@ -3086,7 +3086,8 @@ void __init vfs_caches_init(unsigned long mempages)
54227 mempages -= reserve;
54228
54229 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
54230- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
54231+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
54232+ SLAB_NO_SANITIZE, NULL);
54233
54234 dcache_init();
54235 inode_init();
54236diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
54237index c7c83ff..bda9461 100644
54238--- a/fs/debugfs/inode.c
54239+++ b/fs/debugfs/inode.c
54240@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
54241 */
54242 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
54243 {
54244+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
54245+ return __create_file(name, S_IFDIR | S_IRWXU,
54246+#else
54247 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
54248+#endif
54249 parent, NULL, NULL);
54250 }
54251 EXPORT_SYMBOL_GPL(debugfs_create_dir);
54252diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
54253index 5eab400..810a3f5 100644
54254--- a/fs/ecryptfs/inode.c
54255+++ b/fs/ecryptfs/inode.c
54256@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
54257 old_fs = get_fs();
54258 set_fs(get_ds());
54259 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
54260- (char __user *)lower_buf,
54261+ (char __force_user *)lower_buf,
54262 PATH_MAX);
54263 set_fs(old_fs);
54264 if (rc < 0)
54265@@ -706,7 +706,7 @@ out:
54266 static void
54267 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
54268 {
54269- char *buf = nd_get_link(nd);
54270+ const char *buf = nd_get_link(nd);
54271 if (!IS_ERR(buf)) {
54272 /* Free the char* */
54273 kfree(buf);
54274diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
54275index e4141f2..d8263e8 100644
54276--- a/fs/ecryptfs/miscdev.c
54277+++ b/fs/ecryptfs/miscdev.c
54278@@ -304,7 +304,7 @@ check_list:
54279 goto out_unlock_msg_ctx;
54280 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
54281 if (msg_ctx->msg) {
54282- if (copy_to_user(&buf[i], packet_length, packet_length_size))
54283+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
54284 goto out_unlock_msg_ctx;
54285 i += packet_length_size;
54286 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
54287diff --git a/fs/exec.c b/fs/exec.c
54288index 1f44670..3c84660 100644
54289--- a/fs/exec.c
54290+++ b/fs/exec.c
54291@@ -55,8 +55,20 @@
54292 #include <linux/pipe_fs_i.h>
54293 #include <linux/oom.h>
54294 #include <linux/compat.h>
54295+#include <linux/random.h>
54296+#include <linux/seq_file.h>
54297+#include <linux/coredump.h>
54298+#include <linux/mman.h>
54299+
54300+#ifdef CONFIG_PAX_REFCOUNT
54301+#include <linux/kallsyms.h>
54302+#include <linux/kdebug.h>
54303+#endif
54304+
54305+#include <trace/events/fs.h>
54306
54307 #include <asm/uaccess.h>
54308+#include <asm/sections.h>
54309 #include <asm/mmu_context.h>
54310 #include <asm/tlb.h>
54311
54312@@ -66,17 +78,32 @@
54313
54314 #include <trace/events/sched.h>
54315
54316+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54317+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
54318+{
54319+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
54320+}
54321+#endif
54322+
54323+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
54324+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
54325+EXPORT_SYMBOL(pax_set_initial_flags_func);
54326+#endif
54327+
54328 int suid_dumpable = 0;
54329
54330 static LIST_HEAD(formats);
54331 static DEFINE_RWLOCK(binfmt_lock);
54332
54333+extern int gr_process_kernel_exec_ban(void);
54334+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
54335+
54336 void __register_binfmt(struct linux_binfmt * fmt, int insert)
54337 {
54338 BUG_ON(!fmt);
54339 write_lock(&binfmt_lock);
54340- insert ? list_add(&fmt->lh, &formats) :
54341- list_add_tail(&fmt->lh, &formats);
54342+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
54343+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
54344 write_unlock(&binfmt_lock);
54345 }
54346
54347@@ -85,7 +112,7 @@ EXPORT_SYMBOL(__register_binfmt);
54348 void unregister_binfmt(struct linux_binfmt * fmt)
54349 {
54350 write_lock(&binfmt_lock);
54351- list_del(&fmt->lh);
54352+ pax_list_del((struct list_head *)&fmt->lh);
54353 write_unlock(&binfmt_lock);
54354 }
54355
54356@@ -180,18 +207,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
54357 int write)
54358 {
54359 struct page *page;
54360- int ret;
54361
54362-#ifdef CONFIG_STACK_GROWSUP
54363- if (write) {
54364- ret = expand_downwards(bprm->vma, pos);
54365- if (ret < 0)
54366- return NULL;
54367- }
54368-#endif
54369- ret = get_user_pages(current, bprm->mm, pos,
54370- 1, write, 1, &page, NULL);
54371- if (ret <= 0)
54372+ if (0 > expand_downwards(bprm->vma, pos))
54373+ return NULL;
54374+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
54375 return NULL;
54376
54377 if (write) {
54378@@ -207,6 +226,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
54379 if (size <= ARG_MAX)
54380 return page;
54381
54382+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54383+ // only allow 512KB for argv+env on suid/sgid binaries
54384+ // to prevent easy ASLR exhaustion
54385+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
54386+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
54387+ (size > (512 * 1024))) {
54388+ put_page(page);
54389+ return NULL;
54390+ }
54391+#endif
54392+
54393 /*
54394 * Limit to 1/4-th the stack size for the argv+env strings.
54395 * This ensures that:
54396@@ -266,6 +296,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
54397 vma->vm_end = STACK_TOP_MAX;
54398 vma->vm_start = vma->vm_end - PAGE_SIZE;
54399 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
54400+
54401+#ifdef CONFIG_PAX_SEGMEXEC
54402+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
54403+#endif
54404+
54405 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
54406 INIT_LIST_HEAD(&vma->anon_vma_chain);
54407
54408@@ -276,6 +311,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
54409 mm->stack_vm = mm->total_vm = 1;
54410 up_write(&mm->mmap_sem);
54411 bprm->p = vma->vm_end - sizeof(void *);
54412+
54413+#ifdef CONFIG_PAX_RANDUSTACK
54414+ if (randomize_va_space)
54415+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
54416+#endif
54417+
54418 return 0;
54419 err:
54420 up_write(&mm->mmap_sem);
54421@@ -396,7 +437,7 @@ struct user_arg_ptr {
54422 } ptr;
54423 };
54424
54425-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
54426+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
54427 {
54428 const char __user *native;
54429
54430@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
54431 compat_uptr_t compat;
54432
54433 if (get_user(compat, argv.ptr.compat + nr))
54434- return ERR_PTR(-EFAULT);
54435+ return (const char __force_user *)ERR_PTR(-EFAULT);
54436
54437 return compat_ptr(compat);
54438 }
54439 #endif
54440
54441 if (get_user(native, argv.ptr.native + nr))
54442- return ERR_PTR(-EFAULT);
54443+ return (const char __force_user *)ERR_PTR(-EFAULT);
54444
54445 return native;
54446 }
54447@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
54448 if (!p)
54449 break;
54450
54451- if (IS_ERR(p))
54452+ if (IS_ERR((const char __force_kernel *)p))
54453 return -EFAULT;
54454
54455 if (i >= max)
54456@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
54457
54458 ret = -EFAULT;
54459 str = get_user_arg_ptr(argv, argc);
54460- if (IS_ERR(str))
54461+ if (IS_ERR((const char __force_kernel *)str))
54462 goto out;
54463
54464 len = strnlen_user(str, MAX_ARG_STRLEN);
54465@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
54466 int r;
54467 mm_segment_t oldfs = get_fs();
54468 struct user_arg_ptr argv = {
54469- .ptr.native = (const char __user *const __user *)__argv,
54470+ .ptr.native = (const char __force_user * const __force_user *)__argv,
54471 };
54472
54473 set_fs(KERNEL_DS);
54474@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
54475 unsigned long new_end = old_end - shift;
54476 struct mmu_gather tlb;
54477
54478- BUG_ON(new_start > new_end);
54479+ if (new_start >= new_end || new_start < mmap_min_addr)
54480+ return -ENOMEM;
54481
54482 /*
54483 * ensure there are no vmas between where we want to go
54484@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
54485 if (vma != find_vma(mm, new_start))
54486 return -EFAULT;
54487
54488+#ifdef CONFIG_PAX_SEGMEXEC
54489+ BUG_ON(pax_find_mirror_vma(vma));
54490+#endif
54491+
54492 /*
54493 * cover the whole range: [new_start, old_end)
54494 */
54495@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
54496 stack_top = arch_align_stack(stack_top);
54497 stack_top = PAGE_ALIGN(stack_top);
54498
54499- if (unlikely(stack_top < mmap_min_addr) ||
54500- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
54501- return -ENOMEM;
54502-
54503 stack_shift = vma->vm_end - stack_top;
54504
54505 bprm->p -= stack_shift;
54506@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
54507 bprm->exec -= stack_shift;
54508
54509 down_write(&mm->mmap_sem);
54510+
54511+ /* Move stack pages down in memory. */
54512+ if (stack_shift) {
54513+ ret = shift_arg_pages(vma, stack_shift);
54514+ if (ret)
54515+ goto out_unlock;
54516+ }
54517+
54518 vm_flags = VM_STACK_FLAGS;
54519
54520+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
54521+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
54522+ vm_flags &= ~VM_EXEC;
54523+
54524+#ifdef CONFIG_PAX_MPROTECT
54525+ if (mm->pax_flags & MF_PAX_MPROTECT)
54526+ vm_flags &= ~VM_MAYEXEC;
54527+#endif
54528+
54529+ }
54530+#endif
54531+
54532 /*
54533 * Adjust stack execute permissions; explicitly enable for
54534 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
54535@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
54536 goto out_unlock;
54537 BUG_ON(prev != vma);
54538
54539- /* Move stack pages down in memory. */
54540- if (stack_shift) {
54541- ret = shift_arg_pages(vma, stack_shift);
54542- if (ret)
54543- goto out_unlock;
54544- }
54545-
54546 /* mprotect_fixup is overkill to remove the temporary stack flags */
54547 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
54548
54549@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
54550 #endif
54551 current->mm->start_stack = bprm->p;
54552 ret = expand_stack(vma, stack_base);
54553+
54554+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
54555+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
54556+ unsigned long size;
54557+ vm_flags_t vm_flags;
54558+
54559+ size = STACK_TOP - vma->vm_end;
54560+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
54561+
54562+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
54563+
54564+#ifdef CONFIG_X86
54565+ if (!ret) {
54566+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
54567+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
54568+ }
54569+#endif
54570+
54571+ }
54572+#endif
54573+
54574 if (ret)
54575 ret = -EFAULT;
54576
54577@@ -772,6 +848,8 @@ struct file *open_exec(const char *name)
54578
54579 fsnotify_open(file);
54580
54581+ trace_open_exec(name);
54582+
54583 err = deny_write_access(file);
54584 if (err)
54585 goto exit;
54586@@ -795,7 +873,7 @@ int kernel_read(struct file *file, loff_t offset,
54587 old_fs = get_fs();
54588 set_fs(get_ds());
54589 /* The cast to a user pointer is valid due to the set_fs() */
54590- result = vfs_read(file, (void __user *)addr, count, &pos);
54591+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
54592 set_fs(old_fs);
54593 return result;
54594 }
54595@@ -1251,7 +1329,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
54596 }
54597 rcu_read_unlock();
54598
54599- if (p->fs->users > n_fs) {
54600+ if (atomic_read(&p->fs->users) > n_fs) {
54601 bprm->unsafe |= LSM_UNSAFE_SHARE;
54602 } else {
54603 res = -EAGAIN;
54604@@ -1451,6 +1529,31 @@ int search_binary_handler(struct linux_binprm *bprm)
54605
54606 EXPORT_SYMBOL(search_binary_handler);
54607
54608+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54609+static DEFINE_PER_CPU(u64, exec_counter);
54610+static int __init init_exec_counters(void)
54611+{
54612+ unsigned int cpu;
54613+
54614+ for_each_possible_cpu(cpu) {
54615+ per_cpu(exec_counter, cpu) = (u64)cpu;
54616+ }
54617+
54618+ return 0;
54619+}
54620+early_initcall(init_exec_counters);
54621+static inline void increment_exec_counter(void)
54622+{
54623+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
54624+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
54625+}
54626+#else
54627+static inline void increment_exec_counter(void) {}
54628+#endif
54629+
54630+extern void gr_handle_exec_args(struct linux_binprm *bprm,
54631+ struct user_arg_ptr argv);
54632+
54633 /*
54634 * sys_execve() executes a new program.
54635 */
54636@@ -1458,6 +1561,11 @@ static int do_execve_common(const char *filename,
54637 struct user_arg_ptr argv,
54638 struct user_arg_ptr envp)
54639 {
54640+#ifdef CONFIG_GRKERNSEC
54641+ struct file *old_exec_file;
54642+ struct acl_subject_label *old_acl;
54643+ struct rlimit old_rlim[RLIM_NLIMITS];
54644+#endif
54645 struct linux_binprm *bprm;
54646 struct file *file;
54647 struct files_struct *displaced;
54648@@ -1465,6 +1573,8 @@ static int do_execve_common(const char *filename,
54649 int retval;
54650 const struct cred *cred = current_cred();
54651
54652+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&cred->user->processes), 1);
54653+
54654 /*
54655 * We move the actual failure in case of RLIMIT_NPROC excess from
54656 * set*uid() to execve() because too many poorly written programs
54657@@ -1505,12 +1615,22 @@ static int do_execve_common(const char *filename,
54658 if (IS_ERR(file))
54659 goto out_unmark;
54660
54661+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
54662+ retval = -EPERM;
54663+ goto out_file;
54664+ }
54665+
54666 sched_exec();
54667
54668 bprm->file = file;
54669 bprm->filename = filename;
54670 bprm->interp = filename;
54671
54672+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
54673+ retval = -EACCES;
54674+ goto out_file;
54675+ }
54676+
54677 retval = bprm_mm_init(bprm);
54678 if (retval)
54679 goto out_file;
54680@@ -1527,24 +1647,70 @@ static int do_execve_common(const char *filename,
54681 if (retval < 0)
54682 goto out;
54683
54684+#ifdef CONFIG_GRKERNSEC
54685+ old_acl = current->acl;
54686+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
54687+ old_exec_file = current->exec_file;
54688+ get_file(file);
54689+ current->exec_file = file;
54690+#endif
54691+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54692+ /* limit suid stack to 8MB
54693+ * we saved the old limits above and will restore them if this exec fails
54694+ */
54695+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
54696+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
54697+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
54698+#endif
54699+
54700+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
54701+ retval = -EPERM;
54702+ goto out_fail;
54703+ }
54704+
54705+ if (!gr_tpe_allow(file)) {
54706+ retval = -EACCES;
54707+ goto out_fail;
54708+ }
54709+
54710+ if (gr_check_crash_exec(file)) {
54711+ retval = -EACCES;
54712+ goto out_fail;
54713+ }
54714+
54715+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
54716+ bprm->unsafe);
54717+ if (retval < 0)
54718+ goto out_fail;
54719+
54720 retval = copy_strings_kernel(1, &bprm->filename, bprm);
54721 if (retval < 0)
54722- goto out;
54723+ goto out_fail;
54724
54725 bprm->exec = bprm->p;
54726 retval = copy_strings(bprm->envc, envp, bprm);
54727 if (retval < 0)
54728- goto out;
54729+ goto out_fail;
54730
54731 retval = copy_strings(bprm->argc, argv, bprm);
54732 if (retval < 0)
54733- goto out;
54734+ goto out_fail;
54735+
54736+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
54737+
54738+ gr_handle_exec_args(bprm, argv);
54739
54740 retval = search_binary_handler(bprm);
54741 if (retval < 0)
54742- goto out;
54743+ goto out_fail;
54744+#ifdef CONFIG_GRKERNSEC
54745+ if (old_exec_file)
54746+ fput(old_exec_file);
54747+#endif
54748
54749 /* execve succeeded */
54750+
54751+ increment_exec_counter();
54752 current->fs->in_exec = 0;
54753 current->in_execve = 0;
54754 acct_update_integrals(current);
54755@@ -1553,6 +1719,14 @@ static int do_execve_common(const char *filename,
54756 put_files_struct(displaced);
54757 return retval;
54758
54759+out_fail:
54760+#ifdef CONFIG_GRKERNSEC
54761+ current->acl = old_acl;
54762+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
54763+ fput(current->exec_file);
54764+ current->exec_file = old_exec_file;
54765+#endif
54766+
54767 out:
54768 if (bprm->mm) {
54769 acct_arg_size(bprm, 0);
54770@@ -1701,3 +1875,287 @@ asmlinkage long compat_sys_execve(const char __user * filename,
54771 return error;
54772 }
54773 #endif
54774+
54775+int pax_check_flags(unsigned long *flags)
54776+{
54777+ int retval = 0;
54778+
54779+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
54780+ if (*flags & MF_PAX_SEGMEXEC)
54781+ {
54782+ *flags &= ~MF_PAX_SEGMEXEC;
54783+ retval = -EINVAL;
54784+ }
54785+#endif
54786+
54787+ if ((*flags & MF_PAX_PAGEEXEC)
54788+
54789+#ifdef CONFIG_PAX_PAGEEXEC
54790+ && (*flags & MF_PAX_SEGMEXEC)
54791+#endif
54792+
54793+ )
54794+ {
54795+ *flags &= ~MF_PAX_PAGEEXEC;
54796+ retval = -EINVAL;
54797+ }
54798+
54799+ if ((*flags & MF_PAX_MPROTECT)
54800+
54801+#ifdef CONFIG_PAX_MPROTECT
54802+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
54803+#endif
54804+
54805+ )
54806+ {
54807+ *flags &= ~MF_PAX_MPROTECT;
54808+ retval = -EINVAL;
54809+ }
54810+
54811+ if ((*flags & MF_PAX_EMUTRAMP)
54812+
54813+#ifdef CONFIG_PAX_EMUTRAMP
54814+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
54815+#endif
54816+
54817+ )
54818+ {
54819+ *flags &= ~MF_PAX_EMUTRAMP;
54820+ retval = -EINVAL;
54821+ }
54822+
54823+ return retval;
54824+}
54825+
54826+EXPORT_SYMBOL(pax_check_flags);
54827+
54828+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
54829+char *pax_get_path(const struct path *path, char *buf, int buflen)
54830+{
54831+ char *pathname = d_path(path, buf, buflen);
54832+
54833+ if (IS_ERR(pathname))
54834+ goto toolong;
54835+
54836+ pathname = mangle_path(buf, pathname, "\t\n\\");
54837+ if (!pathname)
54838+ goto toolong;
54839+
54840+ *pathname = 0;
54841+ return buf;
54842+
54843+toolong:
54844+ return "<path too long>";
54845+}
54846+EXPORT_SYMBOL(pax_get_path);
54847+
54848+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
54849+{
54850+ struct task_struct *tsk = current;
54851+ struct mm_struct *mm = current->mm;
54852+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
54853+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
54854+ char *path_exec = NULL;
54855+ char *path_fault = NULL;
54856+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
54857+ siginfo_t info = { };
54858+
54859+ if (buffer_exec && buffer_fault) {
54860+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
54861+
54862+ down_read(&mm->mmap_sem);
54863+ vma = mm->mmap;
54864+ while (vma && (!vma_exec || !vma_fault)) {
54865+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
54866+ vma_exec = vma;
54867+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
54868+ vma_fault = vma;
54869+ vma = vma->vm_next;
54870+ }
54871+ if (vma_exec)
54872+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
54873+ if (vma_fault) {
54874+ start = vma_fault->vm_start;
54875+ end = vma_fault->vm_end;
54876+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
54877+ if (vma_fault->vm_file)
54878+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
54879+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
54880+ path_fault = "<heap>";
54881+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
54882+ path_fault = "<stack>";
54883+ else
54884+ path_fault = "<anonymous mapping>";
54885+ }
54886+ up_read(&mm->mmap_sem);
54887+ }
54888+ if (tsk->signal->curr_ip)
54889+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
54890+ else
54891+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
54892+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
54893+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
54894+ free_page((unsigned long)buffer_exec);
54895+ free_page((unsigned long)buffer_fault);
54896+ pax_report_insns(regs, pc, sp);
54897+ info.si_signo = SIGKILL;
54898+ info.si_errno = 0;
54899+ info.si_code = SI_KERNEL;
54900+ info.si_pid = 0;
54901+ info.si_uid = 0;
54902+ do_coredump(&info);
54903+}
54904+#endif
54905+
54906+#ifdef CONFIG_PAX_REFCOUNT
54907+void pax_report_refcount_overflow(struct pt_regs *regs)
54908+{
54909+ if (current->signal->curr_ip)
54910+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
54911+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
54912+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
54913+ else
54914+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
54915+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
54916+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
54917+ preempt_disable();
54918+ show_regs(regs);
54919+ preempt_enable();
54920+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
54921+}
54922+#endif
54923+
54924+#ifdef CONFIG_PAX_USERCOPY
54925+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
54926+static noinline int check_stack_object(const void *obj, unsigned long len)
54927+{
54928+ const void * const stack = task_stack_page(current);
54929+ const void * const stackend = stack + THREAD_SIZE;
54930+
54931+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
54932+ const void *frame = NULL;
54933+ const void *oldframe;
54934+#endif
54935+
54936+ if (obj + len < obj)
54937+ return -1;
54938+
54939+ if (obj + len <= stack || stackend <= obj)
54940+ return 0;
54941+
54942+ if (obj < stack || stackend < obj + len)
54943+ return -1;
54944+
54945+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
54946+ oldframe = __builtin_frame_address(1);
54947+ if (oldframe)
54948+ frame = __builtin_frame_address(2);
54949+ /*
54950+ low ----------------------------------------------> high
54951+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
54952+ ^----------------^
54953+ allow copies only within here
54954+ */
54955+ while (stack <= frame && frame < stackend) {
54956+ /* if obj + len extends past the last frame, this
54957+ check won't pass and the next frame will be 0,
54958+ causing us to bail out and correctly report
54959+ the copy as invalid
54960+ */
54961+ if (obj + len <= frame)
54962+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
54963+ oldframe = frame;
54964+ frame = *(const void * const *)frame;
54965+ }
54966+ return -1;
54967+#else
54968+ return 1;
54969+#endif
54970+}
54971+
54972+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
54973+{
54974+ if (current->signal->curr_ip)
54975+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
54976+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
54977+ else
54978+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
54979+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
54980+ dump_stack();
54981+ gr_handle_kernel_exploit();
54982+ do_group_exit(SIGKILL);
54983+}
54984+#endif
54985+
54986+#ifdef CONFIG_PAX_USERCOPY
54987+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
54988+{
54989+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
54990+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
54991+#ifdef CONFIG_MODULES
54992+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
54993+#else
54994+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
54995+#endif
54996+
54997+#else
54998+ unsigned long textlow = (unsigned long)_stext;
54999+ unsigned long texthigh = (unsigned long)_etext;
55000+#endif
55001+
55002+ if (high <= textlow || low > texthigh)
55003+ return false;
55004+ else
55005+ return true;
55006+}
55007+#endif
55008+
55009+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
55010+{
55011+
55012+#ifdef CONFIG_PAX_USERCOPY
55013+ const char *type;
55014+
55015+ if (!n)
55016+ return;
55017+
55018+ type = check_heap_object(ptr, n);
55019+ if (!type) {
55020+ int ret = check_stack_object(ptr, n);
55021+ if (ret == 1 || ret == 2)
55022+ return;
55023+ if (ret == 0) {
55024+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
55025+ type = "<kernel text>";
55026+ else
55027+ return;
55028+ } else
55029+ type = "<process stack>";
55030+ }
55031+
55032+ pax_report_usercopy(ptr, n, to_user, type);
55033+#endif
55034+
55035+}
55036+EXPORT_SYMBOL(__check_object_size);
55037+
55038+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
55039+void pax_track_stack(void)
55040+{
55041+ unsigned long sp = (unsigned long)&sp;
55042+ if (sp < current_thread_info()->lowest_stack &&
55043+ sp > (unsigned long)task_stack_page(current))
55044+ current_thread_info()->lowest_stack = sp;
55045+}
55046+EXPORT_SYMBOL(pax_track_stack);
55047+#endif
55048+
55049+#ifdef CONFIG_PAX_SIZE_OVERFLOW
55050+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
55051+{
55052+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
55053+ dump_stack();
55054+ do_group_exit(SIGKILL);
55055+}
55056+EXPORT_SYMBOL(report_size_overflow);
55057+#endif
55058diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
55059index 9f9992b..8b59411 100644
55060--- a/fs/ext2/balloc.c
55061+++ b/fs/ext2/balloc.c
55062@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
55063
55064 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
55065 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
55066- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
55067+ if (free_blocks < root_blocks + 1 &&
55068 !uid_eq(sbi->s_resuid, current_fsuid()) &&
55069 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
55070- !in_group_p (sbi->s_resgid))) {
55071+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
55072 return 0;
55073 }
55074 return 1;
55075diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
55076index 22548f5..41521d8 100644
55077--- a/fs/ext3/balloc.c
55078+++ b/fs/ext3/balloc.c
55079@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
55080
55081 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
55082 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
55083- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
55084+ if (free_blocks < root_blocks + 1 &&
55085 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
55086 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
55087- !in_group_p (sbi->s_resgid))) {
55088+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
55089 return 0;
55090 }
55091 return 1;
55092diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
55093index 3742e4c..69a797f 100644
55094--- a/fs/ext4/balloc.c
55095+++ b/fs/ext4/balloc.c
55096@@ -528,8 +528,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
55097 /* Hm, nope. Are (enough) root reserved clusters available? */
55098 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
55099 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
55100- capable(CAP_SYS_RESOURCE) ||
55101- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
55102+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
55103+ capable_nolog(CAP_SYS_RESOURCE)) {
55104
55105 if (free_clusters >= (nclusters + dirty_clusters +
55106 resv_clusters))
55107diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
55108index 5aae3d1..b5da7f8 100644
55109--- a/fs/ext4/ext4.h
55110+++ b/fs/ext4/ext4.h
55111@@ -1252,19 +1252,19 @@ struct ext4_sb_info {
55112 unsigned long s_mb_last_start;
55113
55114 /* stats for buddy allocator */
55115- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
55116- atomic_t s_bal_success; /* we found long enough chunks */
55117- atomic_t s_bal_allocated; /* in blocks */
55118- atomic_t s_bal_ex_scanned; /* total extents scanned */
55119- atomic_t s_bal_goals; /* goal hits */
55120- atomic_t s_bal_breaks; /* too long searches */
55121- atomic_t s_bal_2orders; /* 2^order hits */
55122+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
55123+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
55124+ atomic_unchecked_t s_bal_allocated; /* in blocks */
55125+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
55126+ atomic_unchecked_t s_bal_goals; /* goal hits */
55127+ atomic_unchecked_t s_bal_breaks; /* too long searches */
55128+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
55129 spinlock_t s_bal_lock;
55130 unsigned long s_mb_buddies_generated;
55131 unsigned long long s_mb_generation_time;
55132- atomic_t s_mb_lost_chunks;
55133- atomic_t s_mb_preallocated;
55134- atomic_t s_mb_discarded;
55135+ atomic_unchecked_t s_mb_lost_chunks;
55136+ atomic_unchecked_t s_mb_preallocated;
55137+ atomic_unchecked_t s_mb_discarded;
55138 atomic_t s_lock_busy;
55139
55140 /* locality groups */
55141diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
55142index 59c6750..a549154 100644
55143--- a/fs/ext4/mballoc.c
55144+++ b/fs/ext4/mballoc.c
55145@@ -1865,7 +1865,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
55146 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
55147
55148 if (EXT4_SB(sb)->s_mb_stats)
55149- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
55150+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
55151
55152 break;
55153 }
55154@@ -2170,7 +2170,7 @@ repeat:
55155 ac->ac_status = AC_STATUS_CONTINUE;
55156 ac->ac_flags |= EXT4_MB_HINT_FIRST;
55157 cr = 3;
55158- atomic_inc(&sbi->s_mb_lost_chunks);
55159+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
55160 goto repeat;
55161 }
55162 }
55163@@ -2678,25 +2678,25 @@ int ext4_mb_release(struct super_block *sb)
55164 if (sbi->s_mb_stats) {
55165 ext4_msg(sb, KERN_INFO,
55166 "mballoc: %u blocks %u reqs (%u success)",
55167- atomic_read(&sbi->s_bal_allocated),
55168- atomic_read(&sbi->s_bal_reqs),
55169- atomic_read(&sbi->s_bal_success));
55170+ atomic_read_unchecked(&sbi->s_bal_allocated),
55171+ atomic_read_unchecked(&sbi->s_bal_reqs),
55172+ atomic_read_unchecked(&sbi->s_bal_success));
55173 ext4_msg(sb, KERN_INFO,
55174 "mballoc: %u extents scanned, %u goal hits, "
55175 "%u 2^N hits, %u breaks, %u lost",
55176- atomic_read(&sbi->s_bal_ex_scanned),
55177- atomic_read(&sbi->s_bal_goals),
55178- atomic_read(&sbi->s_bal_2orders),
55179- atomic_read(&sbi->s_bal_breaks),
55180- atomic_read(&sbi->s_mb_lost_chunks));
55181+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
55182+ atomic_read_unchecked(&sbi->s_bal_goals),
55183+ atomic_read_unchecked(&sbi->s_bal_2orders),
55184+ atomic_read_unchecked(&sbi->s_bal_breaks),
55185+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
55186 ext4_msg(sb, KERN_INFO,
55187 "mballoc: %lu generated and it took %Lu",
55188 sbi->s_mb_buddies_generated,
55189 sbi->s_mb_generation_time);
55190 ext4_msg(sb, KERN_INFO,
55191 "mballoc: %u preallocated, %u discarded",
55192- atomic_read(&sbi->s_mb_preallocated),
55193- atomic_read(&sbi->s_mb_discarded));
55194+ atomic_read_unchecked(&sbi->s_mb_preallocated),
55195+ atomic_read_unchecked(&sbi->s_mb_discarded));
55196 }
55197
55198 free_percpu(sbi->s_locality_groups);
55199@@ -3150,16 +3150,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
55200 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
55201
55202 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
55203- atomic_inc(&sbi->s_bal_reqs);
55204- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
55205+ atomic_inc_unchecked(&sbi->s_bal_reqs);
55206+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
55207 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
55208- atomic_inc(&sbi->s_bal_success);
55209- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
55210+ atomic_inc_unchecked(&sbi->s_bal_success);
55211+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
55212 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
55213 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
55214- atomic_inc(&sbi->s_bal_goals);
55215+ atomic_inc_unchecked(&sbi->s_bal_goals);
55216 if (ac->ac_found > sbi->s_mb_max_to_scan)
55217- atomic_inc(&sbi->s_bal_breaks);
55218+ atomic_inc_unchecked(&sbi->s_bal_breaks);
55219 }
55220
55221 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
55222@@ -3559,7 +3559,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
55223 trace_ext4_mb_new_inode_pa(ac, pa);
55224
55225 ext4_mb_use_inode_pa(ac, pa);
55226- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
55227+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
55228
55229 ei = EXT4_I(ac->ac_inode);
55230 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
55231@@ -3619,7 +3619,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
55232 trace_ext4_mb_new_group_pa(ac, pa);
55233
55234 ext4_mb_use_group_pa(ac, pa);
55235- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
55236+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
55237
55238 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
55239 lg = ac->ac_lg;
55240@@ -3708,7 +3708,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
55241 * from the bitmap and continue.
55242 */
55243 }
55244- atomic_add(free, &sbi->s_mb_discarded);
55245+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
55246
55247 return err;
55248 }
55249@@ -3726,7 +3726,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
55250 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
55251 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
55252 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
55253- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
55254+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
55255 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
55256
55257 return 0;
55258diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
55259index 214461e..3614c89 100644
55260--- a/fs/ext4/mmp.c
55261+++ b/fs/ext4/mmp.c
55262@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
55263 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
55264 const char *function, unsigned int line, const char *msg)
55265 {
55266- __ext4_warning(sb, function, line, msg);
55267+ __ext4_warning(sb, function, line, "%s", msg);
55268 __ext4_warning(sb, function, line,
55269 "MMP failure info: last update time: %llu, last update "
55270 "node: %s, last update device: %s\n",
55271diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
55272index 49d3c01..9579efd 100644
55273--- a/fs/ext4/resize.c
55274+++ b/fs/ext4/resize.c
55275@@ -79,12 +79,20 @@ static int verify_group_input(struct super_block *sb,
55276 ext4_fsblk_t end = start + input->blocks_count;
55277 ext4_group_t group = input->group;
55278 ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
55279- unsigned overhead = ext4_group_overhead_blocks(sb, group);
55280- ext4_fsblk_t metaend = start + overhead;
55281+ unsigned overhead;
55282+ ext4_fsblk_t metaend;
55283 struct buffer_head *bh = NULL;
55284 ext4_grpblk_t free_blocks_count, offset;
55285 int err = -EINVAL;
55286
55287+ if (group != sbi->s_groups_count) {
55288+ ext4_warning(sb, "Cannot add at group %u (only %u groups)",
55289+ input->group, sbi->s_groups_count);
55290+ return -EINVAL;
55291+ }
55292+
55293+ overhead = ext4_group_overhead_blocks(sb, group);
55294+ metaend = start + overhead;
55295 input->free_blocks_count = free_blocks_count =
55296 input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
55297
55298@@ -96,10 +104,7 @@ static int verify_group_input(struct super_block *sb,
55299 free_blocks_count, input->reserved_blocks);
55300
55301 ext4_get_group_no_and_offset(sb, start, NULL, &offset);
55302- if (group != sbi->s_groups_count)
55303- ext4_warning(sb, "Cannot add at group %u (only %u groups)",
55304- input->group, sbi->s_groups_count);
55305- else if (offset != 0)
55306+ if (offset != 0)
55307 ext4_warning(sb, "Last group not full");
55308 else if (input->reserved_blocks > input->blocks_count / 5)
55309 ext4_warning(sb, "Reserved blocks too high (%u)",
55310diff --git a/fs/ext4/super.c b/fs/ext4/super.c
55311index 3f7c39e..227f24f 100644
55312--- a/fs/ext4/super.c
55313+++ b/fs/ext4/super.c
55314@@ -1236,7 +1236,7 @@ static ext4_fsblk_t get_sb_block(void **data)
55315 }
55316
55317 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
55318-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
55319+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
55320 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
55321
55322 #ifdef CONFIG_QUOTA
55323@@ -2372,7 +2372,7 @@ struct ext4_attr {
55324 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
55325 const char *, size_t);
55326 int offset;
55327-};
55328+} __do_const;
55329
55330 static int parse_strtoull(const char *buf,
55331 unsigned long long max, unsigned long long *value)
55332diff --git a/fs/fcntl.c b/fs/fcntl.c
55333index 6599222..e7bf0de 100644
55334--- a/fs/fcntl.c
55335+++ b/fs/fcntl.c
55336@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
55337 if (err)
55338 return err;
55339
55340+ if (gr_handle_chroot_fowner(pid, type))
55341+ return -ENOENT;
55342+ if (gr_check_protected_task_fowner(pid, type))
55343+ return -EACCES;
55344+
55345 f_modown(filp, pid, type, force);
55346 return 0;
55347 }
55348diff --git a/fs/fhandle.c b/fs/fhandle.c
55349index 999ff5c..41f4109 100644
55350--- a/fs/fhandle.c
55351+++ b/fs/fhandle.c
55352@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
55353 } else
55354 retval = 0;
55355 /* copy the mount id */
55356- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
55357- sizeof(*mnt_id)) ||
55358+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
55359 copy_to_user(ufh, handle,
55360 sizeof(struct file_handle) + handle_bytes))
55361 retval = -EFAULT;
55362diff --git a/fs/file.c b/fs/file.c
55363index 4a78f98..9447397 100644
55364--- a/fs/file.c
55365+++ b/fs/file.c
55366@@ -16,6 +16,7 @@
55367 #include <linux/slab.h>
55368 #include <linux/vmalloc.h>
55369 #include <linux/file.h>
55370+#include <linux/security.h>
55371 #include <linux/fdtable.h>
55372 #include <linux/bitops.h>
55373 #include <linux/interrupt.h>
55374@@ -828,6 +829,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
55375 if (!file)
55376 return __close_fd(files, fd);
55377
55378+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
55379 if (fd >= rlimit(RLIMIT_NOFILE))
55380 return -EBADF;
55381
55382@@ -854,6 +856,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
55383 if (unlikely(oldfd == newfd))
55384 return -EINVAL;
55385
55386+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
55387 if (newfd >= rlimit(RLIMIT_NOFILE))
55388 return -EBADF;
55389
55390@@ -909,6 +912,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
55391 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
55392 {
55393 int err;
55394+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
55395 if (from >= rlimit(RLIMIT_NOFILE))
55396 return -EINVAL;
55397 err = alloc_fd(from, flags);
55398diff --git a/fs/filesystems.c b/fs/filesystems.c
55399index 92567d9..fcd8cbf 100644
55400--- a/fs/filesystems.c
55401+++ b/fs/filesystems.c
55402@@ -273,7 +273,11 @@ struct file_system_type *get_fs_type(const char *name)
55403 int len = dot ? dot - name : strlen(name);
55404
55405 fs = __get_fs_type(name, len);
55406+#ifdef CONFIG_GRKERNSEC_MODHARDEN
55407+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
55408+#else
55409 if (!fs && (request_module("fs-%.*s", len, name) == 0))
55410+#endif
55411 fs = __get_fs_type(name, len);
55412
55413 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
55414diff --git a/fs/fs_struct.c b/fs/fs_struct.c
55415index d8ac61d..79a36f0 100644
55416--- a/fs/fs_struct.c
55417+++ b/fs/fs_struct.c
55418@@ -4,6 +4,7 @@
55419 #include <linux/path.h>
55420 #include <linux/slab.h>
55421 #include <linux/fs_struct.h>
55422+#include <linux/grsecurity.h>
55423 #include "internal.h"
55424
55425 /*
55426@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
55427 write_seqcount_begin(&fs->seq);
55428 old_root = fs->root;
55429 fs->root = *path;
55430+ gr_set_chroot_entries(current, path);
55431 write_seqcount_end(&fs->seq);
55432 spin_unlock(&fs->lock);
55433 if (old_root.dentry)
55434@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
55435 int hits = 0;
55436 spin_lock(&fs->lock);
55437 write_seqcount_begin(&fs->seq);
55438+ /* this root replacement is only done by pivot_root,
55439+ leave grsec's chroot tagging alone for this task
55440+ so that a pivoted root isn't treated as a chroot
55441+ */
55442 hits += replace_path(&fs->root, old_root, new_root);
55443 hits += replace_path(&fs->pwd, old_root, new_root);
55444 write_seqcount_end(&fs->seq);
55445@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
55446 task_lock(tsk);
55447 spin_lock(&fs->lock);
55448 tsk->fs = NULL;
55449- kill = !--fs->users;
55450+ gr_clear_chroot_entries(tsk);
55451+ kill = !atomic_dec_return(&fs->users);
55452 spin_unlock(&fs->lock);
55453 task_unlock(tsk);
55454 if (kill)
55455@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
55456 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
55457 /* We don't need to lock fs - think why ;-) */
55458 if (fs) {
55459- fs->users = 1;
55460+ atomic_set(&fs->users, 1);
55461 fs->in_exec = 0;
55462 spin_lock_init(&fs->lock);
55463 seqcount_init(&fs->seq);
55464@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
55465 spin_lock(&old->lock);
55466 fs->root = old->root;
55467 path_get(&fs->root);
55468+ /* instead of calling gr_set_chroot_entries here,
55469+ we call it from every caller of this function
55470+ */
55471 fs->pwd = old->pwd;
55472 path_get(&fs->pwd);
55473 spin_unlock(&old->lock);
55474@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
55475
55476 task_lock(current);
55477 spin_lock(&fs->lock);
55478- kill = !--fs->users;
55479+ kill = !atomic_dec_return(&fs->users);
55480 current->fs = new_fs;
55481+ gr_set_chroot_entries(current, &new_fs->root);
55482 spin_unlock(&fs->lock);
55483 task_unlock(current);
55484
55485@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
55486
55487 int current_umask(void)
55488 {
55489- return current->fs->umask;
55490+ return current->fs->umask | gr_acl_umask();
55491 }
55492 EXPORT_SYMBOL(current_umask);
55493
55494 /* to be mentioned only in INIT_TASK */
55495 struct fs_struct init_fs = {
55496- .users = 1,
55497+ .users = ATOMIC_INIT(1),
55498 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
55499 .seq = SEQCNT_ZERO,
55500 .umask = 0022,
55501diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
55502index e2cba1f..17a25bb 100644
55503--- a/fs/fscache/cookie.c
55504+++ b/fs/fscache/cookie.c
55505@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
55506 parent ? (char *) parent->def->name : "<no-parent>",
55507 def->name, netfs_data);
55508
55509- fscache_stat(&fscache_n_acquires);
55510+ fscache_stat_unchecked(&fscache_n_acquires);
55511
55512 /* if there's no parent cookie, then we don't create one here either */
55513 if (!parent) {
55514- fscache_stat(&fscache_n_acquires_null);
55515+ fscache_stat_unchecked(&fscache_n_acquires_null);
55516 _leave(" [no parent]");
55517 return NULL;
55518 }
55519@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
55520 /* allocate and initialise a cookie */
55521 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
55522 if (!cookie) {
55523- fscache_stat(&fscache_n_acquires_oom);
55524+ fscache_stat_unchecked(&fscache_n_acquires_oom);
55525 _leave(" [ENOMEM]");
55526 return NULL;
55527 }
55528@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
55529
55530 switch (cookie->def->type) {
55531 case FSCACHE_COOKIE_TYPE_INDEX:
55532- fscache_stat(&fscache_n_cookie_index);
55533+ fscache_stat_unchecked(&fscache_n_cookie_index);
55534 break;
55535 case FSCACHE_COOKIE_TYPE_DATAFILE:
55536- fscache_stat(&fscache_n_cookie_data);
55537+ fscache_stat_unchecked(&fscache_n_cookie_data);
55538 break;
55539 default:
55540- fscache_stat(&fscache_n_cookie_special);
55541+ fscache_stat_unchecked(&fscache_n_cookie_special);
55542 break;
55543 }
55544
55545@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
55546 if (fscache_acquire_non_index_cookie(cookie) < 0) {
55547 atomic_dec(&parent->n_children);
55548 __fscache_cookie_put(cookie);
55549- fscache_stat(&fscache_n_acquires_nobufs);
55550+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
55551 _leave(" = NULL");
55552 return NULL;
55553 }
55554 }
55555
55556- fscache_stat(&fscache_n_acquires_ok);
55557+ fscache_stat_unchecked(&fscache_n_acquires_ok);
55558 _leave(" = %p", cookie);
55559 return cookie;
55560 }
55561@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
55562 cache = fscache_select_cache_for_object(cookie->parent);
55563 if (!cache) {
55564 up_read(&fscache_addremove_sem);
55565- fscache_stat(&fscache_n_acquires_no_cache);
55566+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
55567 _leave(" = -ENOMEDIUM [no cache]");
55568 return -ENOMEDIUM;
55569 }
55570@@ -255,12 +255,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
55571 object = cache->ops->alloc_object(cache, cookie);
55572 fscache_stat_d(&fscache_n_cop_alloc_object);
55573 if (IS_ERR(object)) {
55574- fscache_stat(&fscache_n_object_no_alloc);
55575+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
55576 ret = PTR_ERR(object);
55577 goto error;
55578 }
55579
55580- fscache_stat(&fscache_n_object_alloc);
55581+ fscache_stat_unchecked(&fscache_n_object_alloc);
55582
55583 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
55584
55585@@ -376,7 +376,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
55586
55587 _enter("{%s}", cookie->def->name);
55588
55589- fscache_stat(&fscache_n_invalidates);
55590+ fscache_stat_unchecked(&fscache_n_invalidates);
55591
55592 /* Only permit invalidation of data files. Invalidating an index will
55593 * require the caller to release all its attachments to the tree rooted
55594@@ -434,10 +434,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
55595 {
55596 struct fscache_object *object;
55597
55598- fscache_stat(&fscache_n_updates);
55599+ fscache_stat_unchecked(&fscache_n_updates);
55600
55601 if (!cookie) {
55602- fscache_stat(&fscache_n_updates_null);
55603+ fscache_stat_unchecked(&fscache_n_updates_null);
55604 _leave(" [no cookie]");
55605 return;
55606 }
55607@@ -471,12 +471,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
55608 struct fscache_object *object;
55609 unsigned long event;
55610
55611- fscache_stat(&fscache_n_relinquishes);
55612+ fscache_stat_unchecked(&fscache_n_relinquishes);
55613 if (retire)
55614- fscache_stat(&fscache_n_relinquishes_retire);
55615+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
55616
55617 if (!cookie) {
55618- fscache_stat(&fscache_n_relinquishes_null);
55619+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
55620 _leave(" [no cookie]");
55621 return;
55622 }
55623@@ -492,7 +492,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
55624
55625 /* wait for the cookie to finish being instantiated (or to fail) */
55626 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
55627- fscache_stat(&fscache_n_relinquishes_waitcrt);
55628+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
55629 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
55630 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
55631 }
55632diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
55633index ee38fef..0a326d4 100644
55634--- a/fs/fscache/internal.h
55635+++ b/fs/fscache/internal.h
55636@@ -148,101 +148,101 @@ extern void fscache_proc_cleanup(void);
55637 * stats.c
55638 */
55639 #ifdef CONFIG_FSCACHE_STATS
55640-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
55641-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
55642+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
55643+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
55644
55645-extern atomic_t fscache_n_op_pend;
55646-extern atomic_t fscache_n_op_run;
55647-extern atomic_t fscache_n_op_enqueue;
55648-extern atomic_t fscache_n_op_deferred_release;
55649-extern atomic_t fscache_n_op_release;
55650-extern atomic_t fscache_n_op_gc;
55651-extern atomic_t fscache_n_op_cancelled;
55652-extern atomic_t fscache_n_op_rejected;
55653+extern atomic_unchecked_t fscache_n_op_pend;
55654+extern atomic_unchecked_t fscache_n_op_run;
55655+extern atomic_unchecked_t fscache_n_op_enqueue;
55656+extern atomic_unchecked_t fscache_n_op_deferred_release;
55657+extern atomic_unchecked_t fscache_n_op_release;
55658+extern atomic_unchecked_t fscache_n_op_gc;
55659+extern atomic_unchecked_t fscache_n_op_cancelled;
55660+extern atomic_unchecked_t fscache_n_op_rejected;
55661
55662-extern atomic_t fscache_n_attr_changed;
55663-extern atomic_t fscache_n_attr_changed_ok;
55664-extern atomic_t fscache_n_attr_changed_nobufs;
55665-extern atomic_t fscache_n_attr_changed_nomem;
55666-extern atomic_t fscache_n_attr_changed_calls;
55667+extern atomic_unchecked_t fscache_n_attr_changed;
55668+extern atomic_unchecked_t fscache_n_attr_changed_ok;
55669+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
55670+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
55671+extern atomic_unchecked_t fscache_n_attr_changed_calls;
55672
55673-extern atomic_t fscache_n_allocs;
55674-extern atomic_t fscache_n_allocs_ok;
55675-extern atomic_t fscache_n_allocs_wait;
55676-extern atomic_t fscache_n_allocs_nobufs;
55677-extern atomic_t fscache_n_allocs_intr;
55678-extern atomic_t fscache_n_allocs_object_dead;
55679-extern atomic_t fscache_n_alloc_ops;
55680-extern atomic_t fscache_n_alloc_op_waits;
55681+extern atomic_unchecked_t fscache_n_allocs;
55682+extern atomic_unchecked_t fscache_n_allocs_ok;
55683+extern atomic_unchecked_t fscache_n_allocs_wait;
55684+extern atomic_unchecked_t fscache_n_allocs_nobufs;
55685+extern atomic_unchecked_t fscache_n_allocs_intr;
55686+extern atomic_unchecked_t fscache_n_allocs_object_dead;
55687+extern atomic_unchecked_t fscache_n_alloc_ops;
55688+extern atomic_unchecked_t fscache_n_alloc_op_waits;
55689
55690-extern atomic_t fscache_n_retrievals;
55691-extern atomic_t fscache_n_retrievals_ok;
55692-extern atomic_t fscache_n_retrievals_wait;
55693-extern atomic_t fscache_n_retrievals_nodata;
55694-extern atomic_t fscache_n_retrievals_nobufs;
55695-extern atomic_t fscache_n_retrievals_intr;
55696-extern atomic_t fscache_n_retrievals_nomem;
55697-extern atomic_t fscache_n_retrievals_object_dead;
55698-extern atomic_t fscache_n_retrieval_ops;
55699-extern atomic_t fscache_n_retrieval_op_waits;
55700+extern atomic_unchecked_t fscache_n_retrievals;
55701+extern atomic_unchecked_t fscache_n_retrievals_ok;
55702+extern atomic_unchecked_t fscache_n_retrievals_wait;
55703+extern atomic_unchecked_t fscache_n_retrievals_nodata;
55704+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
55705+extern atomic_unchecked_t fscache_n_retrievals_intr;
55706+extern atomic_unchecked_t fscache_n_retrievals_nomem;
55707+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
55708+extern atomic_unchecked_t fscache_n_retrieval_ops;
55709+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
55710
55711-extern atomic_t fscache_n_stores;
55712-extern atomic_t fscache_n_stores_ok;
55713-extern atomic_t fscache_n_stores_again;
55714-extern atomic_t fscache_n_stores_nobufs;
55715-extern atomic_t fscache_n_stores_oom;
55716-extern atomic_t fscache_n_store_ops;
55717-extern atomic_t fscache_n_store_calls;
55718-extern atomic_t fscache_n_store_pages;
55719-extern atomic_t fscache_n_store_radix_deletes;
55720-extern atomic_t fscache_n_store_pages_over_limit;
55721+extern atomic_unchecked_t fscache_n_stores;
55722+extern atomic_unchecked_t fscache_n_stores_ok;
55723+extern atomic_unchecked_t fscache_n_stores_again;
55724+extern atomic_unchecked_t fscache_n_stores_nobufs;
55725+extern atomic_unchecked_t fscache_n_stores_oom;
55726+extern atomic_unchecked_t fscache_n_store_ops;
55727+extern atomic_unchecked_t fscache_n_store_calls;
55728+extern atomic_unchecked_t fscache_n_store_pages;
55729+extern atomic_unchecked_t fscache_n_store_radix_deletes;
55730+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
55731
55732-extern atomic_t fscache_n_store_vmscan_not_storing;
55733-extern atomic_t fscache_n_store_vmscan_gone;
55734-extern atomic_t fscache_n_store_vmscan_busy;
55735-extern atomic_t fscache_n_store_vmscan_cancelled;
55736-extern atomic_t fscache_n_store_vmscan_wait;
55737+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
55738+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
55739+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
55740+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
55741+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
55742
55743-extern atomic_t fscache_n_marks;
55744-extern atomic_t fscache_n_uncaches;
55745+extern atomic_unchecked_t fscache_n_marks;
55746+extern atomic_unchecked_t fscache_n_uncaches;
55747
55748-extern atomic_t fscache_n_acquires;
55749-extern atomic_t fscache_n_acquires_null;
55750-extern atomic_t fscache_n_acquires_no_cache;
55751-extern atomic_t fscache_n_acquires_ok;
55752-extern atomic_t fscache_n_acquires_nobufs;
55753-extern atomic_t fscache_n_acquires_oom;
55754+extern atomic_unchecked_t fscache_n_acquires;
55755+extern atomic_unchecked_t fscache_n_acquires_null;
55756+extern atomic_unchecked_t fscache_n_acquires_no_cache;
55757+extern atomic_unchecked_t fscache_n_acquires_ok;
55758+extern atomic_unchecked_t fscache_n_acquires_nobufs;
55759+extern atomic_unchecked_t fscache_n_acquires_oom;
55760
55761-extern atomic_t fscache_n_invalidates;
55762-extern atomic_t fscache_n_invalidates_run;
55763+extern atomic_unchecked_t fscache_n_invalidates;
55764+extern atomic_unchecked_t fscache_n_invalidates_run;
55765
55766-extern atomic_t fscache_n_updates;
55767-extern atomic_t fscache_n_updates_null;
55768-extern atomic_t fscache_n_updates_run;
55769+extern atomic_unchecked_t fscache_n_updates;
55770+extern atomic_unchecked_t fscache_n_updates_null;
55771+extern atomic_unchecked_t fscache_n_updates_run;
55772
55773-extern atomic_t fscache_n_relinquishes;
55774-extern atomic_t fscache_n_relinquishes_null;
55775-extern atomic_t fscache_n_relinquishes_waitcrt;
55776-extern atomic_t fscache_n_relinquishes_retire;
55777+extern atomic_unchecked_t fscache_n_relinquishes;
55778+extern atomic_unchecked_t fscache_n_relinquishes_null;
55779+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
55780+extern atomic_unchecked_t fscache_n_relinquishes_retire;
55781
55782-extern atomic_t fscache_n_cookie_index;
55783-extern atomic_t fscache_n_cookie_data;
55784-extern atomic_t fscache_n_cookie_special;
55785+extern atomic_unchecked_t fscache_n_cookie_index;
55786+extern atomic_unchecked_t fscache_n_cookie_data;
55787+extern atomic_unchecked_t fscache_n_cookie_special;
55788
55789-extern atomic_t fscache_n_object_alloc;
55790-extern atomic_t fscache_n_object_no_alloc;
55791-extern atomic_t fscache_n_object_lookups;
55792-extern atomic_t fscache_n_object_lookups_negative;
55793-extern atomic_t fscache_n_object_lookups_positive;
55794-extern atomic_t fscache_n_object_lookups_timed_out;
55795-extern atomic_t fscache_n_object_created;
55796-extern atomic_t fscache_n_object_avail;
55797-extern atomic_t fscache_n_object_dead;
55798+extern atomic_unchecked_t fscache_n_object_alloc;
55799+extern atomic_unchecked_t fscache_n_object_no_alloc;
55800+extern atomic_unchecked_t fscache_n_object_lookups;
55801+extern atomic_unchecked_t fscache_n_object_lookups_negative;
55802+extern atomic_unchecked_t fscache_n_object_lookups_positive;
55803+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
55804+extern atomic_unchecked_t fscache_n_object_created;
55805+extern atomic_unchecked_t fscache_n_object_avail;
55806+extern atomic_unchecked_t fscache_n_object_dead;
55807
55808-extern atomic_t fscache_n_checkaux_none;
55809-extern atomic_t fscache_n_checkaux_okay;
55810-extern atomic_t fscache_n_checkaux_update;
55811-extern atomic_t fscache_n_checkaux_obsolete;
55812+extern atomic_unchecked_t fscache_n_checkaux_none;
55813+extern atomic_unchecked_t fscache_n_checkaux_okay;
55814+extern atomic_unchecked_t fscache_n_checkaux_update;
55815+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
55816
55817 extern atomic_t fscache_n_cop_alloc_object;
55818 extern atomic_t fscache_n_cop_lookup_object;
55819@@ -267,6 +267,11 @@ static inline void fscache_stat(atomic_t *stat)
55820 atomic_inc(stat);
55821 }
55822
55823+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
55824+{
55825+ atomic_inc_unchecked(stat);
55826+}
55827+
55828 static inline void fscache_stat_d(atomic_t *stat)
55829 {
55830 atomic_dec(stat);
55831@@ -279,6 +284,7 @@ extern const struct file_operations fscache_stats_fops;
55832
55833 #define __fscache_stat(stat) (NULL)
55834 #define fscache_stat(stat) do {} while (0)
55835+#define fscache_stat_unchecked(stat) do {} while (0)
55836 #define fscache_stat_d(stat) do {} while (0)
55837 #endif
55838
55839diff --git a/fs/fscache/object.c b/fs/fscache/object.c
55840index 50d41c1..10ee117 100644
55841--- a/fs/fscache/object.c
55842+++ b/fs/fscache/object.c
55843@@ -143,7 +143,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
55844 /* Invalidate an object on disk */
55845 case FSCACHE_OBJECT_INVALIDATING:
55846 clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
55847- fscache_stat(&fscache_n_invalidates_run);
55848+ fscache_stat_unchecked(&fscache_n_invalidates_run);
55849 fscache_stat(&fscache_n_cop_invalidate_object);
55850 fscache_invalidate_object(object);
55851 fscache_stat_d(&fscache_n_cop_invalidate_object);
55852@@ -153,7 +153,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
55853 /* update the object metadata on disk */
55854 case FSCACHE_OBJECT_UPDATING:
55855 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
55856- fscache_stat(&fscache_n_updates_run);
55857+ fscache_stat_unchecked(&fscache_n_updates_run);
55858 fscache_stat(&fscache_n_cop_update_object);
55859 object->cache->ops->update_object(object);
55860 fscache_stat_d(&fscache_n_cop_update_object);
55861@@ -242,7 +242,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
55862 spin_lock(&object->lock);
55863 object->state = FSCACHE_OBJECT_DEAD;
55864 spin_unlock(&object->lock);
55865- fscache_stat(&fscache_n_object_dead);
55866+ fscache_stat_unchecked(&fscache_n_object_dead);
55867 goto terminal_transit;
55868
55869 /* handle the parent cache of this object being withdrawn from
55870@@ -257,7 +257,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
55871 spin_lock(&object->lock);
55872 object->state = FSCACHE_OBJECT_DEAD;
55873 spin_unlock(&object->lock);
55874- fscache_stat(&fscache_n_object_dead);
55875+ fscache_stat_unchecked(&fscache_n_object_dead);
55876 goto terminal_transit;
55877
55878 /* complain about the object being woken up once it is
55879@@ -495,7 +495,7 @@ static void fscache_lookup_object(struct fscache_object *object)
55880 parent->cookie->def->name, cookie->def->name,
55881 object->cache->tag->name);
55882
55883- fscache_stat(&fscache_n_object_lookups);
55884+ fscache_stat_unchecked(&fscache_n_object_lookups);
55885 fscache_stat(&fscache_n_cop_lookup_object);
55886 ret = object->cache->ops->lookup_object(object);
55887 fscache_stat_d(&fscache_n_cop_lookup_object);
55888@@ -506,7 +506,7 @@ static void fscache_lookup_object(struct fscache_object *object)
55889 if (ret == -ETIMEDOUT) {
55890 /* probably stuck behind another object, so move this one to
55891 * the back of the queue */
55892- fscache_stat(&fscache_n_object_lookups_timed_out);
55893+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
55894 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
55895 }
55896
55897@@ -529,7 +529,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
55898
55899 spin_lock(&object->lock);
55900 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
55901- fscache_stat(&fscache_n_object_lookups_negative);
55902+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
55903
55904 /* transit here to allow write requests to begin stacking up
55905 * and read requests to begin returning ENODATA */
55906@@ -575,7 +575,7 @@ void fscache_obtained_object(struct fscache_object *object)
55907 * result, in which case there may be data available */
55908 spin_lock(&object->lock);
55909 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
55910- fscache_stat(&fscache_n_object_lookups_positive);
55911+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
55912
55913 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
55914
55915@@ -589,7 +589,7 @@ void fscache_obtained_object(struct fscache_object *object)
55916 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
55917 } else {
55918 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
55919- fscache_stat(&fscache_n_object_created);
55920+ fscache_stat_unchecked(&fscache_n_object_created);
55921
55922 object->state = FSCACHE_OBJECT_AVAILABLE;
55923 spin_unlock(&object->lock);
55924@@ -634,7 +634,7 @@ static void fscache_object_available(struct fscache_object *object)
55925 fscache_enqueue_dependents(object);
55926
55927 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
55928- fscache_stat(&fscache_n_object_avail);
55929+ fscache_stat_unchecked(&fscache_n_object_avail);
55930
55931 _leave("");
55932 }
55933@@ -894,7 +894,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
55934 enum fscache_checkaux result;
55935
55936 if (!object->cookie->def->check_aux) {
55937- fscache_stat(&fscache_n_checkaux_none);
55938+ fscache_stat_unchecked(&fscache_n_checkaux_none);
55939 return FSCACHE_CHECKAUX_OKAY;
55940 }
55941
55942@@ -903,17 +903,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
55943 switch (result) {
55944 /* entry okay as is */
55945 case FSCACHE_CHECKAUX_OKAY:
55946- fscache_stat(&fscache_n_checkaux_okay);
55947+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
55948 break;
55949
55950 /* entry requires update */
55951 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
55952- fscache_stat(&fscache_n_checkaux_update);
55953+ fscache_stat_unchecked(&fscache_n_checkaux_update);
55954 break;
55955
55956 /* entry requires deletion */
55957 case FSCACHE_CHECKAUX_OBSOLETE:
55958- fscache_stat(&fscache_n_checkaux_obsolete);
55959+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
55960 break;
55961
55962 default:
55963diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
55964index 762a9ec..2023284 100644
55965--- a/fs/fscache/operation.c
55966+++ b/fs/fscache/operation.c
55967@@ -17,7 +17,7 @@
55968 #include <linux/slab.h>
55969 #include "internal.h"
55970
55971-atomic_t fscache_op_debug_id;
55972+atomic_unchecked_t fscache_op_debug_id;
55973 EXPORT_SYMBOL(fscache_op_debug_id);
55974
55975 /**
55976@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
55977 ASSERTCMP(atomic_read(&op->usage), >, 0);
55978 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
55979
55980- fscache_stat(&fscache_n_op_enqueue);
55981+ fscache_stat_unchecked(&fscache_n_op_enqueue);
55982 switch (op->flags & FSCACHE_OP_TYPE) {
55983 case FSCACHE_OP_ASYNC:
55984 _debug("queue async");
55985@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
55986 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
55987 if (op->processor)
55988 fscache_enqueue_operation(op);
55989- fscache_stat(&fscache_n_op_run);
55990+ fscache_stat_unchecked(&fscache_n_op_run);
55991 }
55992
55993 /*
55994@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
55995 if (object->n_in_progress > 0) {
55996 atomic_inc(&op->usage);
55997 list_add_tail(&op->pend_link, &object->pending_ops);
55998- fscache_stat(&fscache_n_op_pend);
55999+ fscache_stat_unchecked(&fscache_n_op_pend);
56000 } else if (!list_empty(&object->pending_ops)) {
56001 atomic_inc(&op->usage);
56002 list_add_tail(&op->pend_link, &object->pending_ops);
56003- fscache_stat(&fscache_n_op_pend);
56004+ fscache_stat_unchecked(&fscache_n_op_pend);
56005 fscache_start_operations(object);
56006 } else {
56007 ASSERTCMP(object->n_in_progress, ==, 0);
56008@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
56009 object->n_exclusive++; /* reads and writes must wait */
56010 atomic_inc(&op->usage);
56011 list_add_tail(&op->pend_link, &object->pending_ops);
56012- fscache_stat(&fscache_n_op_pend);
56013+ fscache_stat_unchecked(&fscache_n_op_pend);
56014 ret = 0;
56015 } else {
56016 /* If we're in any other state, there must have been an I/O
56017@@ -215,11 +215,11 @@ int fscache_submit_op(struct fscache_object *object,
56018 if (object->n_exclusive > 0) {
56019 atomic_inc(&op->usage);
56020 list_add_tail(&op->pend_link, &object->pending_ops);
56021- fscache_stat(&fscache_n_op_pend);
56022+ fscache_stat_unchecked(&fscache_n_op_pend);
56023 } else if (!list_empty(&object->pending_ops)) {
56024 atomic_inc(&op->usage);
56025 list_add_tail(&op->pend_link, &object->pending_ops);
56026- fscache_stat(&fscache_n_op_pend);
56027+ fscache_stat_unchecked(&fscache_n_op_pend);
56028 fscache_start_operations(object);
56029 } else {
56030 ASSERTCMP(object->n_exclusive, ==, 0);
56031@@ -231,12 +231,12 @@ int fscache_submit_op(struct fscache_object *object,
56032 object->n_ops++;
56033 atomic_inc(&op->usage);
56034 list_add_tail(&op->pend_link, &object->pending_ops);
56035- fscache_stat(&fscache_n_op_pend);
56036+ fscache_stat_unchecked(&fscache_n_op_pend);
56037 ret = 0;
56038 } else if (object->state == FSCACHE_OBJECT_DYING ||
56039 object->state == FSCACHE_OBJECT_LC_DYING ||
56040 object->state == FSCACHE_OBJECT_WITHDRAWING) {
56041- fscache_stat(&fscache_n_op_rejected);
56042+ fscache_stat_unchecked(&fscache_n_op_rejected);
56043 op->state = FSCACHE_OP_ST_CANCELLED;
56044 ret = -ENOBUFS;
56045 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
56046@@ -315,7 +315,7 @@ int fscache_cancel_op(struct fscache_operation *op,
56047 ret = -EBUSY;
56048 if (op->state == FSCACHE_OP_ST_PENDING) {
56049 ASSERT(!list_empty(&op->pend_link));
56050- fscache_stat(&fscache_n_op_cancelled);
56051+ fscache_stat_unchecked(&fscache_n_op_cancelled);
56052 list_del_init(&op->pend_link);
56053 if (do_cancel)
56054 do_cancel(op);
56055@@ -347,7 +347,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
56056 while (!list_empty(&object->pending_ops)) {
56057 op = list_entry(object->pending_ops.next,
56058 struct fscache_operation, pend_link);
56059- fscache_stat(&fscache_n_op_cancelled);
56060+ fscache_stat_unchecked(&fscache_n_op_cancelled);
56061 list_del_init(&op->pend_link);
56062
56063 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
56064@@ -419,7 +419,7 @@ void fscache_put_operation(struct fscache_operation *op)
56065 op->state, ==, FSCACHE_OP_ST_CANCELLED);
56066 op->state = FSCACHE_OP_ST_DEAD;
56067
56068- fscache_stat(&fscache_n_op_release);
56069+ fscache_stat_unchecked(&fscache_n_op_release);
56070
56071 if (op->release) {
56072 op->release(op);
56073@@ -442,7 +442,7 @@ void fscache_put_operation(struct fscache_operation *op)
56074 * lock, and defer it otherwise */
56075 if (!spin_trylock(&object->lock)) {
56076 _debug("defer put");
56077- fscache_stat(&fscache_n_op_deferred_release);
56078+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
56079
56080 cache = object->cache;
56081 spin_lock(&cache->op_gc_list_lock);
56082@@ -495,7 +495,7 @@ void fscache_operation_gc(struct work_struct *work)
56083
56084 _debug("GC DEFERRED REL OBJ%x OP%x",
56085 object->debug_id, op->debug_id);
56086- fscache_stat(&fscache_n_op_gc);
56087+ fscache_stat_unchecked(&fscache_n_op_gc);
56088
56089 ASSERTCMP(atomic_read(&op->usage), ==, 0);
56090 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
56091diff --git a/fs/fscache/page.c b/fs/fscache/page.c
56092index ff000e5..c44ec6d 100644
56093--- a/fs/fscache/page.c
56094+++ b/fs/fscache/page.c
56095@@ -61,7 +61,7 @@ try_again:
56096 val = radix_tree_lookup(&cookie->stores, page->index);
56097 if (!val) {
56098 rcu_read_unlock();
56099- fscache_stat(&fscache_n_store_vmscan_not_storing);
56100+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
56101 __fscache_uncache_page(cookie, page);
56102 return true;
56103 }
56104@@ -91,11 +91,11 @@ try_again:
56105 spin_unlock(&cookie->stores_lock);
56106
56107 if (xpage) {
56108- fscache_stat(&fscache_n_store_vmscan_cancelled);
56109- fscache_stat(&fscache_n_store_radix_deletes);
56110+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
56111+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
56112 ASSERTCMP(xpage, ==, page);
56113 } else {
56114- fscache_stat(&fscache_n_store_vmscan_gone);
56115+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
56116 }
56117
56118 wake_up_bit(&cookie->flags, 0);
56119@@ -110,11 +110,11 @@ page_busy:
56120 * sleeping on memory allocation, so we may need to impose a timeout
56121 * too. */
56122 if (!(gfp & __GFP_WAIT)) {
56123- fscache_stat(&fscache_n_store_vmscan_busy);
56124+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
56125 return false;
56126 }
56127
56128- fscache_stat(&fscache_n_store_vmscan_wait);
56129+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
56130 __fscache_wait_on_page_write(cookie, page);
56131 gfp &= ~__GFP_WAIT;
56132 goto try_again;
56133@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
56134 FSCACHE_COOKIE_STORING_TAG);
56135 if (!radix_tree_tag_get(&cookie->stores, page->index,
56136 FSCACHE_COOKIE_PENDING_TAG)) {
56137- fscache_stat(&fscache_n_store_radix_deletes);
56138+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
56139 xpage = radix_tree_delete(&cookie->stores, page->index);
56140 }
56141 spin_unlock(&cookie->stores_lock);
56142@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
56143
56144 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
56145
56146- fscache_stat(&fscache_n_attr_changed_calls);
56147+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
56148
56149 if (fscache_object_is_active(object)) {
56150 fscache_stat(&fscache_n_cop_attr_changed);
56151@@ -187,11 +187,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
56152
56153 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
56154
56155- fscache_stat(&fscache_n_attr_changed);
56156+ fscache_stat_unchecked(&fscache_n_attr_changed);
56157
56158 op = kzalloc(sizeof(*op), GFP_KERNEL);
56159 if (!op) {
56160- fscache_stat(&fscache_n_attr_changed_nomem);
56161+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
56162 _leave(" = -ENOMEM");
56163 return -ENOMEM;
56164 }
56165@@ -209,7 +209,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
56166 if (fscache_submit_exclusive_op(object, op) < 0)
56167 goto nobufs;
56168 spin_unlock(&cookie->lock);
56169- fscache_stat(&fscache_n_attr_changed_ok);
56170+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
56171 fscache_put_operation(op);
56172 _leave(" = 0");
56173 return 0;
56174@@ -217,7 +217,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
56175 nobufs:
56176 spin_unlock(&cookie->lock);
56177 kfree(op);
56178- fscache_stat(&fscache_n_attr_changed_nobufs);
56179+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
56180 _leave(" = %d", -ENOBUFS);
56181 return -ENOBUFS;
56182 }
56183@@ -255,7 +255,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
56184 /* allocate a retrieval operation and attempt to submit it */
56185 op = kzalloc(sizeof(*op), GFP_NOIO);
56186 if (!op) {
56187- fscache_stat(&fscache_n_retrievals_nomem);
56188+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
56189 return NULL;
56190 }
56191
56192@@ -283,13 +283,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
56193 return 0;
56194 }
56195
56196- fscache_stat(&fscache_n_retrievals_wait);
56197+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
56198
56199 jif = jiffies;
56200 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
56201 fscache_wait_bit_interruptible,
56202 TASK_INTERRUPTIBLE) != 0) {
56203- fscache_stat(&fscache_n_retrievals_intr);
56204+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
56205 _leave(" = -ERESTARTSYS");
56206 return -ERESTARTSYS;
56207 }
56208@@ -318,8 +318,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
56209 */
56210 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
56211 struct fscache_retrieval *op,
56212- atomic_t *stat_op_waits,
56213- atomic_t *stat_object_dead)
56214+ atomic_unchecked_t *stat_op_waits,
56215+ atomic_unchecked_t *stat_object_dead)
56216 {
56217 int ret;
56218
56219@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
56220 goto check_if_dead;
56221
56222 _debug(">>> WT");
56223- fscache_stat(stat_op_waits);
56224+ fscache_stat_unchecked(stat_op_waits);
56225 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
56226 fscache_wait_bit_interruptible,
56227 TASK_INTERRUPTIBLE) != 0) {
56228@@ -344,14 +344,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
56229
56230 check_if_dead:
56231 if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
56232- fscache_stat(stat_object_dead);
56233+ fscache_stat_unchecked(stat_object_dead);
56234 _leave(" = -ENOBUFS [cancelled]");
56235 return -ENOBUFS;
56236 }
56237 if (unlikely(fscache_object_is_dead(object))) {
56238 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
56239 fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
56240- fscache_stat(stat_object_dead);
56241+ fscache_stat_unchecked(stat_object_dead);
56242 return -ENOBUFS;
56243 }
56244 return 0;
56245@@ -378,7 +378,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
56246
56247 _enter("%p,%p,,,", cookie, page);
56248
56249- fscache_stat(&fscache_n_retrievals);
56250+ fscache_stat_unchecked(&fscache_n_retrievals);
56251
56252 if (hlist_empty(&cookie->backing_objects))
56253 goto nobufs;
56254@@ -417,7 +417,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
56255 goto nobufs_unlock_dec;
56256 spin_unlock(&cookie->lock);
56257
56258- fscache_stat(&fscache_n_retrieval_ops);
56259+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
56260
56261 /* pin the netfs read context in case we need to do the actual netfs
56262 * read because we've encountered a cache read failure */
56263@@ -447,15 +447,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
56264
56265 error:
56266 if (ret == -ENOMEM)
56267- fscache_stat(&fscache_n_retrievals_nomem);
56268+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
56269 else if (ret == -ERESTARTSYS)
56270- fscache_stat(&fscache_n_retrievals_intr);
56271+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
56272 else if (ret == -ENODATA)
56273- fscache_stat(&fscache_n_retrievals_nodata);
56274+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
56275 else if (ret < 0)
56276- fscache_stat(&fscache_n_retrievals_nobufs);
56277+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
56278 else
56279- fscache_stat(&fscache_n_retrievals_ok);
56280+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
56281
56282 fscache_put_retrieval(op);
56283 _leave(" = %d", ret);
56284@@ -467,7 +467,7 @@ nobufs_unlock:
56285 spin_unlock(&cookie->lock);
56286 kfree(op);
56287 nobufs:
56288- fscache_stat(&fscache_n_retrievals_nobufs);
56289+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
56290 _leave(" = -ENOBUFS");
56291 return -ENOBUFS;
56292 }
56293@@ -505,7 +505,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
56294
56295 _enter("%p,,%d,,,", cookie, *nr_pages);
56296
56297- fscache_stat(&fscache_n_retrievals);
56298+ fscache_stat_unchecked(&fscache_n_retrievals);
56299
56300 if (hlist_empty(&cookie->backing_objects))
56301 goto nobufs;
56302@@ -541,7 +541,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
56303 goto nobufs_unlock_dec;
56304 spin_unlock(&cookie->lock);
56305
56306- fscache_stat(&fscache_n_retrieval_ops);
56307+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
56308
56309 /* pin the netfs read context in case we need to do the actual netfs
56310 * read because we've encountered a cache read failure */
56311@@ -571,15 +571,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
56312
56313 error:
56314 if (ret == -ENOMEM)
56315- fscache_stat(&fscache_n_retrievals_nomem);
56316+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
56317 else if (ret == -ERESTARTSYS)
56318- fscache_stat(&fscache_n_retrievals_intr);
56319+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
56320 else if (ret == -ENODATA)
56321- fscache_stat(&fscache_n_retrievals_nodata);
56322+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
56323 else if (ret < 0)
56324- fscache_stat(&fscache_n_retrievals_nobufs);
56325+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
56326 else
56327- fscache_stat(&fscache_n_retrievals_ok);
56328+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
56329
56330 fscache_put_retrieval(op);
56331 _leave(" = %d", ret);
56332@@ -591,7 +591,7 @@ nobufs_unlock:
56333 spin_unlock(&cookie->lock);
56334 kfree(op);
56335 nobufs:
56336- fscache_stat(&fscache_n_retrievals_nobufs);
56337+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
56338 _leave(" = -ENOBUFS");
56339 return -ENOBUFS;
56340 }
56341@@ -615,7 +615,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
56342
56343 _enter("%p,%p,,,", cookie, page);
56344
56345- fscache_stat(&fscache_n_allocs);
56346+ fscache_stat_unchecked(&fscache_n_allocs);
56347
56348 if (hlist_empty(&cookie->backing_objects))
56349 goto nobufs;
56350@@ -647,7 +647,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
56351 goto nobufs_unlock;
56352 spin_unlock(&cookie->lock);
56353
56354- fscache_stat(&fscache_n_alloc_ops);
56355+ fscache_stat_unchecked(&fscache_n_alloc_ops);
56356
56357 ret = fscache_wait_for_retrieval_activation(
56358 object, op,
56359@@ -663,11 +663,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
56360
56361 error:
56362 if (ret == -ERESTARTSYS)
56363- fscache_stat(&fscache_n_allocs_intr);
56364+ fscache_stat_unchecked(&fscache_n_allocs_intr);
56365 else if (ret < 0)
56366- fscache_stat(&fscache_n_allocs_nobufs);
56367+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
56368 else
56369- fscache_stat(&fscache_n_allocs_ok);
56370+ fscache_stat_unchecked(&fscache_n_allocs_ok);
56371
56372 fscache_put_retrieval(op);
56373 _leave(" = %d", ret);
56374@@ -677,7 +677,7 @@ nobufs_unlock:
56375 spin_unlock(&cookie->lock);
56376 kfree(op);
56377 nobufs:
56378- fscache_stat(&fscache_n_allocs_nobufs);
56379+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
56380 _leave(" = -ENOBUFS");
56381 return -ENOBUFS;
56382 }
56383@@ -736,7 +736,7 @@ static void fscache_write_op(struct fscache_operation *_op)
56384
56385 spin_lock(&cookie->stores_lock);
56386
56387- fscache_stat(&fscache_n_store_calls);
56388+ fscache_stat_unchecked(&fscache_n_store_calls);
56389
56390 /* find a page to store */
56391 page = NULL;
56392@@ -747,7 +747,7 @@ static void fscache_write_op(struct fscache_operation *_op)
56393 page = results[0];
56394 _debug("gang %d [%lx]", n, page->index);
56395 if (page->index > op->store_limit) {
56396- fscache_stat(&fscache_n_store_pages_over_limit);
56397+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
56398 goto superseded;
56399 }
56400
56401@@ -759,7 +759,7 @@ static void fscache_write_op(struct fscache_operation *_op)
56402 spin_unlock(&cookie->stores_lock);
56403 spin_unlock(&object->lock);
56404
56405- fscache_stat(&fscache_n_store_pages);
56406+ fscache_stat_unchecked(&fscache_n_store_pages);
56407 fscache_stat(&fscache_n_cop_write_page);
56408 ret = object->cache->ops->write_page(op, page);
56409 fscache_stat_d(&fscache_n_cop_write_page);
56410@@ -860,7 +860,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
56411 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
56412 ASSERT(PageFsCache(page));
56413
56414- fscache_stat(&fscache_n_stores);
56415+ fscache_stat_unchecked(&fscache_n_stores);
56416
56417 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
56418 _leave(" = -ENOBUFS [invalidating]");
56419@@ -916,7 +916,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
56420 spin_unlock(&cookie->stores_lock);
56421 spin_unlock(&object->lock);
56422
56423- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
56424+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
56425 op->store_limit = object->store_limit;
56426
56427 if (fscache_submit_op(object, &op->op) < 0)
56428@@ -924,8 +924,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
56429
56430 spin_unlock(&cookie->lock);
56431 radix_tree_preload_end();
56432- fscache_stat(&fscache_n_store_ops);
56433- fscache_stat(&fscache_n_stores_ok);
56434+ fscache_stat_unchecked(&fscache_n_store_ops);
56435+ fscache_stat_unchecked(&fscache_n_stores_ok);
56436
56437 /* the work queue now carries its own ref on the object */
56438 fscache_put_operation(&op->op);
56439@@ -933,14 +933,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
56440 return 0;
56441
56442 already_queued:
56443- fscache_stat(&fscache_n_stores_again);
56444+ fscache_stat_unchecked(&fscache_n_stores_again);
56445 already_pending:
56446 spin_unlock(&cookie->stores_lock);
56447 spin_unlock(&object->lock);
56448 spin_unlock(&cookie->lock);
56449 radix_tree_preload_end();
56450 kfree(op);
56451- fscache_stat(&fscache_n_stores_ok);
56452+ fscache_stat_unchecked(&fscache_n_stores_ok);
56453 _leave(" = 0");
56454 return 0;
56455
56456@@ -959,14 +959,14 @@ nobufs:
56457 spin_unlock(&cookie->lock);
56458 radix_tree_preload_end();
56459 kfree(op);
56460- fscache_stat(&fscache_n_stores_nobufs);
56461+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
56462 _leave(" = -ENOBUFS");
56463 return -ENOBUFS;
56464
56465 nomem_free:
56466 kfree(op);
56467 nomem:
56468- fscache_stat(&fscache_n_stores_oom);
56469+ fscache_stat_unchecked(&fscache_n_stores_oom);
56470 _leave(" = -ENOMEM");
56471 return -ENOMEM;
56472 }
56473@@ -984,7 +984,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
56474 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
56475 ASSERTCMP(page, !=, NULL);
56476
56477- fscache_stat(&fscache_n_uncaches);
56478+ fscache_stat_unchecked(&fscache_n_uncaches);
56479
56480 /* cache withdrawal may beat us to it */
56481 if (!PageFsCache(page))
56482@@ -1035,7 +1035,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
56483 struct fscache_cookie *cookie = op->op.object->cookie;
56484
56485 #ifdef CONFIG_FSCACHE_STATS
56486- atomic_inc(&fscache_n_marks);
56487+ atomic_inc_unchecked(&fscache_n_marks);
56488 #endif
56489
56490 _debug("- mark %p{%lx}", page, page->index);
56491diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
56492index 40d13c7..ddf52b9 100644
56493--- a/fs/fscache/stats.c
56494+++ b/fs/fscache/stats.c
56495@@ -18,99 +18,99 @@
56496 /*
56497 * operation counters
56498 */
56499-atomic_t fscache_n_op_pend;
56500-atomic_t fscache_n_op_run;
56501-atomic_t fscache_n_op_enqueue;
56502-atomic_t fscache_n_op_requeue;
56503-atomic_t fscache_n_op_deferred_release;
56504-atomic_t fscache_n_op_release;
56505-atomic_t fscache_n_op_gc;
56506-atomic_t fscache_n_op_cancelled;
56507-atomic_t fscache_n_op_rejected;
56508+atomic_unchecked_t fscache_n_op_pend;
56509+atomic_unchecked_t fscache_n_op_run;
56510+atomic_unchecked_t fscache_n_op_enqueue;
56511+atomic_unchecked_t fscache_n_op_requeue;
56512+atomic_unchecked_t fscache_n_op_deferred_release;
56513+atomic_unchecked_t fscache_n_op_release;
56514+atomic_unchecked_t fscache_n_op_gc;
56515+atomic_unchecked_t fscache_n_op_cancelled;
56516+atomic_unchecked_t fscache_n_op_rejected;
56517
56518-atomic_t fscache_n_attr_changed;
56519-atomic_t fscache_n_attr_changed_ok;
56520-atomic_t fscache_n_attr_changed_nobufs;
56521-atomic_t fscache_n_attr_changed_nomem;
56522-atomic_t fscache_n_attr_changed_calls;
56523+atomic_unchecked_t fscache_n_attr_changed;
56524+atomic_unchecked_t fscache_n_attr_changed_ok;
56525+atomic_unchecked_t fscache_n_attr_changed_nobufs;
56526+atomic_unchecked_t fscache_n_attr_changed_nomem;
56527+atomic_unchecked_t fscache_n_attr_changed_calls;
56528
56529-atomic_t fscache_n_allocs;
56530-atomic_t fscache_n_allocs_ok;
56531-atomic_t fscache_n_allocs_wait;
56532-atomic_t fscache_n_allocs_nobufs;
56533-atomic_t fscache_n_allocs_intr;
56534-atomic_t fscache_n_allocs_object_dead;
56535-atomic_t fscache_n_alloc_ops;
56536-atomic_t fscache_n_alloc_op_waits;
56537+atomic_unchecked_t fscache_n_allocs;
56538+atomic_unchecked_t fscache_n_allocs_ok;
56539+atomic_unchecked_t fscache_n_allocs_wait;
56540+atomic_unchecked_t fscache_n_allocs_nobufs;
56541+atomic_unchecked_t fscache_n_allocs_intr;
56542+atomic_unchecked_t fscache_n_allocs_object_dead;
56543+atomic_unchecked_t fscache_n_alloc_ops;
56544+atomic_unchecked_t fscache_n_alloc_op_waits;
56545
56546-atomic_t fscache_n_retrievals;
56547-atomic_t fscache_n_retrievals_ok;
56548-atomic_t fscache_n_retrievals_wait;
56549-atomic_t fscache_n_retrievals_nodata;
56550-atomic_t fscache_n_retrievals_nobufs;
56551-atomic_t fscache_n_retrievals_intr;
56552-atomic_t fscache_n_retrievals_nomem;
56553-atomic_t fscache_n_retrievals_object_dead;
56554-atomic_t fscache_n_retrieval_ops;
56555-atomic_t fscache_n_retrieval_op_waits;
56556+atomic_unchecked_t fscache_n_retrievals;
56557+atomic_unchecked_t fscache_n_retrievals_ok;
56558+atomic_unchecked_t fscache_n_retrievals_wait;
56559+atomic_unchecked_t fscache_n_retrievals_nodata;
56560+atomic_unchecked_t fscache_n_retrievals_nobufs;
56561+atomic_unchecked_t fscache_n_retrievals_intr;
56562+atomic_unchecked_t fscache_n_retrievals_nomem;
56563+atomic_unchecked_t fscache_n_retrievals_object_dead;
56564+atomic_unchecked_t fscache_n_retrieval_ops;
56565+atomic_unchecked_t fscache_n_retrieval_op_waits;
56566
56567-atomic_t fscache_n_stores;
56568-atomic_t fscache_n_stores_ok;
56569-atomic_t fscache_n_stores_again;
56570-atomic_t fscache_n_stores_nobufs;
56571-atomic_t fscache_n_stores_oom;
56572-atomic_t fscache_n_store_ops;
56573-atomic_t fscache_n_store_calls;
56574-atomic_t fscache_n_store_pages;
56575-atomic_t fscache_n_store_radix_deletes;
56576-atomic_t fscache_n_store_pages_over_limit;
56577+atomic_unchecked_t fscache_n_stores;
56578+atomic_unchecked_t fscache_n_stores_ok;
56579+atomic_unchecked_t fscache_n_stores_again;
56580+atomic_unchecked_t fscache_n_stores_nobufs;
56581+atomic_unchecked_t fscache_n_stores_oom;
56582+atomic_unchecked_t fscache_n_store_ops;
56583+atomic_unchecked_t fscache_n_store_calls;
56584+atomic_unchecked_t fscache_n_store_pages;
56585+atomic_unchecked_t fscache_n_store_radix_deletes;
56586+atomic_unchecked_t fscache_n_store_pages_over_limit;
56587
56588-atomic_t fscache_n_store_vmscan_not_storing;
56589-atomic_t fscache_n_store_vmscan_gone;
56590-atomic_t fscache_n_store_vmscan_busy;
56591-atomic_t fscache_n_store_vmscan_cancelled;
56592-atomic_t fscache_n_store_vmscan_wait;
56593+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
56594+atomic_unchecked_t fscache_n_store_vmscan_gone;
56595+atomic_unchecked_t fscache_n_store_vmscan_busy;
56596+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
56597+atomic_unchecked_t fscache_n_store_vmscan_wait;
56598
56599-atomic_t fscache_n_marks;
56600-atomic_t fscache_n_uncaches;
56601+atomic_unchecked_t fscache_n_marks;
56602+atomic_unchecked_t fscache_n_uncaches;
56603
56604-atomic_t fscache_n_acquires;
56605-atomic_t fscache_n_acquires_null;
56606-atomic_t fscache_n_acquires_no_cache;
56607-atomic_t fscache_n_acquires_ok;
56608-atomic_t fscache_n_acquires_nobufs;
56609-atomic_t fscache_n_acquires_oom;
56610+atomic_unchecked_t fscache_n_acquires;
56611+atomic_unchecked_t fscache_n_acquires_null;
56612+atomic_unchecked_t fscache_n_acquires_no_cache;
56613+atomic_unchecked_t fscache_n_acquires_ok;
56614+atomic_unchecked_t fscache_n_acquires_nobufs;
56615+atomic_unchecked_t fscache_n_acquires_oom;
56616
56617-atomic_t fscache_n_invalidates;
56618-atomic_t fscache_n_invalidates_run;
56619+atomic_unchecked_t fscache_n_invalidates;
56620+atomic_unchecked_t fscache_n_invalidates_run;
56621
56622-atomic_t fscache_n_updates;
56623-atomic_t fscache_n_updates_null;
56624-atomic_t fscache_n_updates_run;
56625+atomic_unchecked_t fscache_n_updates;
56626+atomic_unchecked_t fscache_n_updates_null;
56627+atomic_unchecked_t fscache_n_updates_run;
56628
56629-atomic_t fscache_n_relinquishes;
56630-atomic_t fscache_n_relinquishes_null;
56631-atomic_t fscache_n_relinquishes_waitcrt;
56632-atomic_t fscache_n_relinquishes_retire;
56633+atomic_unchecked_t fscache_n_relinquishes;
56634+atomic_unchecked_t fscache_n_relinquishes_null;
56635+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
56636+atomic_unchecked_t fscache_n_relinquishes_retire;
56637
56638-atomic_t fscache_n_cookie_index;
56639-atomic_t fscache_n_cookie_data;
56640-atomic_t fscache_n_cookie_special;
56641+atomic_unchecked_t fscache_n_cookie_index;
56642+atomic_unchecked_t fscache_n_cookie_data;
56643+atomic_unchecked_t fscache_n_cookie_special;
56644
56645-atomic_t fscache_n_object_alloc;
56646-atomic_t fscache_n_object_no_alloc;
56647-atomic_t fscache_n_object_lookups;
56648-atomic_t fscache_n_object_lookups_negative;
56649-atomic_t fscache_n_object_lookups_positive;
56650-atomic_t fscache_n_object_lookups_timed_out;
56651-atomic_t fscache_n_object_created;
56652-atomic_t fscache_n_object_avail;
56653-atomic_t fscache_n_object_dead;
56654+atomic_unchecked_t fscache_n_object_alloc;
56655+atomic_unchecked_t fscache_n_object_no_alloc;
56656+atomic_unchecked_t fscache_n_object_lookups;
56657+atomic_unchecked_t fscache_n_object_lookups_negative;
56658+atomic_unchecked_t fscache_n_object_lookups_positive;
56659+atomic_unchecked_t fscache_n_object_lookups_timed_out;
56660+atomic_unchecked_t fscache_n_object_created;
56661+atomic_unchecked_t fscache_n_object_avail;
56662+atomic_unchecked_t fscache_n_object_dead;
56663
56664-atomic_t fscache_n_checkaux_none;
56665-atomic_t fscache_n_checkaux_okay;
56666-atomic_t fscache_n_checkaux_update;
56667-atomic_t fscache_n_checkaux_obsolete;
56668+atomic_unchecked_t fscache_n_checkaux_none;
56669+atomic_unchecked_t fscache_n_checkaux_okay;
56670+atomic_unchecked_t fscache_n_checkaux_update;
56671+atomic_unchecked_t fscache_n_checkaux_obsolete;
56672
56673 atomic_t fscache_n_cop_alloc_object;
56674 atomic_t fscache_n_cop_lookup_object;
56675@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
56676 seq_puts(m, "FS-Cache statistics\n");
56677
56678 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
56679- atomic_read(&fscache_n_cookie_index),
56680- atomic_read(&fscache_n_cookie_data),
56681- atomic_read(&fscache_n_cookie_special));
56682+ atomic_read_unchecked(&fscache_n_cookie_index),
56683+ atomic_read_unchecked(&fscache_n_cookie_data),
56684+ atomic_read_unchecked(&fscache_n_cookie_special));
56685
56686 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
56687- atomic_read(&fscache_n_object_alloc),
56688- atomic_read(&fscache_n_object_no_alloc),
56689- atomic_read(&fscache_n_object_avail),
56690- atomic_read(&fscache_n_object_dead));
56691+ atomic_read_unchecked(&fscache_n_object_alloc),
56692+ atomic_read_unchecked(&fscache_n_object_no_alloc),
56693+ atomic_read_unchecked(&fscache_n_object_avail),
56694+ atomic_read_unchecked(&fscache_n_object_dead));
56695 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
56696- atomic_read(&fscache_n_checkaux_none),
56697- atomic_read(&fscache_n_checkaux_okay),
56698- atomic_read(&fscache_n_checkaux_update),
56699- atomic_read(&fscache_n_checkaux_obsolete));
56700+ atomic_read_unchecked(&fscache_n_checkaux_none),
56701+ atomic_read_unchecked(&fscache_n_checkaux_okay),
56702+ atomic_read_unchecked(&fscache_n_checkaux_update),
56703+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
56704
56705 seq_printf(m, "Pages : mrk=%u unc=%u\n",
56706- atomic_read(&fscache_n_marks),
56707- atomic_read(&fscache_n_uncaches));
56708+ atomic_read_unchecked(&fscache_n_marks),
56709+ atomic_read_unchecked(&fscache_n_uncaches));
56710
56711 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
56712 " oom=%u\n",
56713- atomic_read(&fscache_n_acquires),
56714- atomic_read(&fscache_n_acquires_null),
56715- atomic_read(&fscache_n_acquires_no_cache),
56716- atomic_read(&fscache_n_acquires_ok),
56717- atomic_read(&fscache_n_acquires_nobufs),
56718- atomic_read(&fscache_n_acquires_oom));
56719+ atomic_read_unchecked(&fscache_n_acquires),
56720+ atomic_read_unchecked(&fscache_n_acquires_null),
56721+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
56722+ atomic_read_unchecked(&fscache_n_acquires_ok),
56723+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
56724+ atomic_read_unchecked(&fscache_n_acquires_oom));
56725
56726 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
56727- atomic_read(&fscache_n_object_lookups),
56728- atomic_read(&fscache_n_object_lookups_negative),
56729- atomic_read(&fscache_n_object_lookups_positive),
56730- atomic_read(&fscache_n_object_created),
56731- atomic_read(&fscache_n_object_lookups_timed_out));
56732+ atomic_read_unchecked(&fscache_n_object_lookups),
56733+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
56734+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
56735+ atomic_read_unchecked(&fscache_n_object_created),
56736+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
56737
56738 seq_printf(m, "Invals : n=%u run=%u\n",
56739- atomic_read(&fscache_n_invalidates),
56740- atomic_read(&fscache_n_invalidates_run));
56741+ atomic_read_unchecked(&fscache_n_invalidates),
56742+ atomic_read_unchecked(&fscache_n_invalidates_run));
56743
56744 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
56745- atomic_read(&fscache_n_updates),
56746- atomic_read(&fscache_n_updates_null),
56747- atomic_read(&fscache_n_updates_run));
56748+ atomic_read_unchecked(&fscache_n_updates),
56749+ atomic_read_unchecked(&fscache_n_updates_null),
56750+ atomic_read_unchecked(&fscache_n_updates_run));
56751
56752 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
56753- atomic_read(&fscache_n_relinquishes),
56754- atomic_read(&fscache_n_relinquishes_null),
56755- atomic_read(&fscache_n_relinquishes_waitcrt),
56756- atomic_read(&fscache_n_relinquishes_retire));
56757+ atomic_read_unchecked(&fscache_n_relinquishes),
56758+ atomic_read_unchecked(&fscache_n_relinquishes_null),
56759+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
56760+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
56761
56762 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
56763- atomic_read(&fscache_n_attr_changed),
56764- atomic_read(&fscache_n_attr_changed_ok),
56765- atomic_read(&fscache_n_attr_changed_nobufs),
56766- atomic_read(&fscache_n_attr_changed_nomem),
56767- atomic_read(&fscache_n_attr_changed_calls));
56768+ atomic_read_unchecked(&fscache_n_attr_changed),
56769+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
56770+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
56771+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
56772+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
56773
56774 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
56775- atomic_read(&fscache_n_allocs),
56776- atomic_read(&fscache_n_allocs_ok),
56777- atomic_read(&fscache_n_allocs_wait),
56778- atomic_read(&fscache_n_allocs_nobufs),
56779- atomic_read(&fscache_n_allocs_intr));
56780+ atomic_read_unchecked(&fscache_n_allocs),
56781+ atomic_read_unchecked(&fscache_n_allocs_ok),
56782+ atomic_read_unchecked(&fscache_n_allocs_wait),
56783+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
56784+ atomic_read_unchecked(&fscache_n_allocs_intr));
56785 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
56786- atomic_read(&fscache_n_alloc_ops),
56787- atomic_read(&fscache_n_alloc_op_waits),
56788- atomic_read(&fscache_n_allocs_object_dead));
56789+ atomic_read_unchecked(&fscache_n_alloc_ops),
56790+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
56791+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
56792
56793 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
56794 " int=%u oom=%u\n",
56795- atomic_read(&fscache_n_retrievals),
56796- atomic_read(&fscache_n_retrievals_ok),
56797- atomic_read(&fscache_n_retrievals_wait),
56798- atomic_read(&fscache_n_retrievals_nodata),
56799- atomic_read(&fscache_n_retrievals_nobufs),
56800- atomic_read(&fscache_n_retrievals_intr),
56801- atomic_read(&fscache_n_retrievals_nomem));
56802+ atomic_read_unchecked(&fscache_n_retrievals),
56803+ atomic_read_unchecked(&fscache_n_retrievals_ok),
56804+ atomic_read_unchecked(&fscache_n_retrievals_wait),
56805+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
56806+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
56807+ atomic_read_unchecked(&fscache_n_retrievals_intr),
56808+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
56809 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
56810- atomic_read(&fscache_n_retrieval_ops),
56811- atomic_read(&fscache_n_retrieval_op_waits),
56812- atomic_read(&fscache_n_retrievals_object_dead));
56813+ atomic_read_unchecked(&fscache_n_retrieval_ops),
56814+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
56815+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
56816
56817 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
56818- atomic_read(&fscache_n_stores),
56819- atomic_read(&fscache_n_stores_ok),
56820- atomic_read(&fscache_n_stores_again),
56821- atomic_read(&fscache_n_stores_nobufs),
56822- atomic_read(&fscache_n_stores_oom));
56823+ atomic_read_unchecked(&fscache_n_stores),
56824+ atomic_read_unchecked(&fscache_n_stores_ok),
56825+ atomic_read_unchecked(&fscache_n_stores_again),
56826+ atomic_read_unchecked(&fscache_n_stores_nobufs),
56827+ atomic_read_unchecked(&fscache_n_stores_oom));
56828 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
56829- atomic_read(&fscache_n_store_ops),
56830- atomic_read(&fscache_n_store_calls),
56831- atomic_read(&fscache_n_store_pages),
56832- atomic_read(&fscache_n_store_radix_deletes),
56833- atomic_read(&fscache_n_store_pages_over_limit));
56834+ atomic_read_unchecked(&fscache_n_store_ops),
56835+ atomic_read_unchecked(&fscache_n_store_calls),
56836+ atomic_read_unchecked(&fscache_n_store_pages),
56837+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
56838+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
56839
56840 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
56841- atomic_read(&fscache_n_store_vmscan_not_storing),
56842- atomic_read(&fscache_n_store_vmscan_gone),
56843- atomic_read(&fscache_n_store_vmscan_busy),
56844- atomic_read(&fscache_n_store_vmscan_cancelled),
56845- atomic_read(&fscache_n_store_vmscan_wait));
56846+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
56847+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
56848+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
56849+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
56850+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
56851
56852 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
56853- atomic_read(&fscache_n_op_pend),
56854- atomic_read(&fscache_n_op_run),
56855- atomic_read(&fscache_n_op_enqueue),
56856- atomic_read(&fscache_n_op_cancelled),
56857- atomic_read(&fscache_n_op_rejected));
56858+ atomic_read_unchecked(&fscache_n_op_pend),
56859+ atomic_read_unchecked(&fscache_n_op_run),
56860+ atomic_read_unchecked(&fscache_n_op_enqueue),
56861+ atomic_read_unchecked(&fscache_n_op_cancelled),
56862+ atomic_read_unchecked(&fscache_n_op_rejected));
56863 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
56864- atomic_read(&fscache_n_op_deferred_release),
56865- atomic_read(&fscache_n_op_release),
56866- atomic_read(&fscache_n_op_gc));
56867+ atomic_read_unchecked(&fscache_n_op_deferred_release),
56868+ atomic_read_unchecked(&fscache_n_op_release),
56869+ atomic_read_unchecked(&fscache_n_op_gc));
56870
56871 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
56872 atomic_read(&fscache_n_cop_alloc_object),
56873diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
56874index aef34b1..59bfd7b 100644
56875--- a/fs/fuse/cuse.c
56876+++ b/fs/fuse/cuse.c
56877@@ -600,10 +600,12 @@ static int __init cuse_init(void)
56878 INIT_LIST_HEAD(&cuse_conntbl[i]);
56879
56880 /* inherit and extend fuse_dev_operations */
56881- cuse_channel_fops = fuse_dev_operations;
56882- cuse_channel_fops.owner = THIS_MODULE;
56883- cuse_channel_fops.open = cuse_channel_open;
56884- cuse_channel_fops.release = cuse_channel_release;
56885+ pax_open_kernel();
56886+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
56887+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
56888+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
56889+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
56890+ pax_close_kernel();
56891
56892 cuse_class = class_create(THIS_MODULE, "cuse");
56893 if (IS_ERR(cuse_class))
56894diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
56895index 1d55f94..088da65 100644
56896--- a/fs/fuse/dev.c
56897+++ b/fs/fuse/dev.c
56898@@ -1339,7 +1339,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
56899 ret = 0;
56900 pipe_lock(pipe);
56901
56902- if (!pipe->readers) {
56903+ if (!atomic_read(&pipe->readers)) {
56904 send_sig(SIGPIPE, current, 0);
56905 if (!ret)
56906 ret = -EPIPE;
56907@@ -1364,7 +1364,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
56908 page_nr++;
56909 ret += buf->len;
56910
56911- if (pipe->files)
56912+ if (atomic_read(&pipe->files))
56913 do_wakeup = 1;
56914 }
56915
56916diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
56917index 5b12746..b481b03 100644
56918--- a/fs/fuse/dir.c
56919+++ b/fs/fuse/dir.c
56920@@ -1437,7 +1437,7 @@ static char *read_link(struct dentry *dentry)
56921 return link;
56922 }
56923
56924-static void free_link(char *link)
56925+static void free_link(const char *link)
56926 {
56927 if (!IS_ERR(link))
56928 free_page((unsigned long) link);
56929diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
56930index 62b484e..0f9a140 100644
56931--- a/fs/gfs2/inode.c
56932+++ b/fs/gfs2/inode.c
56933@@ -1441,7 +1441,7 @@ out:
56934
56935 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
56936 {
56937- char *s = nd_get_link(nd);
56938+ const char *s = nd_get_link(nd);
56939 if (!IS_ERR(s))
56940 kfree(s);
56941 }
56942diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
56943index a3f868a..bb308ae 100644
56944--- a/fs/hugetlbfs/inode.c
56945+++ b/fs/hugetlbfs/inode.c
56946@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
56947 struct mm_struct *mm = current->mm;
56948 struct vm_area_struct *vma;
56949 struct hstate *h = hstate_file(file);
56950+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
56951 struct vm_unmapped_area_info info;
56952
56953 if (len & ~huge_page_mask(h))
56954@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
56955 return addr;
56956 }
56957
56958+#ifdef CONFIG_PAX_RANDMMAP
56959+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
56960+#endif
56961+
56962 if (addr) {
56963 addr = ALIGN(addr, huge_page_size(h));
56964 vma = find_vma(mm, addr);
56965- if (TASK_SIZE - len >= addr &&
56966- (!vma || addr + len <= vma->vm_start))
56967+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
56968 return addr;
56969 }
56970
56971 info.flags = 0;
56972 info.length = len;
56973 info.low_limit = TASK_UNMAPPED_BASE;
56974+
56975+#ifdef CONFIG_PAX_RANDMMAP
56976+ if (mm->pax_flags & MF_PAX_RANDMMAP)
56977+ info.low_limit += mm->delta_mmap;
56978+#endif
56979+
56980 info.high_limit = TASK_SIZE;
56981 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
56982 info.align_offset = 0;
56983@@ -898,7 +908,7 @@ static struct file_system_type hugetlbfs_fs_type = {
56984 };
56985 MODULE_ALIAS_FS("hugetlbfs");
56986
56987-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
56988+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
56989
56990 static int can_do_hugetlb_shm(void)
56991 {
56992diff --git a/fs/inode.c b/fs/inode.c
56993index 00d5fc3..98ce7d7 100644
56994--- a/fs/inode.c
56995+++ b/fs/inode.c
56996@@ -878,8 +878,8 @@ unsigned int get_next_ino(void)
56997
56998 #ifdef CONFIG_SMP
56999 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
57000- static atomic_t shared_last_ino;
57001- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
57002+ static atomic_unchecked_t shared_last_ino;
57003+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
57004
57005 res = next - LAST_INO_BATCH;
57006 }
57007diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
57008index 4a6cf28..d3a29d3 100644
57009--- a/fs/jffs2/erase.c
57010+++ b/fs/jffs2/erase.c
57011@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
57012 struct jffs2_unknown_node marker = {
57013 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
57014 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
57015- .totlen = cpu_to_je32(c->cleanmarker_size)
57016+ .totlen = cpu_to_je32(c->cleanmarker_size),
57017+ .hdr_crc = cpu_to_je32(0)
57018 };
57019
57020 jffs2_prealloc_raw_node_refs(c, jeb, 1);
57021diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
57022index a6597d6..41b30ec 100644
57023--- a/fs/jffs2/wbuf.c
57024+++ b/fs/jffs2/wbuf.c
57025@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
57026 {
57027 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
57028 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
57029- .totlen = constant_cpu_to_je32(8)
57030+ .totlen = constant_cpu_to_je32(8),
57031+ .hdr_crc = constant_cpu_to_je32(0)
57032 };
57033
57034 /*
57035diff --git a/fs/jfs/super.c b/fs/jfs/super.c
57036index 788e0a9..8433098 100644
57037--- a/fs/jfs/super.c
57038+++ b/fs/jfs/super.c
57039@@ -878,7 +878,7 @@ static int __init init_jfs_fs(void)
57040
57041 jfs_inode_cachep =
57042 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
57043- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
57044+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
57045 init_once);
57046 if (jfs_inode_cachep == NULL)
57047 return -ENOMEM;
57048diff --git a/fs/libfs.c b/fs/libfs.c
57049index 916da8c..1588998 100644
57050--- a/fs/libfs.c
57051+++ b/fs/libfs.c
57052@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
57053
57054 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
57055 struct dentry *next;
57056+ char d_name[sizeof(next->d_iname)];
57057+ const unsigned char *name;
57058+
57059 next = list_entry(p, struct dentry, d_u.d_child);
57060 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
57061 if (!simple_positive(next)) {
57062@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
57063
57064 spin_unlock(&next->d_lock);
57065 spin_unlock(&dentry->d_lock);
57066- if (filldir(dirent, next->d_name.name,
57067+ name = next->d_name.name;
57068+ if (name == next->d_iname) {
57069+ memcpy(d_name, name, next->d_name.len);
57070+ name = d_name;
57071+ }
57072+ if (filldir(dirent, name,
57073 next->d_name.len, filp->f_pos,
57074 next->d_inode->i_ino,
57075 dt_type(next->d_inode)) < 0)
57076diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
57077index acd3947..1f896e2 100644
57078--- a/fs/lockd/clntproc.c
57079+++ b/fs/lockd/clntproc.c
57080@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
57081 /*
57082 * Cookie counter for NLM requests
57083 */
57084-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
57085+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
57086
57087 void nlmclnt_next_cookie(struct nlm_cookie *c)
57088 {
57089- u32 cookie = atomic_inc_return(&nlm_cookie);
57090+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
57091
57092 memcpy(c->data, &cookie, 4);
57093 c->len=4;
57094diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
57095index a2aa97d..10d6c41 100644
57096--- a/fs/lockd/svc.c
57097+++ b/fs/lockd/svc.c
57098@@ -305,7 +305,7 @@ static int lockd_start_svc(struct svc_serv *serv)
57099 svc_sock_update_bufs(serv);
57100 serv->sv_maxconn = nlm_max_connections;
57101
57102- nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
57103+ nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, "%s", serv->sv_name);
57104 if (IS_ERR(nlmsvc_task)) {
57105 error = PTR_ERR(nlmsvc_task);
57106 printk(KERN_WARNING
57107diff --git a/fs/locks.c b/fs/locks.c
57108index cb424a4..850e4dd 100644
57109--- a/fs/locks.c
57110+++ b/fs/locks.c
57111@@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
57112 return;
57113
57114 if (filp->f_op && filp->f_op->flock) {
57115- struct file_lock fl = {
57116+ struct file_lock flock = {
57117 .fl_pid = current->tgid,
57118 .fl_file = filp,
57119 .fl_flags = FL_FLOCK,
57120 .fl_type = F_UNLCK,
57121 .fl_end = OFFSET_MAX,
57122 };
57123- filp->f_op->flock(filp, F_SETLKW, &fl);
57124- if (fl.fl_ops && fl.fl_ops->fl_release_private)
57125- fl.fl_ops->fl_release_private(&fl);
57126+ filp->f_op->flock(filp, F_SETLKW, &flock);
57127+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
57128+ flock.fl_ops->fl_release_private(&flock);
57129 }
57130
57131 lock_flocks();
57132diff --git a/fs/namei.c b/fs/namei.c
57133index 9ed9361..2b72db1 100644
57134--- a/fs/namei.c
57135+++ b/fs/namei.c
57136@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
57137 if (ret != -EACCES)
57138 return ret;
57139
57140+#ifdef CONFIG_GRKERNSEC
57141+ /* we'll block if we have to log due to a denied capability use */
57142+ if (mask & MAY_NOT_BLOCK)
57143+ return -ECHILD;
57144+#endif
57145+
57146 if (S_ISDIR(inode->i_mode)) {
57147 /* DACs are overridable for directories */
57148- if (inode_capable(inode, CAP_DAC_OVERRIDE))
57149- return 0;
57150 if (!(mask & MAY_WRITE))
57151- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
57152+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
57153+ inode_capable(inode, CAP_DAC_READ_SEARCH))
57154 return 0;
57155+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
57156+ return 0;
57157 return -EACCES;
57158 }
57159 /*
57160+ * Searching includes executable on directories, else just read.
57161+ */
57162+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
57163+ if (mask == MAY_READ)
57164+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
57165+ inode_capable(inode, CAP_DAC_READ_SEARCH))
57166+ return 0;
57167+
57168+ /*
57169 * Read/write DACs are always overridable.
57170 * Executable DACs are overridable when there is
57171 * at least one exec bit set.
57172@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
57173 if (inode_capable(inode, CAP_DAC_OVERRIDE))
57174 return 0;
57175
57176- /*
57177- * Searching includes executable on directories, else just read.
57178- */
57179- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
57180- if (mask == MAY_READ)
57181- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
57182- return 0;
57183-
57184 return -EACCES;
57185 }
57186
57187@@ -820,7 +828,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
57188 {
57189 struct dentry *dentry = link->dentry;
57190 int error;
57191- char *s;
57192+ const char *s;
57193
57194 BUG_ON(nd->flags & LOOKUP_RCU);
57195
57196@@ -841,6 +849,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
57197 if (error)
57198 goto out_put_nd_path;
57199
57200+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
57201+ dentry->d_inode, dentry, nd->path.mnt)) {
57202+ error = -EACCES;
57203+ goto out_put_nd_path;
57204+ }
57205+
57206 nd->last_type = LAST_BIND;
57207 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
57208 error = PTR_ERR(*p);
57209@@ -1588,6 +1602,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
57210 if (res)
57211 break;
57212 res = walk_component(nd, path, LOOKUP_FOLLOW);
57213+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
57214+ res = -EACCES;
57215 put_link(nd, &link, cookie);
57216 } while (res > 0);
57217
57218@@ -1686,7 +1702,7 @@ EXPORT_SYMBOL(full_name_hash);
57219 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
57220 {
57221 unsigned long a, b, adata, bdata, mask, hash, len;
57222- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
57223+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
57224
57225 hash = a = 0;
57226 len = -sizeof(unsigned long);
57227@@ -1968,6 +1984,8 @@ static int path_lookupat(int dfd, const char *name,
57228 if (err)
57229 break;
57230 err = lookup_last(nd, &path);
57231+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
57232+ err = -EACCES;
57233 put_link(nd, &link, cookie);
57234 }
57235 }
57236@@ -1975,6 +1993,13 @@ static int path_lookupat(int dfd, const char *name,
57237 if (!err)
57238 err = complete_walk(nd);
57239
57240+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
57241+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
57242+ path_put(&nd->path);
57243+ err = -ENOENT;
57244+ }
57245+ }
57246+
57247 if (!err && nd->flags & LOOKUP_DIRECTORY) {
57248 if (!can_lookup(nd->inode)) {
57249 path_put(&nd->path);
57250@@ -2002,8 +2027,15 @@ static int filename_lookup(int dfd, struct filename *name,
57251 retval = path_lookupat(dfd, name->name,
57252 flags | LOOKUP_REVAL, nd);
57253
57254- if (likely(!retval))
57255+ if (likely(!retval)) {
57256 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
57257+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
57258+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
57259+ path_put(&nd->path);
57260+ return -ENOENT;
57261+ }
57262+ }
57263+ }
57264 return retval;
57265 }
57266
57267@@ -2381,6 +2413,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
57268 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
57269 return -EPERM;
57270
57271+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
57272+ return -EPERM;
57273+ if (gr_handle_rawio(inode))
57274+ return -EPERM;
57275+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
57276+ return -EACCES;
57277+
57278 return 0;
57279 }
57280
57281@@ -2602,7 +2641,7 @@ looked_up:
57282 * cleared otherwise prior to returning.
57283 */
57284 static int lookup_open(struct nameidata *nd, struct path *path,
57285- struct file *file,
57286+ struct path *link, struct file *file,
57287 const struct open_flags *op,
57288 bool got_write, int *opened)
57289 {
57290@@ -2637,6 +2676,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
57291 /* Negative dentry, just create the file */
57292 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
57293 umode_t mode = op->mode;
57294+
57295+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
57296+ error = -EACCES;
57297+ goto out_dput;
57298+ }
57299+
57300+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
57301+ error = -EACCES;
57302+ goto out_dput;
57303+ }
57304+
57305 if (!IS_POSIXACL(dir->d_inode))
57306 mode &= ~current_umask();
57307 /*
57308@@ -2658,6 +2708,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
57309 nd->flags & LOOKUP_EXCL);
57310 if (error)
57311 goto out_dput;
57312+ else
57313+ gr_handle_create(dentry, nd->path.mnt);
57314 }
57315 out_no_open:
57316 path->dentry = dentry;
57317@@ -2672,7 +2724,7 @@ out_dput:
57318 /*
57319 * Handle the last step of open()
57320 */
57321-static int do_last(struct nameidata *nd, struct path *path,
57322+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
57323 struct file *file, const struct open_flags *op,
57324 int *opened, struct filename *name)
57325 {
57326@@ -2701,16 +2753,32 @@ static int do_last(struct nameidata *nd, struct path *path,
57327 error = complete_walk(nd);
57328 if (error)
57329 return error;
57330+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
57331+ error = -ENOENT;
57332+ goto out;
57333+ }
57334 audit_inode(name, nd->path.dentry, 0);
57335 if (open_flag & O_CREAT) {
57336 error = -EISDIR;
57337 goto out;
57338 }
57339+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
57340+ error = -EACCES;
57341+ goto out;
57342+ }
57343 goto finish_open;
57344 case LAST_BIND:
57345 error = complete_walk(nd);
57346 if (error)
57347 return error;
57348+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
57349+ error = -ENOENT;
57350+ goto out;
57351+ }
57352+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
57353+ error = -EACCES;
57354+ goto out;
57355+ }
57356 audit_inode(name, dir, 0);
57357 goto finish_open;
57358 }
57359@@ -2759,7 +2827,7 @@ retry_lookup:
57360 */
57361 }
57362 mutex_lock(&dir->d_inode->i_mutex);
57363- error = lookup_open(nd, path, file, op, got_write, opened);
57364+ error = lookup_open(nd, path, link, file, op, got_write, opened);
57365 mutex_unlock(&dir->d_inode->i_mutex);
57366
57367 if (error <= 0) {
57368@@ -2783,11 +2851,28 @@ retry_lookup:
57369 goto finish_open_created;
57370 }
57371
57372+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
57373+ error = -ENOENT;
57374+ goto exit_dput;
57375+ }
57376+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
57377+ error = -EACCES;
57378+ goto exit_dput;
57379+ }
57380+
57381 /*
57382 * create/update audit record if it already exists.
57383 */
57384- if (path->dentry->d_inode)
57385+ if (path->dentry->d_inode) {
57386+ /* only check if O_CREAT is specified, all other checks need to go
57387+ into may_open */
57388+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
57389+ error = -EACCES;
57390+ goto exit_dput;
57391+ }
57392+
57393 audit_inode(name, path->dentry, 0);
57394+ }
57395
57396 /*
57397 * If atomic_open() acquired write access it is dropped now due to
57398@@ -2828,6 +2913,11 @@ finish_lookup:
57399 }
57400 }
57401 BUG_ON(inode != path->dentry->d_inode);
57402+ /* if we're resolving a symlink to another symlink */
57403+ if (link && gr_handle_symlink_owner(link, inode)) {
57404+ error = -EACCES;
57405+ goto out;
57406+ }
57407 return 1;
57408 }
57409
57410@@ -2837,7 +2927,6 @@ finish_lookup:
57411 save_parent.dentry = nd->path.dentry;
57412 save_parent.mnt = mntget(path->mnt);
57413 nd->path.dentry = path->dentry;
57414-
57415 }
57416 nd->inode = inode;
57417 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
57418@@ -2846,6 +2935,16 @@ finish_lookup:
57419 path_put(&save_parent);
57420 return error;
57421 }
57422+
57423+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
57424+ error = -ENOENT;
57425+ goto out;
57426+ }
57427+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
57428+ error = -EACCES;
57429+ goto out;
57430+ }
57431+
57432 error = -EISDIR;
57433 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
57434 goto out;
57435@@ -2944,7 +3043,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
57436 if (unlikely(error))
57437 goto out;
57438
57439- error = do_last(nd, &path, file, op, &opened, pathname);
57440+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
57441 while (unlikely(error > 0)) { /* trailing symlink */
57442 struct path link = path;
57443 void *cookie;
57444@@ -2962,7 +3061,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
57445 error = follow_link(&link, nd, &cookie);
57446 if (unlikely(error))
57447 break;
57448- error = do_last(nd, &path, file, op, &opened, pathname);
57449+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
57450 put_link(nd, &link, cookie);
57451 }
57452 out:
57453@@ -3062,8 +3161,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
57454 goto unlock;
57455
57456 error = -EEXIST;
57457- if (dentry->d_inode)
57458+ if (dentry->d_inode) {
57459+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
57460+ error = -ENOENT;
57461+ }
57462 goto fail;
57463+ }
57464 /*
57465 * Special case - lookup gave negative, but... we had foo/bar/
57466 * From the vfs_mknod() POV we just have a negative dentry -
57467@@ -3115,6 +3218,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
57468 }
57469 EXPORT_SYMBOL(user_path_create);
57470
57471+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
57472+{
57473+ struct filename *tmp = getname(pathname);
57474+ struct dentry *res;
57475+ if (IS_ERR(tmp))
57476+ return ERR_CAST(tmp);
57477+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
57478+ if (IS_ERR(res))
57479+ putname(tmp);
57480+ else
57481+ *to = tmp;
57482+ return res;
57483+}
57484+
57485 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
57486 {
57487 int error = may_create(dir, dentry);
57488@@ -3177,6 +3294,17 @@ retry:
57489
57490 if (!IS_POSIXACL(path.dentry->d_inode))
57491 mode &= ~current_umask();
57492+
57493+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
57494+ error = -EPERM;
57495+ goto out;
57496+ }
57497+
57498+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
57499+ error = -EACCES;
57500+ goto out;
57501+ }
57502+
57503 error = security_path_mknod(&path, dentry, mode, dev);
57504 if (error)
57505 goto out;
57506@@ -3193,6 +3321,8 @@ retry:
57507 break;
57508 }
57509 out:
57510+ if (!error)
57511+ gr_handle_create(dentry, path.mnt);
57512 done_path_create(&path, dentry);
57513 if (retry_estale(error, lookup_flags)) {
57514 lookup_flags |= LOOKUP_REVAL;
57515@@ -3245,9 +3375,16 @@ retry:
57516
57517 if (!IS_POSIXACL(path.dentry->d_inode))
57518 mode &= ~current_umask();
57519+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
57520+ error = -EACCES;
57521+ goto out;
57522+ }
57523 error = security_path_mkdir(&path, dentry, mode);
57524 if (!error)
57525 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
57526+ if (!error)
57527+ gr_handle_create(dentry, path.mnt);
57528+out:
57529 done_path_create(&path, dentry);
57530 if (retry_estale(error, lookup_flags)) {
57531 lookup_flags |= LOOKUP_REVAL;
57532@@ -3328,6 +3465,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
57533 struct filename *name;
57534 struct dentry *dentry;
57535 struct nameidata nd;
57536+ ino_t saved_ino = 0;
57537+ dev_t saved_dev = 0;
57538 unsigned int lookup_flags = 0;
57539 retry:
57540 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
57541@@ -3360,10 +3499,21 @@ retry:
57542 error = -ENOENT;
57543 goto exit3;
57544 }
57545+
57546+ saved_ino = dentry->d_inode->i_ino;
57547+ saved_dev = gr_get_dev_from_dentry(dentry);
57548+
57549+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
57550+ error = -EACCES;
57551+ goto exit3;
57552+ }
57553+
57554 error = security_path_rmdir(&nd.path, dentry);
57555 if (error)
57556 goto exit3;
57557 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
57558+ if (!error && (saved_dev || saved_ino))
57559+ gr_handle_delete(saved_ino, saved_dev);
57560 exit3:
57561 dput(dentry);
57562 exit2:
57563@@ -3429,6 +3579,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
57564 struct dentry *dentry;
57565 struct nameidata nd;
57566 struct inode *inode = NULL;
57567+ ino_t saved_ino = 0;
57568+ dev_t saved_dev = 0;
57569 unsigned int lookup_flags = 0;
57570 retry:
57571 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
57572@@ -3455,10 +3607,22 @@ retry:
57573 if (!inode)
57574 goto slashes;
57575 ihold(inode);
57576+
57577+ if (inode->i_nlink <= 1) {
57578+ saved_ino = inode->i_ino;
57579+ saved_dev = gr_get_dev_from_dentry(dentry);
57580+ }
57581+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
57582+ error = -EACCES;
57583+ goto exit2;
57584+ }
57585+
57586 error = security_path_unlink(&nd.path, dentry);
57587 if (error)
57588 goto exit2;
57589 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
57590+ if (!error && (saved_ino || saved_dev))
57591+ gr_handle_delete(saved_ino, saved_dev);
57592 exit2:
57593 dput(dentry);
57594 }
57595@@ -3536,9 +3700,17 @@ retry:
57596 if (IS_ERR(dentry))
57597 goto out_putname;
57598
57599+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
57600+ error = -EACCES;
57601+ goto out;
57602+ }
57603+
57604 error = security_path_symlink(&path, dentry, from->name);
57605 if (!error)
57606 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
57607+ if (!error)
57608+ gr_handle_create(dentry, path.mnt);
57609+out:
57610 done_path_create(&path, dentry);
57611 if (retry_estale(error, lookup_flags)) {
57612 lookup_flags |= LOOKUP_REVAL;
57613@@ -3612,6 +3784,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
57614 {
57615 struct dentry *new_dentry;
57616 struct path old_path, new_path;
57617+ struct filename *to = NULL;
57618 int how = 0;
57619 int error;
57620
57621@@ -3635,7 +3808,7 @@ retry:
57622 if (error)
57623 return error;
57624
57625- new_dentry = user_path_create(newdfd, newname, &new_path,
57626+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
57627 (how & LOOKUP_REVAL));
57628 error = PTR_ERR(new_dentry);
57629 if (IS_ERR(new_dentry))
57630@@ -3647,11 +3820,28 @@ retry:
57631 error = may_linkat(&old_path);
57632 if (unlikely(error))
57633 goto out_dput;
57634+
57635+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
57636+ old_path.dentry->d_inode,
57637+ old_path.dentry->d_inode->i_mode, to)) {
57638+ error = -EACCES;
57639+ goto out_dput;
57640+ }
57641+
57642+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
57643+ old_path.dentry, old_path.mnt, to)) {
57644+ error = -EACCES;
57645+ goto out_dput;
57646+ }
57647+
57648 error = security_path_link(old_path.dentry, &new_path, new_dentry);
57649 if (error)
57650 goto out_dput;
57651 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
57652+ if (!error)
57653+ gr_handle_create(new_dentry, new_path.mnt);
57654 out_dput:
57655+ putname(to);
57656 done_path_create(&new_path, new_dentry);
57657 if (retry_estale(error, how)) {
57658 how |= LOOKUP_REVAL;
57659@@ -3897,12 +4087,21 @@ retry:
57660 if (new_dentry == trap)
57661 goto exit5;
57662
57663+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
57664+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
57665+ to);
57666+ if (error)
57667+ goto exit5;
57668+
57669 error = security_path_rename(&oldnd.path, old_dentry,
57670 &newnd.path, new_dentry);
57671 if (error)
57672 goto exit5;
57673 error = vfs_rename(old_dir->d_inode, old_dentry,
57674 new_dir->d_inode, new_dentry);
57675+ if (!error)
57676+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
57677+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
57678 exit5:
57679 dput(new_dentry);
57680 exit4:
57681@@ -3934,6 +4133,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
57682
57683 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
57684 {
57685+ char tmpbuf[64];
57686+ const char *newlink;
57687 int len;
57688
57689 len = PTR_ERR(link);
57690@@ -3943,7 +4144,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
57691 len = strlen(link);
57692 if (len > (unsigned) buflen)
57693 len = buflen;
57694- if (copy_to_user(buffer, link, len))
57695+
57696+ if (len < sizeof(tmpbuf)) {
57697+ memcpy(tmpbuf, link, len);
57698+ newlink = tmpbuf;
57699+ } else
57700+ newlink = link;
57701+
57702+ if (copy_to_user(buffer, newlink, len))
57703 len = -EFAULT;
57704 out:
57705 return len;
57706diff --git a/fs/namespace.c b/fs/namespace.c
57707index a45ba4f..e7dc489 100644
57708--- a/fs/namespace.c
57709+++ b/fs/namespace.c
57710@@ -1265,6 +1265,9 @@ static int do_umount(struct mount *mnt, int flags)
57711 if (!(sb->s_flags & MS_RDONLY))
57712 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
57713 up_write(&sb->s_umount);
57714+
57715+ gr_log_remount(mnt->mnt_devname, retval);
57716+
57717 return retval;
57718 }
57719
57720@@ -1283,6 +1286,9 @@ static int do_umount(struct mount *mnt, int flags)
57721 }
57722 br_write_unlock(&vfsmount_lock);
57723 namespace_unlock();
57724+
57725+ gr_log_unmount(mnt->mnt_devname, retval);
57726+
57727 return retval;
57728 }
57729
57730@@ -1302,7 +1308,7 @@ static inline bool may_mount(void)
57731 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
57732 */
57733
57734-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
57735+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
57736 {
57737 struct path path;
57738 struct mount *mnt;
57739@@ -1342,7 +1348,7 @@ out:
57740 /*
57741 * The 2.0 compatible umount. No flags.
57742 */
57743-SYSCALL_DEFINE1(oldumount, char __user *, name)
57744+SYSCALL_DEFINE1(oldumount, const char __user *, name)
57745 {
57746 return sys_umount(name, 0);
57747 }
57748@@ -2313,6 +2319,16 @@ long do_mount(const char *dev_name, const char *dir_name,
57749 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
57750 MS_STRICTATIME);
57751
57752+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
57753+ retval = -EPERM;
57754+ goto dput_out;
57755+ }
57756+
57757+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
57758+ retval = -EPERM;
57759+ goto dput_out;
57760+ }
57761+
57762 if (flags & MS_REMOUNT)
57763 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
57764 data_page);
57765@@ -2327,6 +2343,9 @@ long do_mount(const char *dev_name, const char *dir_name,
57766 dev_name, data_page);
57767 dput_out:
57768 path_put(&path);
57769+
57770+ gr_log_mount(dev_name, dir_name, retval);
57771+
57772 return retval;
57773 }
57774
57775@@ -2500,8 +2519,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
57776 }
57777 EXPORT_SYMBOL(mount_subtree);
57778
57779-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
57780- char __user *, type, unsigned long, flags, void __user *, data)
57781+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
57782+ const char __user *, type, unsigned long, flags, void __user *, data)
57783 {
57784 int ret;
57785 char *kernel_type;
57786@@ -2614,6 +2633,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
57787 if (error)
57788 goto out2;
57789
57790+ if (gr_handle_chroot_pivot()) {
57791+ error = -EPERM;
57792+ goto out2;
57793+ }
57794+
57795 get_fs_root(current->fs, &root);
57796 old_mp = lock_mount(&old);
57797 error = PTR_ERR(old_mp);
57798@@ -2864,7 +2888,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
57799 !nsown_capable(CAP_SYS_ADMIN))
57800 return -EPERM;
57801
57802- if (fs->users != 1)
57803+ if (atomic_read(&fs->users) != 1)
57804 return -EINVAL;
57805
57806 get_mnt_ns(mnt_ns);
57807diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
57808index cff089a..4c3d57a 100644
57809--- a/fs/nfs/callback.c
57810+++ b/fs/nfs/callback.c
57811@@ -211,7 +211,6 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
57812 struct svc_rqst *rqstp;
57813 int (*callback_svc)(void *vrqstp);
57814 struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
57815- char svc_name[12];
57816 int ret;
57817
57818 nfs_callback_bc_serv(minorversion, xprt, serv);
57819@@ -235,10 +234,9 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
57820
57821 svc_sock_update_bufs(serv);
57822
57823- sprintf(svc_name, "nfsv4.%u-svc", minorversion);
57824 cb_info->serv = serv;
57825 cb_info->rqst = rqstp;
57826- cb_info->task = kthread_run(callback_svc, cb_info->rqst, svc_name);
57827+ cb_info->task = kthread_run(callback_svc, cb_info->rqst, "nfsv4.%u-svc", minorversion);
57828 if (IS_ERR(cb_info->task)) {
57829 ret = PTR_ERR(cb_info->task);
57830 svc_exit_thread(cb_info->rqst);
57831diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
57832index a35582c..ebbdcd5 100644
57833--- a/fs/nfs/callback_xdr.c
57834+++ b/fs/nfs/callback_xdr.c
57835@@ -51,7 +51,7 @@ struct callback_op {
57836 callback_decode_arg_t decode_args;
57837 callback_encode_res_t encode_res;
57838 long res_maxsize;
57839-};
57840+} __do_const;
57841
57842 static struct callback_op callback_ops[];
57843
57844diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
57845index c1c7a9d..7afa0b8 100644
57846--- a/fs/nfs/inode.c
57847+++ b/fs/nfs/inode.c
57848@@ -1043,16 +1043,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
57849 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
57850 }
57851
57852-static atomic_long_t nfs_attr_generation_counter;
57853+static atomic_long_unchecked_t nfs_attr_generation_counter;
57854
57855 static unsigned long nfs_read_attr_generation_counter(void)
57856 {
57857- return atomic_long_read(&nfs_attr_generation_counter);
57858+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
57859 }
57860
57861 unsigned long nfs_inc_attr_generation_counter(void)
57862 {
57863- return atomic_long_inc_return(&nfs_attr_generation_counter);
57864+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
57865 }
57866
57867 void nfs_fattr_init(struct nfs_fattr *fattr)
57868diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
57869index 2c37442..9b9538b 100644
57870--- a/fs/nfs/nfs4state.c
57871+++ b/fs/nfs/nfs4state.c
57872@@ -1193,7 +1193,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
57873 snprintf(buf, sizeof(buf), "%s-manager",
57874 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
57875 rcu_read_unlock();
57876- task = kthread_run(nfs4_run_state_manager, clp, buf);
57877+ task = kthread_run(nfs4_run_state_manager, clp, "%s", buf);
57878 if (IS_ERR(task)) {
57879 printk(KERN_ERR "%s: kthread_run: %ld\n",
57880 __func__, PTR_ERR(task));
57881diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
57882index 27d74a2..c4c2a73 100644
57883--- a/fs/nfsd/nfs4proc.c
57884+++ b/fs/nfsd/nfs4proc.c
57885@@ -1126,7 +1126,7 @@ struct nfsd4_operation {
57886 nfsd4op_rsize op_rsize_bop;
57887 stateid_getter op_get_currentstateid;
57888 stateid_setter op_set_currentstateid;
57889-};
57890+} __do_const;
57891
57892 static struct nfsd4_operation nfsd4_ops[];
57893
57894diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
57895index 582321a..0224663 100644
57896--- a/fs/nfsd/nfs4xdr.c
57897+++ b/fs/nfsd/nfs4xdr.c
57898@@ -1458,7 +1458,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
57899
57900 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
57901
57902-static nfsd4_dec nfsd4_dec_ops[] = {
57903+static const nfsd4_dec nfsd4_dec_ops[] = {
57904 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
57905 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
57906 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
57907@@ -1498,7 +1498,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
57908 [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
57909 };
57910
57911-static nfsd4_dec nfsd41_dec_ops[] = {
57912+static const nfsd4_dec nfsd41_dec_ops[] = {
57913 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
57914 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
57915 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
57916@@ -1560,7 +1560,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
57917 };
57918
57919 struct nfsd4_minorversion_ops {
57920- nfsd4_dec *decoders;
57921+ const nfsd4_dec *decoders;
57922 int nops;
57923 };
57924
57925diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
57926index e76244e..9fe8f2f1 100644
57927--- a/fs/nfsd/nfscache.c
57928+++ b/fs/nfsd/nfscache.c
57929@@ -526,14 +526,17 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
57930 {
57931 struct svc_cacherep *rp = rqstp->rq_cacherep;
57932 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
57933- int len;
57934+ long len;
57935 size_t bufsize = 0;
57936
57937 if (!rp)
57938 return;
57939
57940- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
57941- len >>= 2;
57942+ if (statp) {
57943+ len = (char*)statp - (char*)resv->iov_base;
57944+ len = resv->iov_len - len;
57945+ len >>= 2;
57946+ }
57947
57948 /* Don't cache excessive amounts of data and XDR failures */
57949 if (!statp || len > (256 >> 2)) {
57950diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
57951index baf149a..76b86ad 100644
57952--- a/fs/nfsd/vfs.c
57953+++ b/fs/nfsd/vfs.c
57954@@ -940,7 +940,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
57955 } else {
57956 oldfs = get_fs();
57957 set_fs(KERNEL_DS);
57958- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
57959+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
57960 set_fs(oldfs);
57961 }
57962
57963@@ -1027,7 +1027,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
57964
57965 /* Write the data. */
57966 oldfs = get_fs(); set_fs(KERNEL_DS);
57967- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
57968+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
57969 set_fs(oldfs);
57970 if (host_err < 0)
57971 goto out_nfserr;
57972@@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
57973 */
57974
57975 oldfs = get_fs(); set_fs(KERNEL_DS);
57976- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
57977+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
57978 set_fs(oldfs);
57979
57980 if (host_err < 0)
57981diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
57982index fea6bd5..8ee9d81 100644
57983--- a/fs/nls/nls_base.c
57984+++ b/fs/nls/nls_base.c
57985@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
57986
57987 int register_nls(struct nls_table * nls)
57988 {
57989- struct nls_table ** tmp = &tables;
57990+ struct nls_table *tmp = tables;
57991
57992 if (nls->next)
57993 return -EBUSY;
57994
57995 spin_lock(&nls_lock);
57996- while (*tmp) {
57997- if (nls == *tmp) {
57998+ while (tmp) {
57999+ if (nls == tmp) {
58000 spin_unlock(&nls_lock);
58001 return -EBUSY;
58002 }
58003- tmp = &(*tmp)->next;
58004+ tmp = tmp->next;
58005 }
58006- nls->next = tables;
58007+ pax_open_kernel();
58008+ *(struct nls_table **)&nls->next = tables;
58009+ pax_close_kernel();
58010 tables = nls;
58011 spin_unlock(&nls_lock);
58012 return 0;
58013@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
58014
58015 int unregister_nls(struct nls_table * nls)
58016 {
58017- struct nls_table ** tmp = &tables;
58018+ struct nls_table * const * tmp = &tables;
58019
58020 spin_lock(&nls_lock);
58021 while (*tmp) {
58022 if (nls == *tmp) {
58023- *tmp = nls->next;
58024+ pax_open_kernel();
58025+ *(struct nls_table **)tmp = nls->next;
58026+ pax_close_kernel();
58027 spin_unlock(&nls_lock);
58028 return 0;
58029 }
58030diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
58031index 7424929..35f6be5 100644
58032--- a/fs/nls/nls_euc-jp.c
58033+++ b/fs/nls/nls_euc-jp.c
58034@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
58035 p_nls = load_nls("cp932");
58036
58037 if (p_nls) {
58038- table.charset2upper = p_nls->charset2upper;
58039- table.charset2lower = p_nls->charset2lower;
58040+ pax_open_kernel();
58041+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
58042+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
58043+ pax_close_kernel();
58044 return register_nls(&table);
58045 }
58046
58047diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
58048index e7bc1d7..06bd4bb 100644
58049--- a/fs/nls/nls_koi8-ru.c
58050+++ b/fs/nls/nls_koi8-ru.c
58051@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
58052 p_nls = load_nls("koi8-u");
58053
58054 if (p_nls) {
58055- table.charset2upper = p_nls->charset2upper;
58056- table.charset2lower = p_nls->charset2lower;
58057+ pax_open_kernel();
58058+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
58059+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
58060+ pax_close_kernel();
58061 return register_nls(&table);
58062 }
58063
58064diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
58065index 77cc85d..a1e6299 100644
58066--- a/fs/notify/fanotify/fanotify_user.c
58067+++ b/fs/notify/fanotify/fanotify_user.c
58068@@ -253,8 +253,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
58069
58070 fd = fanotify_event_metadata.fd;
58071 ret = -EFAULT;
58072- if (copy_to_user(buf, &fanotify_event_metadata,
58073- fanotify_event_metadata.event_len))
58074+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
58075+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
58076 goto out_close_fd;
58077
58078 ret = prepare_for_access_response(group, event, fd);
58079diff --git a/fs/notify/notification.c b/fs/notify/notification.c
58080index 7b51b05..5ea5ef6 100644
58081--- a/fs/notify/notification.c
58082+++ b/fs/notify/notification.c
58083@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
58084 * get set to 0 so it will never get 'freed'
58085 */
58086 static struct fsnotify_event *q_overflow_event;
58087-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
58088+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
58089
58090 /**
58091 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
58092@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
58093 */
58094 u32 fsnotify_get_cookie(void)
58095 {
58096- return atomic_inc_return(&fsnotify_sync_cookie);
58097+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
58098 }
58099 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
58100
58101diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
58102index aa411c3..c260a84 100644
58103--- a/fs/ntfs/dir.c
58104+++ b/fs/ntfs/dir.c
58105@@ -1329,7 +1329,7 @@ find_next_index_buffer:
58106 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
58107 ~(s64)(ndir->itype.index.block_size - 1)));
58108 /* Bounds checks. */
58109- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
58110+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
58111 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
58112 "inode 0x%lx or driver bug.", vdir->i_ino);
58113 goto err_out;
58114diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
58115index c5670b8..2b43d9b 100644
58116--- a/fs/ntfs/file.c
58117+++ b/fs/ntfs/file.c
58118@@ -1282,7 +1282,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
58119 char *addr;
58120 size_t total = 0;
58121 unsigned len;
58122- int left;
58123+ unsigned left;
58124
58125 do {
58126 len = PAGE_CACHE_SIZE - ofs;
58127@@ -2241,6 +2241,6 @@ const struct inode_operations ntfs_file_inode_ops = {
58128 #endif /* NTFS_RW */
58129 };
58130
58131-const struct file_operations ntfs_empty_file_ops = {};
58132+const struct file_operations ntfs_empty_file_ops __read_only;
58133
58134-const struct inode_operations ntfs_empty_inode_ops = {};
58135+const struct inode_operations ntfs_empty_inode_ops __read_only;
58136diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
58137index 82650d5..db37dcf 100644
58138--- a/fs/ntfs/super.c
58139+++ b/fs/ntfs/super.c
58140@@ -685,7 +685,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
58141 if (!silent)
58142 ntfs_error(sb, "Primary boot sector is invalid.");
58143 } else if (!silent)
58144- ntfs_error(sb, read_err_str, "primary");
58145+ ntfs_error(sb, read_err_str, "%s", "primary");
58146 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
58147 if (bh_primary)
58148 brelse(bh_primary);
58149@@ -701,7 +701,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
58150 goto hotfix_primary_boot_sector;
58151 brelse(bh_backup);
58152 } else if (!silent)
58153- ntfs_error(sb, read_err_str, "backup");
58154+ ntfs_error(sb, read_err_str, "%s", "backup");
58155 /* Try to read NT3.51- backup boot sector. */
58156 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
58157 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
58158@@ -712,7 +712,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
58159 "sector.");
58160 brelse(bh_backup);
58161 } else if (!silent)
58162- ntfs_error(sb, read_err_str, "backup");
58163+ ntfs_error(sb, read_err_str, "%s", "backup");
58164 /* We failed. Cleanup and return. */
58165 if (bh_primary)
58166 brelse(bh_primary);
58167diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
58168index 20dfec7..e238cb7 100644
58169--- a/fs/ocfs2/aops.c
58170+++ b/fs/ocfs2/aops.c
58171@@ -1756,7 +1756,7 @@ try_again:
58172 goto out;
58173 } else if (ret == 1) {
58174 clusters_need = wc->w_clen;
58175- ret = ocfs2_refcount_cow(inode, filp, di_bh,
58176+ ret = ocfs2_refcount_cow(inode, di_bh,
58177 wc->w_cpos, wc->w_clen, UINT_MAX);
58178 if (ret) {
58179 mlog_errno(ret);
58180diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
58181index ff54014..ff125fd 100644
58182--- a/fs/ocfs2/file.c
58183+++ b/fs/ocfs2/file.c
58184@@ -370,7 +370,7 @@ static int ocfs2_cow_file_pos(struct inode *inode,
58185 if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
58186 goto out;
58187
58188- return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1);
58189+ return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
58190
58191 out:
58192 return status;
58193@@ -899,7 +899,7 @@ static int ocfs2_zero_extend_get_range(struct inode *inode,
58194 zero_clusters = last_cpos - zero_cpos;
58195
58196 if (needs_cow) {
58197- rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos,
58198+ rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
58199 zero_clusters, UINT_MAX);
58200 if (rc) {
58201 mlog_errno(rc);
58202@@ -2078,7 +2078,7 @@ static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
58203
58204 *meta_level = 1;
58205
58206- ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX);
58207+ ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
58208 if (ret)
58209 mlog_errno(ret);
58210 out:
58211diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
58212index aebeacd..0dcdd26 100644
58213--- a/fs/ocfs2/localalloc.c
58214+++ b/fs/ocfs2/localalloc.c
58215@@ -1278,7 +1278,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
58216 goto bail;
58217 }
58218
58219- atomic_inc(&osb->alloc_stats.moves);
58220+ atomic_inc_unchecked(&osb->alloc_stats.moves);
58221
58222 bail:
58223 if (handle)
58224diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
58225index f1fc172..452068b 100644
58226--- a/fs/ocfs2/move_extents.c
58227+++ b/fs/ocfs2/move_extents.c
58228@@ -69,7 +69,7 @@ static int __ocfs2_move_extent(handle_t *handle,
58229 u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
58230 u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
58231
58232- ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos,
58233+ ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
58234 p_cpos, new_p_cpos, len);
58235 if (ret) {
58236 mlog_errno(ret);
58237diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
58238index d355e6e..578d905 100644
58239--- a/fs/ocfs2/ocfs2.h
58240+++ b/fs/ocfs2/ocfs2.h
58241@@ -235,11 +235,11 @@ enum ocfs2_vol_state
58242
58243 struct ocfs2_alloc_stats
58244 {
58245- atomic_t moves;
58246- atomic_t local_data;
58247- atomic_t bitmap_data;
58248- atomic_t bg_allocs;
58249- atomic_t bg_extends;
58250+ atomic_unchecked_t moves;
58251+ atomic_unchecked_t local_data;
58252+ atomic_unchecked_t bitmap_data;
58253+ atomic_unchecked_t bg_allocs;
58254+ atomic_unchecked_t bg_extends;
58255 };
58256
58257 enum ocfs2_local_alloc_state
58258diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
58259index 998b17e..aefe414 100644
58260--- a/fs/ocfs2/refcounttree.c
58261+++ b/fs/ocfs2/refcounttree.c
58262@@ -49,7 +49,6 @@
58263
58264 struct ocfs2_cow_context {
58265 struct inode *inode;
58266- struct file *file;
58267 u32 cow_start;
58268 u32 cow_len;
58269 struct ocfs2_extent_tree data_et;
58270@@ -66,7 +65,7 @@ struct ocfs2_cow_context {
58271 u32 *num_clusters,
58272 unsigned int *extent_flags);
58273 int (*cow_duplicate_clusters)(handle_t *handle,
58274- struct file *file,
58275+ struct inode *inode,
58276 u32 cpos, u32 old_cluster,
58277 u32 new_cluster, u32 new_len);
58278 };
58279@@ -2922,14 +2921,12 @@ static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
58280 }
58281
58282 int ocfs2_duplicate_clusters_by_page(handle_t *handle,
58283- struct file *file,
58284+ struct inode *inode,
58285 u32 cpos, u32 old_cluster,
58286 u32 new_cluster, u32 new_len)
58287 {
58288 int ret = 0, partial;
58289- struct inode *inode = file_inode(file);
58290- struct ocfs2_caching_info *ci = INODE_CACHE(inode);
58291- struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
58292+ struct super_block *sb = inode->i_sb;
58293 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
58294 struct page *page;
58295 pgoff_t page_index;
58296@@ -2973,13 +2970,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
58297 if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
58298 BUG_ON(PageDirty(page));
58299
58300- if (PageReadahead(page)) {
58301- page_cache_async_readahead(mapping,
58302- &file->f_ra, file,
58303- page, page_index,
58304- readahead_pages);
58305- }
58306-
58307 if (!PageUptodate(page)) {
58308 ret = block_read_full_page(page, ocfs2_get_block);
58309 if (ret) {
58310@@ -2999,7 +2989,8 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
58311 }
58312 }
58313
58314- ocfs2_map_and_dirty_page(inode, handle, from, to,
58315+ ocfs2_map_and_dirty_page(inode,
58316+ handle, from, to,
58317 page, 0, &new_block);
58318 mark_page_accessed(page);
58319 unlock:
58320@@ -3015,12 +3006,11 @@ unlock:
58321 }
58322
58323 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
58324- struct file *file,
58325+ struct inode *inode,
58326 u32 cpos, u32 old_cluster,
58327 u32 new_cluster, u32 new_len)
58328 {
58329 int ret = 0;
58330- struct inode *inode = file_inode(file);
58331 struct super_block *sb = inode->i_sb;
58332 struct ocfs2_caching_info *ci = INODE_CACHE(inode);
58333 int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
58334@@ -3145,7 +3135,7 @@ static int ocfs2_replace_clusters(handle_t *handle,
58335
58336 /*If the old clusters is unwritten, no need to duplicate. */
58337 if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
58338- ret = context->cow_duplicate_clusters(handle, context->file,
58339+ ret = context->cow_duplicate_clusters(handle, context->inode,
58340 cpos, old, new, len);
58341 if (ret) {
58342 mlog_errno(ret);
58343@@ -3423,35 +3413,12 @@ static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
58344 return ret;
58345 }
58346
58347-static void ocfs2_readahead_for_cow(struct inode *inode,
58348- struct file *file,
58349- u32 start, u32 len)
58350-{
58351- struct address_space *mapping;
58352- pgoff_t index;
58353- unsigned long num_pages;
58354- int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
58355-
58356- if (!file)
58357- return;
58358-
58359- mapping = file->f_mapping;
58360- num_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT;
58361- if (!num_pages)
58362- num_pages = 1;
58363-
58364- index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT;
58365- page_cache_sync_readahead(mapping, &file->f_ra, file,
58366- index, num_pages);
58367-}
58368-
58369 /*
58370 * Starting at cpos, try to CoW write_len clusters. Don't CoW
58371 * past max_cpos. This will stop when it runs into a hole or an
58372 * unrefcounted extent.
58373 */
58374 static int ocfs2_refcount_cow_hunk(struct inode *inode,
58375- struct file *file,
58376 struct buffer_head *di_bh,
58377 u32 cpos, u32 write_len, u32 max_cpos)
58378 {
58379@@ -3480,8 +3447,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
58380
58381 BUG_ON(cow_len == 0);
58382
58383- ocfs2_readahead_for_cow(inode, file, cow_start, cow_len);
58384-
58385 context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
58386 if (!context) {
58387 ret = -ENOMEM;
58388@@ -3503,7 +3468,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
58389 context->ref_root_bh = ref_root_bh;
58390 context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
58391 context->get_clusters = ocfs2_di_get_clusters;
58392- context->file = file;
58393
58394 ocfs2_init_dinode_extent_tree(&context->data_et,
58395 INODE_CACHE(inode), di_bh);
58396@@ -3532,7 +3496,6 @@ out:
58397 * clusters between cpos and cpos+write_len are safe to modify.
58398 */
58399 int ocfs2_refcount_cow(struct inode *inode,
58400- struct file *file,
58401 struct buffer_head *di_bh,
58402 u32 cpos, u32 write_len, u32 max_cpos)
58403 {
58404@@ -3552,7 +3515,7 @@ int ocfs2_refcount_cow(struct inode *inode,
58405 num_clusters = write_len;
58406
58407 if (ext_flags & OCFS2_EXT_REFCOUNTED) {
58408- ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos,
58409+ ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
58410 num_clusters, max_cpos);
58411 if (ret) {
58412 mlog_errno(ret);
58413diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h
58414index 7754608..6422bbcdb 100644
58415--- a/fs/ocfs2/refcounttree.h
58416+++ b/fs/ocfs2/refcounttree.h
58417@@ -53,7 +53,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
58418 int *credits,
58419 int *ref_blocks);
58420 int ocfs2_refcount_cow(struct inode *inode,
58421- struct file *filep, struct buffer_head *di_bh,
58422+ struct buffer_head *di_bh,
58423 u32 cpos, u32 write_len, u32 max_cpos);
58424
58425 typedef int (ocfs2_post_refcount_func)(struct inode *inode,
58426@@ -85,11 +85,11 @@ int ocfs2_refcount_cow_xattr(struct inode *inode,
58427 u32 cpos, u32 write_len,
58428 struct ocfs2_post_refcount *post);
58429 int ocfs2_duplicate_clusters_by_page(handle_t *handle,
58430- struct file *file,
58431+ struct inode *inode,
58432 u32 cpos, u32 old_cluster,
58433 u32 new_cluster, u32 new_len);
58434 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
58435- struct file *file,
58436+ struct inode *inode,
58437 u32 cpos, u32 old_cluster,
58438 u32 new_cluster, u32 new_len);
58439 int ocfs2_cow_sync_writeback(struct super_block *sb,
58440diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
58441index b7e74b5..19c6536 100644
58442--- a/fs/ocfs2/suballoc.c
58443+++ b/fs/ocfs2/suballoc.c
58444@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
58445 mlog_errno(status);
58446 goto bail;
58447 }
58448- atomic_inc(&osb->alloc_stats.bg_extends);
58449+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
58450
58451 /* You should never ask for this much metadata */
58452 BUG_ON(bits_wanted >
58453@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
58454 mlog_errno(status);
58455 goto bail;
58456 }
58457- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58458+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58459
58460 *suballoc_loc = res.sr_bg_blkno;
58461 *suballoc_bit_start = res.sr_bit_offset;
58462@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
58463 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
58464 res->sr_bits);
58465
58466- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58467+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58468
58469 BUG_ON(res->sr_bits != 1);
58470
58471@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
58472 mlog_errno(status);
58473 goto bail;
58474 }
58475- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58476+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58477
58478 BUG_ON(res.sr_bits != 1);
58479
58480@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
58481 cluster_start,
58482 num_clusters);
58483 if (!status)
58484- atomic_inc(&osb->alloc_stats.local_data);
58485+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
58486 } else {
58487 if (min_clusters > (osb->bitmap_cpg - 1)) {
58488 /* The only paths asking for contiguousness
58489@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
58490 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
58491 res.sr_bg_blkno,
58492 res.sr_bit_offset);
58493- atomic_inc(&osb->alloc_stats.bitmap_data);
58494+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
58495 *num_clusters = res.sr_bits;
58496 }
58497 }
58498diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
58499index 01b8516..579c4df 100644
58500--- a/fs/ocfs2/super.c
58501+++ b/fs/ocfs2/super.c
58502@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
58503 "%10s => GlobalAllocs: %d LocalAllocs: %d "
58504 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
58505 "Stats",
58506- atomic_read(&osb->alloc_stats.bitmap_data),
58507- atomic_read(&osb->alloc_stats.local_data),
58508- atomic_read(&osb->alloc_stats.bg_allocs),
58509- atomic_read(&osb->alloc_stats.moves),
58510- atomic_read(&osb->alloc_stats.bg_extends));
58511+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
58512+ atomic_read_unchecked(&osb->alloc_stats.local_data),
58513+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
58514+ atomic_read_unchecked(&osb->alloc_stats.moves),
58515+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
58516
58517 out += snprintf(buf + out, len - out,
58518 "%10s => State: %u Descriptor: %llu Size: %u bits "
58519@@ -2122,11 +2122,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
58520 spin_lock_init(&osb->osb_xattr_lock);
58521 ocfs2_init_steal_slots(osb);
58522
58523- atomic_set(&osb->alloc_stats.moves, 0);
58524- atomic_set(&osb->alloc_stats.local_data, 0);
58525- atomic_set(&osb->alloc_stats.bitmap_data, 0);
58526- atomic_set(&osb->alloc_stats.bg_allocs, 0);
58527- atomic_set(&osb->alloc_stats.bg_extends, 0);
58528+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
58529+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
58530+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
58531+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
58532+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
58533
58534 /* Copy the blockcheck stats from the superblock probe */
58535 osb->osb_ecc_stats = *stats;
58536diff --git a/fs/open.c b/fs/open.c
58537index 8c74100..4239c48 100644
58538--- a/fs/open.c
58539+++ b/fs/open.c
58540@@ -32,6 +32,8 @@
58541 #include <linux/dnotify.h>
58542 #include <linux/compat.h>
58543
58544+#define CREATE_TRACE_POINTS
58545+#include <trace/events/fs.h>
58546 #include "internal.h"
58547
58548 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
58549@@ -102,6 +104,8 @@ long vfs_truncate(struct path *path, loff_t length)
58550 error = locks_verify_truncate(inode, NULL, length);
58551 if (!error)
58552 error = security_path_truncate(path);
58553+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
58554+ error = -EACCES;
58555 if (!error)
58556 error = do_truncate(path->dentry, length, 0, NULL);
58557
58558@@ -186,6 +190,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
58559 error = locks_verify_truncate(inode, f.file, length);
58560 if (!error)
58561 error = security_path_truncate(&f.file->f_path);
58562+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
58563+ error = -EACCES;
58564 if (!error)
58565 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
58566 sb_end_write(inode->i_sb);
58567@@ -360,6 +366,9 @@ retry:
58568 if (__mnt_is_readonly(path.mnt))
58569 res = -EROFS;
58570
58571+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
58572+ res = -EACCES;
58573+
58574 out_path_release:
58575 path_put(&path);
58576 if (retry_estale(res, lookup_flags)) {
58577@@ -391,6 +400,8 @@ retry:
58578 if (error)
58579 goto dput_and_out;
58580
58581+ gr_log_chdir(path.dentry, path.mnt);
58582+
58583 set_fs_pwd(current->fs, &path);
58584
58585 dput_and_out:
58586@@ -420,6 +431,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
58587 goto out_putf;
58588
58589 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
58590+
58591+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
58592+ error = -EPERM;
58593+
58594+ if (!error)
58595+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
58596+
58597 if (!error)
58598 set_fs_pwd(current->fs, &f.file->f_path);
58599 out_putf:
58600@@ -449,7 +467,13 @@ retry:
58601 if (error)
58602 goto dput_and_out;
58603
58604+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
58605+ goto dput_and_out;
58606+
58607 set_fs_root(current->fs, &path);
58608+
58609+ gr_handle_chroot_chdir(&path);
58610+
58611 error = 0;
58612 dput_and_out:
58613 path_put(&path);
58614@@ -471,6 +495,16 @@ static int chmod_common(struct path *path, umode_t mode)
58615 if (error)
58616 return error;
58617 mutex_lock(&inode->i_mutex);
58618+
58619+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
58620+ error = -EACCES;
58621+ goto out_unlock;
58622+ }
58623+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
58624+ error = -EACCES;
58625+ goto out_unlock;
58626+ }
58627+
58628 error = security_path_chmod(path, mode);
58629 if (error)
58630 goto out_unlock;
58631@@ -531,6 +565,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
58632 uid = make_kuid(current_user_ns(), user);
58633 gid = make_kgid(current_user_ns(), group);
58634
58635+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
58636+ return -EACCES;
58637+
58638 newattrs.ia_valid = ATTR_CTIME;
58639 if (user != (uid_t) -1) {
58640 if (!uid_valid(uid))
58641@@ -946,6 +983,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
58642 } else {
58643 fsnotify_open(f);
58644 fd_install(fd, f);
58645+ trace_do_sys_open(tmp->name, flags, mode);
58646 }
58647 }
58648 putname(tmp);
58649diff --git a/fs/pipe.c b/fs/pipe.c
58650index d2c45e1..009fe1c 100644
58651--- a/fs/pipe.c
58652+++ b/fs/pipe.c
58653@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
58654
58655 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
58656 {
58657- if (pipe->files)
58658+ if (atomic_read(&pipe->files))
58659 mutex_lock_nested(&pipe->mutex, subclass);
58660 }
58661
58662@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
58663
58664 void pipe_unlock(struct pipe_inode_info *pipe)
58665 {
58666- if (pipe->files)
58667+ if (atomic_read(&pipe->files))
58668 mutex_unlock(&pipe->mutex);
58669 }
58670 EXPORT_SYMBOL(pipe_unlock);
58671@@ -449,9 +449,9 @@ redo:
58672 }
58673 if (bufs) /* More to do? */
58674 continue;
58675- if (!pipe->writers)
58676+ if (!atomic_read(&pipe->writers))
58677 break;
58678- if (!pipe->waiting_writers) {
58679+ if (!atomic_read(&pipe->waiting_writers)) {
58680 /* syscall merging: Usually we must not sleep
58681 * if O_NONBLOCK is set, or if we got some data.
58682 * But if a writer sleeps in kernel space, then
58683@@ -513,7 +513,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
58684 ret = 0;
58685 __pipe_lock(pipe);
58686
58687- if (!pipe->readers) {
58688+ if (!atomic_read(&pipe->readers)) {
58689 send_sig(SIGPIPE, current, 0);
58690 ret = -EPIPE;
58691 goto out;
58692@@ -562,7 +562,7 @@ redo1:
58693 for (;;) {
58694 int bufs;
58695
58696- if (!pipe->readers) {
58697+ if (!atomic_read(&pipe->readers)) {
58698 send_sig(SIGPIPE, current, 0);
58699 if (!ret)
58700 ret = -EPIPE;
58701@@ -653,9 +653,9 @@ redo2:
58702 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
58703 do_wakeup = 0;
58704 }
58705- pipe->waiting_writers++;
58706+ atomic_inc(&pipe->waiting_writers);
58707 pipe_wait(pipe);
58708- pipe->waiting_writers--;
58709+ atomic_dec(&pipe->waiting_writers);
58710 }
58711 out:
58712 __pipe_unlock(pipe);
58713@@ -709,7 +709,7 @@ pipe_poll(struct file *filp, poll_table *wait)
58714 mask = 0;
58715 if (filp->f_mode & FMODE_READ) {
58716 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
58717- if (!pipe->writers && filp->f_version != pipe->w_counter)
58718+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
58719 mask |= POLLHUP;
58720 }
58721
58722@@ -719,7 +719,7 @@ pipe_poll(struct file *filp, poll_table *wait)
58723 * Most Unices do not set POLLERR for FIFOs but on Linux they
58724 * behave exactly like pipes for poll().
58725 */
58726- if (!pipe->readers)
58727+ if (!atomic_read(&pipe->readers))
58728 mask |= POLLERR;
58729 }
58730
58731@@ -734,17 +734,17 @@ pipe_release(struct inode *inode, struct file *file)
58732
58733 __pipe_lock(pipe);
58734 if (file->f_mode & FMODE_READ)
58735- pipe->readers--;
58736+ atomic_dec(&pipe->readers);
58737 if (file->f_mode & FMODE_WRITE)
58738- pipe->writers--;
58739+ atomic_dec(&pipe->writers);
58740
58741- if (pipe->readers || pipe->writers) {
58742+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
58743 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
58744 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
58745 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
58746 }
58747 spin_lock(&inode->i_lock);
58748- if (!--pipe->files) {
58749+ if (atomic_dec_and_test(&pipe->files)) {
58750 inode->i_pipe = NULL;
58751 kill = 1;
58752 }
58753@@ -811,7 +811,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
58754 kfree(pipe);
58755 }
58756
58757-static struct vfsmount *pipe_mnt __read_mostly;
58758+struct vfsmount *pipe_mnt __read_mostly;
58759
58760 /*
58761 * pipefs_dname() is called from d_path().
58762@@ -841,8 +841,9 @@ static struct inode * get_pipe_inode(void)
58763 goto fail_iput;
58764
58765 inode->i_pipe = pipe;
58766- pipe->files = 2;
58767- pipe->readers = pipe->writers = 1;
58768+ atomic_set(&pipe->files, 2);
58769+ atomic_set(&pipe->readers, 1);
58770+ atomic_set(&pipe->writers, 1);
58771 inode->i_fop = &pipefifo_fops;
58772
58773 /*
58774@@ -1022,17 +1023,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
58775 spin_lock(&inode->i_lock);
58776 if (inode->i_pipe) {
58777 pipe = inode->i_pipe;
58778- pipe->files++;
58779+ atomic_inc(&pipe->files);
58780 spin_unlock(&inode->i_lock);
58781 } else {
58782 spin_unlock(&inode->i_lock);
58783 pipe = alloc_pipe_info();
58784 if (!pipe)
58785 return -ENOMEM;
58786- pipe->files = 1;
58787+ atomic_set(&pipe->files, 1);
58788 spin_lock(&inode->i_lock);
58789 if (unlikely(inode->i_pipe)) {
58790- inode->i_pipe->files++;
58791+ atomic_inc(&inode->i_pipe->files);
58792 spin_unlock(&inode->i_lock);
58793 free_pipe_info(pipe);
58794 pipe = inode->i_pipe;
58795@@ -1057,10 +1058,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
58796 * opened, even when there is no process writing the FIFO.
58797 */
58798 pipe->r_counter++;
58799- if (pipe->readers++ == 0)
58800+ if (atomic_inc_return(&pipe->readers) == 1)
58801 wake_up_partner(pipe);
58802
58803- if (!is_pipe && !pipe->writers) {
58804+ if (!is_pipe && !atomic_read(&pipe->writers)) {
58805 if ((filp->f_flags & O_NONBLOCK)) {
58806 /* suppress POLLHUP until we have
58807 * seen a writer */
58808@@ -1079,14 +1080,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
58809 * errno=ENXIO when there is no process reading the FIFO.
58810 */
58811 ret = -ENXIO;
58812- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
58813+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
58814 goto err;
58815
58816 pipe->w_counter++;
58817- if (!pipe->writers++)
58818+ if (atomic_inc_return(&pipe->writers) == 1)
58819 wake_up_partner(pipe);
58820
58821- if (!is_pipe && !pipe->readers) {
58822+ if (!is_pipe && !atomic_read(&pipe->readers)) {
58823 if (wait_for_partner(pipe, &pipe->r_counter))
58824 goto err_wr;
58825 }
58826@@ -1100,11 +1101,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
58827 * the process can at least talk to itself.
58828 */
58829
58830- pipe->readers++;
58831- pipe->writers++;
58832+ atomic_inc(&pipe->readers);
58833+ atomic_inc(&pipe->writers);
58834 pipe->r_counter++;
58835 pipe->w_counter++;
58836- if (pipe->readers == 1 || pipe->writers == 1)
58837+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
58838 wake_up_partner(pipe);
58839 break;
58840
58841@@ -1118,20 +1119,20 @@ static int fifo_open(struct inode *inode, struct file *filp)
58842 return 0;
58843
58844 err_rd:
58845- if (!--pipe->readers)
58846+ if (atomic_dec_and_test(&pipe->readers))
58847 wake_up_interruptible(&pipe->wait);
58848 ret = -ERESTARTSYS;
58849 goto err;
58850
58851 err_wr:
58852- if (!--pipe->writers)
58853+ if (atomic_dec_and_test(&pipe->writers))
58854 wake_up_interruptible(&pipe->wait);
58855 ret = -ERESTARTSYS;
58856 goto err;
58857
58858 err:
58859 spin_lock(&inode->i_lock);
58860- if (!--pipe->files) {
58861+ if (atomic_dec_and_test(&pipe->files)) {
58862 inode->i_pipe = NULL;
58863 kill = 1;
58864 }
58865diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
58866index 15af622..0e9f4467 100644
58867--- a/fs/proc/Kconfig
58868+++ b/fs/proc/Kconfig
58869@@ -30,12 +30,12 @@ config PROC_FS
58870
58871 config PROC_KCORE
58872 bool "/proc/kcore support" if !ARM
58873- depends on PROC_FS && MMU
58874+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
58875
58876 config PROC_VMCORE
58877 bool "/proc/vmcore support"
58878- depends on PROC_FS && CRASH_DUMP
58879- default y
58880+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
58881+ default n
58882 help
58883 Exports the dump image of crashed kernel in ELF format.
58884
58885@@ -59,8 +59,8 @@ config PROC_SYSCTL
58886 limited in memory.
58887
58888 config PROC_PAGE_MONITOR
58889- default y
58890- depends on PROC_FS && MMU
58891+ default n
58892+ depends on PROC_FS && MMU && !GRKERNSEC
58893 bool "Enable /proc page monitoring" if EXPERT
58894 help
58895 Various /proc files exist to monitor process memory utilization:
58896diff --git a/fs/proc/array.c b/fs/proc/array.c
58897index cbd0f1b..adec3f0 100644
58898--- a/fs/proc/array.c
58899+++ b/fs/proc/array.c
58900@@ -60,6 +60,7 @@
58901 #include <linux/tty.h>
58902 #include <linux/string.h>
58903 #include <linux/mman.h>
58904+#include <linux/grsecurity.h>
58905 #include <linux/proc_fs.h>
58906 #include <linux/ioport.h>
58907 #include <linux/uaccess.h>
58908@@ -363,6 +364,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
58909 seq_putc(m, '\n');
58910 }
58911
58912+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58913+static inline void task_pax(struct seq_file *m, struct task_struct *p)
58914+{
58915+ if (p->mm)
58916+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
58917+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
58918+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
58919+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
58920+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
58921+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
58922+ else
58923+ seq_printf(m, "PaX:\t-----\n");
58924+}
58925+#endif
58926+
58927 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
58928 struct pid *pid, struct task_struct *task)
58929 {
58930@@ -381,9 +397,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
58931 task_cpus_allowed(m, task);
58932 cpuset_task_status_allowed(m, task);
58933 task_context_switch_counts(m, task);
58934+
58935+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58936+ task_pax(m, task);
58937+#endif
58938+
58939+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
58940+ task_grsec_rbac(m, task);
58941+#endif
58942+
58943 return 0;
58944 }
58945
58946+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58947+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
58948+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
58949+ _mm->pax_flags & MF_PAX_SEGMEXEC))
58950+#endif
58951+
58952 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
58953 struct pid *pid, struct task_struct *task, int whole)
58954 {
58955@@ -405,6 +436,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
58956 char tcomm[sizeof(task->comm)];
58957 unsigned long flags;
58958
58959+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58960+ if (current->exec_id != m->exec_id) {
58961+ gr_log_badprocpid("stat");
58962+ return 0;
58963+ }
58964+#endif
58965+
58966 state = *get_task_state(task);
58967 vsize = eip = esp = 0;
58968 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
58969@@ -476,6 +514,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
58970 gtime = task_gtime(task);
58971 }
58972
58973+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58974+ if (PAX_RAND_FLAGS(mm)) {
58975+ eip = 0;
58976+ esp = 0;
58977+ wchan = 0;
58978+ }
58979+#endif
58980+#ifdef CONFIG_GRKERNSEC_HIDESYM
58981+ wchan = 0;
58982+ eip =0;
58983+ esp =0;
58984+#endif
58985+
58986 /* scale priority and nice values from timeslices to -20..20 */
58987 /* to make it look like a "normal" Unix priority/nice value */
58988 priority = task_prio(task);
58989@@ -512,9 +563,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
58990 seq_put_decimal_ull(m, ' ', vsize);
58991 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
58992 seq_put_decimal_ull(m, ' ', rsslim);
58993+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58994+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
58995+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
58996+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
58997+#else
58998 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
58999 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
59000 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
59001+#endif
59002 seq_put_decimal_ull(m, ' ', esp);
59003 seq_put_decimal_ull(m, ' ', eip);
59004 /* The signal information here is obsolete.
59005@@ -536,7 +593,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
59006 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
59007 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
59008
59009- if (mm && permitted) {
59010+ if (mm && permitted
59011+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59012+ && !PAX_RAND_FLAGS(mm)
59013+#endif
59014+ ) {
59015 seq_put_decimal_ull(m, ' ', mm->start_data);
59016 seq_put_decimal_ull(m, ' ', mm->end_data);
59017 seq_put_decimal_ull(m, ' ', mm->start_brk);
59018@@ -574,8 +635,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
59019 struct pid *pid, struct task_struct *task)
59020 {
59021 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
59022- struct mm_struct *mm = get_task_mm(task);
59023+ struct mm_struct *mm;
59024
59025+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59026+ if (current->exec_id != m->exec_id) {
59027+ gr_log_badprocpid("statm");
59028+ return 0;
59029+ }
59030+#endif
59031+ mm = get_task_mm(task);
59032 if (mm) {
59033 size = task_statm(mm, &shared, &text, &data, &resident);
59034 mmput(mm);
59035@@ -598,6 +666,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
59036 return 0;
59037 }
59038
59039+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
59040+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
59041+{
59042+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
59043+}
59044+#endif
59045+
59046 #ifdef CONFIG_CHECKPOINT_RESTORE
59047 static struct pid *
59048 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
59049diff --git a/fs/proc/base.c b/fs/proc/base.c
59050index c3834da..b402b2b 100644
59051--- a/fs/proc/base.c
59052+++ b/fs/proc/base.c
59053@@ -113,6 +113,14 @@ struct pid_entry {
59054 union proc_op op;
59055 };
59056
59057+struct getdents_callback {
59058+ struct linux_dirent __user * current_dir;
59059+ struct linux_dirent __user * previous;
59060+ struct file * file;
59061+ int count;
59062+ int error;
59063+};
59064+
59065 #define NOD(NAME, MODE, IOP, FOP, OP) { \
59066 .name = (NAME), \
59067 .len = sizeof(NAME) - 1, \
59068@@ -210,6 +218,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
59069 if (!mm->arg_end)
59070 goto out_mm; /* Shh! No looking before we're done */
59071
59072+ if (gr_acl_handle_procpidmem(task))
59073+ goto out_mm;
59074+
59075 len = mm->arg_end - mm->arg_start;
59076
59077 if (len > PAGE_SIZE)
59078@@ -237,12 +248,28 @@ out:
59079 return res;
59080 }
59081
59082+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59083+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
59084+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
59085+ _mm->pax_flags & MF_PAX_SEGMEXEC))
59086+#endif
59087+
59088 static int proc_pid_auxv(struct task_struct *task, char *buffer)
59089 {
59090 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
59091 int res = PTR_ERR(mm);
59092 if (mm && !IS_ERR(mm)) {
59093 unsigned int nwords = 0;
59094+
59095+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59096+ /* allow if we're currently ptracing this task */
59097+ if (PAX_RAND_FLAGS(mm) &&
59098+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
59099+ mmput(mm);
59100+ return 0;
59101+ }
59102+#endif
59103+
59104 do {
59105 nwords += 2;
59106 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
59107@@ -256,7 +283,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
59108 }
59109
59110
59111-#ifdef CONFIG_KALLSYMS
59112+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59113 /*
59114 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
59115 * Returns the resolved symbol. If that fails, simply return the address.
59116@@ -295,7 +322,7 @@ static void unlock_trace(struct task_struct *task)
59117 mutex_unlock(&task->signal->cred_guard_mutex);
59118 }
59119
59120-#ifdef CONFIG_STACKTRACE
59121+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59122
59123 #define MAX_STACK_TRACE_DEPTH 64
59124
59125@@ -518,7 +545,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
59126 return count;
59127 }
59128
59129-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
59130+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
59131 static int proc_pid_syscall(struct task_struct *task, char *buffer)
59132 {
59133 long nr;
59134@@ -547,7 +574,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
59135 /************************************************************************/
59136
59137 /* permission checks */
59138-static int proc_fd_access_allowed(struct inode *inode)
59139+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
59140 {
59141 struct task_struct *task;
59142 int allowed = 0;
59143@@ -557,7 +584,10 @@ static int proc_fd_access_allowed(struct inode *inode)
59144 */
59145 task = get_proc_task(inode);
59146 if (task) {
59147- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
59148+ if (log)
59149+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
59150+ else
59151+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
59152 put_task_struct(task);
59153 }
59154 return allowed;
59155@@ -588,10 +618,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
59156 struct task_struct *task,
59157 int hide_pid_min)
59158 {
59159+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
59160+ return false;
59161+
59162+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59163+ rcu_read_lock();
59164+ {
59165+ const struct cred *tmpcred = current_cred();
59166+ const struct cred *cred = __task_cred(task);
59167+
59168+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
59169+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
59170+ || in_group_p(grsec_proc_gid)
59171+#endif
59172+ ) {
59173+ rcu_read_unlock();
59174+ return true;
59175+ }
59176+ }
59177+ rcu_read_unlock();
59178+
59179+ if (!pid->hide_pid)
59180+ return false;
59181+#endif
59182+
59183 if (pid->hide_pid < hide_pid_min)
59184 return true;
59185 if (in_group_p(pid->pid_gid))
59186 return true;
59187+
59188 return ptrace_may_access(task, PTRACE_MODE_READ);
59189 }
59190
59191@@ -609,7 +664,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
59192 put_task_struct(task);
59193
59194 if (!has_perms) {
59195+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59196+ {
59197+#else
59198 if (pid->hide_pid == 2) {
59199+#endif
59200 /*
59201 * Let's make getdents(), stat(), and open()
59202 * consistent with each other. If a process
59203@@ -707,6 +766,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
59204 if (!task)
59205 return -ESRCH;
59206
59207+ if (gr_acl_handle_procpidmem(task)) {
59208+ put_task_struct(task);
59209+ return -EPERM;
59210+ }
59211+
59212 mm = mm_access(task, mode);
59213 put_task_struct(task);
59214
59215@@ -722,6 +786,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
59216
59217 file->private_data = mm;
59218
59219+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59220+ file->f_version = current->exec_id;
59221+#endif
59222+
59223 return 0;
59224 }
59225
59226@@ -743,6 +811,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
59227 ssize_t copied;
59228 char *page;
59229
59230+#ifdef CONFIG_GRKERNSEC
59231+ if (write)
59232+ return -EPERM;
59233+#endif
59234+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59235+ if (file->f_version != current->exec_id) {
59236+ gr_log_badprocpid("mem");
59237+ return 0;
59238+ }
59239+#endif
59240+
59241 if (!mm)
59242 return 0;
59243
59244@@ -755,7 +834,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
59245 goto free;
59246
59247 while (count > 0) {
59248- int this_len = min_t(int, count, PAGE_SIZE);
59249+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
59250
59251 if (write && copy_from_user(page, buf, this_len)) {
59252 copied = -EFAULT;
59253@@ -847,6 +926,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
59254 if (!mm)
59255 return 0;
59256
59257+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59258+ if (file->f_version != current->exec_id) {
59259+ gr_log_badprocpid("environ");
59260+ return 0;
59261+ }
59262+#endif
59263+
59264 page = (char *)__get_free_page(GFP_TEMPORARY);
59265 if (!page)
59266 return -ENOMEM;
59267@@ -856,7 +942,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
59268 goto free;
59269 while (count > 0) {
59270 size_t this_len, max_len;
59271- int retval;
59272+ ssize_t retval;
59273
59274 if (src >= (mm->env_end - mm->env_start))
59275 break;
59276@@ -1461,7 +1547,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
59277 int error = -EACCES;
59278
59279 /* Are we allowed to snoop on the tasks file descriptors? */
59280- if (!proc_fd_access_allowed(inode))
59281+ if (!proc_fd_access_allowed(inode, 0))
59282 goto out;
59283
59284 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
59285@@ -1505,8 +1591,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
59286 struct path path;
59287
59288 /* Are we allowed to snoop on the tasks file descriptors? */
59289- if (!proc_fd_access_allowed(inode))
59290- goto out;
59291+ /* logging this is needed for learning on chromium to work properly,
59292+ but we don't want to flood the logs from 'ps' which does a readlink
59293+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
59294+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
59295+ */
59296+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
59297+ if (!proc_fd_access_allowed(inode,0))
59298+ goto out;
59299+ } else {
59300+ if (!proc_fd_access_allowed(inode,1))
59301+ goto out;
59302+ }
59303
59304 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
59305 if (error)
59306@@ -1556,7 +1652,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
59307 rcu_read_lock();
59308 cred = __task_cred(task);
59309 inode->i_uid = cred->euid;
59310+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
59311+ inode->i_gid = grsec_proc_gid;
59312+#else
59313 inode->i_gid = cred->egid;
59314+#endif
59315 rcu_read_unlock();
59316 }
59317 security_task_to_inode(task, inode);
59318@@ -1592,10 +1692,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
59319 return -ENOENT;
59320 }
59321 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
59322+#ifdef CONFIG_GRKERNSEC_PROC_USER
59323+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
59324+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59325+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
59326+#endif
59327 task_dumpable(task)) {
59328 cred = __task_cred(task);
59329 stat->uid = cred->euid;
59330+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
59331+ stat->gid = grsec_proc_gid;
59332+#else
59333 stat->gid = cred->egid;
59334+#endif
59335 }
59336 }
59337 rcu_read_unlock();
59338@@ -1633,11 +1742,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
59339
59340 if (task) {
59341 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
59342+#ifdef CONFIG_GRKERNSEC_PROC_USER
59343+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
59344+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59345+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
59346+#endif
59347 task_dumpable(task)) {
59348 rcu_read_lock();
59349 cred = __task_cred(task);
59350 inode->i_uid = cred->euid;
59351+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
59352+ inode->i_gid = grsec_proc_gid;
59353+#else
59354 inode->i_gid = cred->egid;
59355+#endif
59356 rcu_read_unlock();
59357 } else {
59358 inode->i_uid = GLOBAL_ROOT_UID;
59359@@ -2196,6 +2314,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
59360 if (!task)
59361 goto out_no_task;
59362
59363+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
59364+ goto out;
59365+
59366 /*
59367 * Yes, it does not scale. And it should not. Don't add
59368 * new entries into /proc/<tgid>/ without very good reasons.
59369@@ -2240,6 +2361,9 @@ static int proc_pident_readdir(struct file *filp,
59370 if (!task)
59371 goto out_no_task;
59372
59373+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
59374+ goto out;
59375+
59376 ret = 0;
59377 i = filp->f_pos;
59378 switch (i) {
59379@@ -2653,7 +2777,7 @@ static const struct pid_entry tgid_base_stuff[] = {
59380 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
59381 #endif
59382 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
59383-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
59384+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
59385 INF("syscall", S_IRUGO, proc_pid_syscall),
59386 #endif
59387 INF("cmdline", S_IRUGO, proc_pid_cmdline),
59388@@ -2678,10 +2802,10 @@ static const struct pid_entry tgid_base_stuff[] = {
59389 #ifdef CONFIG_SECURITY
59390 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
59391 #endif
59392-#ifdef CONFIG_KALLSYMS
59393+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59394 INF("wchan", S_IRUGO, proc_pid_wchan),
59395 #endif
59396-#ifdef CONFIG_STACKTRACE
59397+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59398 ONE("stack", S_IRUGO, proc_pid_stack),
59399 #endif
59400 #ifdef CONFIG_SCHEDSTATS
59401@@ -2715,6 +2839,9 @@ static const struct pid_entry tgid_base_stuff[] = {
59402 #ifdef CONFIG_HARDWALL
59403 INF("hardwall", S_IRUGO, proc_pid_hardwall),
59404 #endif
59405+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
59406+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
59407+#endif
59408 #ifdef CONFIG_USER_NS
59409 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
59410 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
59411@@ -2847,7 +2974,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
59412 if (!inode)
59413 goto out;
59414
59415+#ifdef CONFIG_GRKERNSEC_PROC_USER
59416+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
59417+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59418+ inode->i_gid = grsec_proc_gid;
59419+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
59420+#else
59421 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
59422+#endif
59423 inode->i_op = &proc_tgid_base_inode_operations;
59424 inode->i_fop = &proc_tgid_base_operations;
59425 inode->i_flags|=S_IMMUTABLE;
59426@@ -2885,7 +3019,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
59427 if (!task)
59428 goto out;
59429
59430+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
59431+ goto out_put_task;
59432+
59433 result = proc_pid_instantiate(dir, dentry, task, NULL);
59434+out_put_task:
59435 put_task_struct(task);
59436 out:
59437 return result;
59438@@ -2948,6 +3086,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
59439 static int fake_filldir(void *buf, const char *name, int namelen,
59440 loff_t offset, u64 ino, unsigned d_type)
59441 {
59442+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
59443+ __buf->error = -EINVAL;
59444 return 0;
59445 }
59446
59447@@ -3007,7 +3147,7 @@ static const struct pid_entry tid_base_stuff[] = {
59448 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
59449 #endif
59450 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
59451-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
59452+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
59453 INF("syscall", S_IRUGO, proc_pid_syscall),
59454 #endif
59455 INF("cmdline", S_IRUGO, proc_pid_cmdline),
59456@@ -3034,10 +3174,10 @@ static const struct pid_entry tid_base_stuff[] = {
59457 #ifdef CONFIG_SECURITY
59458 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
59459 #endif
59460-#ifdef CONFIG_KALLSYMS
59461+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59462 INF("wchan", S_IRUGO, proc_pid_wchan),
59463 #endif
59464-#ifdef CONFIG_STACKTRACE
59465+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59466 ONE("stack", S_IRUGO, proc_pid_stack),
59467 #endif
59468 #ifdef CONFIG_SCHEDSTATS
59469diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
59470index 82676e3..5f8518a 100644
59471--- a/fs/proc/cmdline.c
59472+++ b/fs/proc/cmdline.c
59473@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
59474
59475 static int __init proc_cmdline_init(void)
59476 {
59477+#ifdef CONFIG_GRKERNSEC_PROC_ADD
59478+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
59479+#else
59480 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
59481+#endif
59482 return 0;
59483 }
59484 module_init(proc_cmdline_init);
59485diff --git a/fs/proc/devices.c b/fs/proc/devices.c
59486index b143471..bb105e5 100644
59487--- a/fs/proc/devices.c
59488+++ b/fs/proc/devices.c
59489@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
59490
59491 static int __init proc_devices_init(void)
59492 {
59493+#ifdef CONFIG_GRKERNSEC_PROC_ADD
59494+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
59495+#else
59496 proc_create("devices", 0, NULL, &proc_devinfo_operations);
59497+#endif
59498 return 0;
59499 }
59500 module_init(proc_devices_init);
59501diff --git a/fs/proc/fd.c b/fs/proc/fd.c
59502index d7a4a28..0201742 100644
59503--- a/fs/proc/fd.c
59504+++ b/fs/proc/fd.c
59505@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
59506 if (!task)
59507 return -ENOENT;
59508
59509- files = get_files_struct(task);
59510+ if (!gr_acl_handle_procpidmem(task))
59511+ files = get_files_struct(task);
59512 put_task_struct(task);
59513
59514 if (files) {
59515@@ -302,11 +303,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
59516 */
59517 int proc_fd_permission(struct inode *inode, int mask)
59518 {
59519+ struct task_struct *task;
59520 int rv = generic_permission(inode, mask);
59521- if (rv == 0)
59522- return 0;
59523+
59524 if (task_pid(current) == proc_pid(inode))
59525 rv = 0;
59526+
59527+ task = get_proc_task(inode);
59528+ if (task == NULL)
59529+ return rv;
59530+
59531+ if (gr_acl_handle_procpidmem(task))
59532+ rv = -EACCES;
59533+
59534+ put_task_struct(task);
59535+
59536 return rv;
59537 }
59538
59539diff --git a/fs/proc/inode.c b/fs/proc/inode.c
59540index 073aea6..0630370 100644
59541--- a/fs/proc/inode.c
59542+++ b/fs/proc/inode.c
59543@@ -23,11 +23,17 @@
59544 #include <linux/slab.h>
59545 #include <linux/mount.h>
59546 #include <linux/magic.h>
59547+#include <linux/grsecurity.h>
59548
59549 #include <asm/uaccess.h>
59550
59551 #include "internal.h"
59552
59553+#ifdef CONFIG_PROC_SYSCTL
59554+extern const struct inode_operations proc_sys_inode_operations;
59555+extern const struct inode_operations proc_sys_dir_operations;
59556+#endif
59557+
59558 static void proc_evict_inode(struct inode *inode)
59559 {
59560 struct proc_dir_entry *de;
59561@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
59562 ns = PROC_I(inode)->ns.ns;
59563 if (ns_ops && ns)
59564 ns_ops->put(ns);
59565+
59566+#ifdef CONFIG_PROC_SYSCTL
59567+ if (inode->i_op == &proc_sys_inode_operations ||
59568+ inode->i_op == &proc_sys_dir_operations)
59569+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
59570+#endif
59571+
59572 }
59573
59574 static struct kmem_cache * proc_inode_cachep;
59575@@ -385,7 +398,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
59576 if (de->mode) {
59577 inode->i_mode = de->mode;
59578 inode->i_uid = de->uid;
59579+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
59580+ inode->i_gid = grsec_proc_gid;
59581+#else
59582 inode->i_gid = de->gid;
59583+#endif
59584 }
59585 if (de->size)
59586 inode->i_size = de->size;
59587diff --git a/fs/proc/internal.h b/fs/proc/internal.h
59588index d600fb0..3b495fe 100644
59589--- a/fs/proc/internal.h
59590+++ b/fs/proc/internal.h
59591@@ -155,6 +155,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
59592 struct pid *, struct task_struct *);
59593 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
59594 struct pid *, struct task_struct *);
59595+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
59596+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
59597+#endif
59598
59599 /*
59600 * base.c
59601diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
59602index 0a22194..a9fc8c1 100644
59603--- a/fs/proc/kcore.c
59604+++ b/fs/proc/kcore.c
59605@@ -484,9 +484,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
59606 * the addresses in the elf_phdr on our list.
59607 */
59608 start = kc_offset_to_vaddr(*fpos - elf_buflen);
59609- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
59610+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
59611+ if (tsz > buflen)
59612 tsz = buflen;
59613-
59614+
59615 while (buflen) {
59616 struct kcore_list *m;
59617
59618@@ -515,20 +516,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
59619 kfree(elf_buf);
59620 } else {
59621 if (kern_addr_valid(start)) {
59622- unsigned long n;
59623+ char *elf_buf;
59624+ mm_segment_t oldfs;
59625
59626- n = copy_to_user(buffer, (char *)start, tsz);
59627- /*
59628- * We cannot distinguish between fault on source
59629- * and fault on destination. When this happens
59630- * we clear too and hope it will trigger the
59631- * EFAULT again.
59632- */
59633- if (n) {
59634- if (clear_user(buffer + tsz - n,
59635- n))
59636+ elf_buf = kmalloc(tsz, GFP_KERNEL);
59637+ if (!elf_buf)
59638+ return -ENOMEM;
59639+ oldfs = get_fs();
59640+ set_fs(KERNEL_DS);
59641+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
59642+ set_fs(oldfs);
59643+ if (copy_to_user(buffer, elf_buf, tsz)) {
59644+ kfree(elf_buf);
59645 return -EFAULT;
59646+ }
59647 }
59648+ set_fs(oldfs);
59649+ kfree(elf_buf);
59650 } else {
59651 if (clear_user(buffer, tsz))
59652 return -EFAULT;
59653@@ -548,6 +552,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
59654
59655 static int open_kcore(struct inode *inode, struct file *filp)
59656 {
59657+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
59658+ return -EPERM;
59659+#endif
59660 if (!capable(CAP_SYS_RAWIO))
59661 return -EPERM;
59662 if (kcore_need_update)
59663diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
59664index 5aa847a..f77c8d4 100644
59665--- a/fs/proc/meminfo.c
59666+++ b/fs/proc/meminfo.c
59667@@ -159,7 +159,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
59668 vmi.used >> 10,
59669 vmi.largest_chunk >> 10
59670 #ifdef CONFIG_MEMORY_FAILURE
59671- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
59672+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
59673 #endif
59674 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
59675 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
59676diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
59677index ccfd99b..1b7e255 100644
59678--- a/fs/proc/nommu.c
59679+++ b/fs/proc/nommu.c
59680@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
59681 if (len < 1)
59682 len = 1;
59683 seq_printf(m, "%*c", len, ' ');
59684- seq_path(m, &file->f_path, "");
59685+ seq_path(m, &file->f_path, "\n\\");
59686 }
59687
59688 seq_putc(m, '\n');
59689diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
59690index 986e832..6e8e859 100644
59691--- a/fs/proc/proc_net.c
59692+++ b/fs/proc/proc_net.c
59693@@ -23,6 +23,7 @@
59694 #include <linux/nsproxy.h>
59695 #include <net/net_namespace.h>
59696 #include <linux/seq_file.h>
59697+#include <linux/grsecurity.h>
59698
59699 #include "internal.h"
59700
59701@@ -109,6 +110,17 @@ static struct net *get_proc_task_net(struct inode *dir)
59702 struct task_struct *task;
59703 struct nsproxy *ns;
59704 struct net *net = NULL;
59705+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59706+ const struct cred *cred = current_cred();
59707+#endif
59708+
59709+#ifdef CONFIG_GRKERNSEC_PROC_USER
59710+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
59711+ return net;
59712+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59713+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
59714+ return net;
59715+#endif
59716
59717 rcu_read_lock();
59718 task = pid_task(proc_pid(dir), PIDTYPE_PID);
59719diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
59720index ac05f33..1e6dc7e 100644
59721--- a/fs/proc/proc_sysctl.c
59722+++ b/fs/proc/proc_sysctl.c
59723@@ -13,11 +13,15 @@
59724 #include <linux/module.h>
59725 #include "internal.h"
59726
59727+extern int gr_handle_chroot_sysctl(const int op);
59728+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
59729+ const int op);
59730+
59731 static const struct dentry_operations proc_sys_dentry_operations;
59732 static const struct file_operations proc_sys_file_operations;
59733-static const struct inode_operations proc_sys_inode_operations;
59734+const struct inode_operations proc_sys_inode_operations;
59735 static const struct file_operations proc_sys_dir_file_operations;
59736-static const struct inode_operations proc_sys_dir_operations;
59737+const struct inode_operations proc_sys_dir_operations;
59738
59739 void proc_sys_poll_notify(struct ctl_table_poll *poll)
59740 {
59741@@ -467,6 +471,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
59742
59743 err = NULL;
59744 d_set_d_op(dentry, &proc_sys_dentry_operations);
59745+
59746+ gr_handle_proc_create(dentry, inode);
59747+
59748 d_add(dentry, inode);
59749
59750 out:
59751@@ -482,6 +489,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
59752 struct inode *inode = file_inode(filp);
59753 struct ctl_table_header *head = grab_header(inode);
59754 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
59755+ int op = write ? MAY_WRITE : MAY_READ;
59756 ssize_t error;
59757 size_t res;
59758
59759@@ -493,7 +501,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
59760 * and won't be until we finish.
59761 */
59762 error = -EPERM;
59763- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
59764+ if (sysctl_perm(head, table, op))
59765 goto out;
59766
59767 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
59768@@ -501,6 +509,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
59769 if (!table->proc_handler)
59770 goto out;
59771
59772+#ifdef CONFIG_GRKERNSEC
59773+ error = -EPERM;
59774+ if (gr_handle_chroot_sysctl(op))
59775+ goto out;
59776+ dget(filp->f_path.dentry);
59777+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
59778+ dput(filp->f_path.dentry);
59779+ goto out;
59780+ }
59781+ dput(filp->f_path.dentry);
59782+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
59783+ goto out;
59784+ if (write && !capable(CAP_SYS_ADMIN))
59785+ goto out;
59786+#endif
59787+
59788 /* careful: calling conventions are nasty here */
59789 res = count;
59790 error = table->proc_handler(table, write, buf, &res, ppos);
59791@@ -598,6 +622,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
59792 return -ENOMEM;
59793 } else {
59794 d_set_d_op(child, &proc_sys_dentry_operations);
59795+
59796+ gr_handle_proc_create(child, inode);
59797+
59798 d_add(child, inode);
59799 }
59800 } else {
59801@@ -641,6 +668,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
59802 if ((*pos)++ < file->f_pos)
59803 return 0;
59804
59805+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
59806+ return 0;
59807+
59808 if (unlikely(S_ISLNK(table->mode)))
59809 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
59810 else
59811@@ -751,6 +781,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
59812 if (IS_ERR(head))
59813 return PTR_ERR(head);
59814
59815+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
59816+ return -ENOENT;
59817+
59818 generic_fillattr(inode, stat);
59819 if (table)
59820 stat->mode = (stat->mode & S_IFMT) | table->mode;
59821@@ -773,13 +806,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
59822 .llseek = generic_file_llseek,
59823 };
59824
59825-static const struct inode_operations proc_sys_inode_operations = {
59826+const struct inode_operations proc_sys_inode_operations = {
59827 .permission = proc_sys_permission,
59828 .setattr = proc_sys_setattr,
59829 .getattr = proc_sys_getattr,
59830 };
59831
59832-static const struct inode_operations proc_sys_dir_operations = {
59833+const struct inode_operations proc_sys_dir_operations = {
59834 .lookup = proc_sys_lookup,
59835 .permission = proc_sys_permission,
59836 .setattr = proc_sys_setattr,
59837@@ -855,7 +888,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
59838 static struct ctl_dir *new_dir(struct ctl_table_set *set,
59839 const char *name, int namelen)
59840 {
59841- struct ctl_table *table;
59842+ ctl_table_no_const *table;
59843 struct ctl_dir *new;
59844 struct ctl_node *node;
59845 char *new_name;
59846@@ -867,7 +900,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
59847 return NULL;
59848
59849 node = (struct ctl_node *)(new + 1);
59850- table = (struct ctl_table *)(node + 1);
59851+ table = (ctl_table_no_const *)(node + 1);
59852 new_name = (char *)(table + 2);
59853 memcpy(new_name, name, namelen);
59854 new_name[namelen] = '\0';
59855@@ -1036,7 +1069,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
59856 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
59857 struct ctl_table_root *link_root)
59858 {
59859- struct ctl_table *link_table, *entry, *link;
59860+ ctl_table_no_const *link_table, *link;
59861+ struct ctl_table *entry;
59862 struct ctl_table_header *links;
59863 struct ctl_node *node;
59864 char *link_name;
59865@@ -1059,7 +1093,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
59866 return NULL;
59867
59868 node = (struct ctl_node *)(links + 1);
59869- link_table = (struct ctl_table *)(node + nr_entries);
59870+ link_table = (ctl_table_no_const *)(node + nr_entries);
59871 link_name = (char *)&link_table[nr_entries + 1];
59872
59873 for (link = link_table, entry = table; entry->procname; link++, entry++) {
59874@@ -1307,8 +1341,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
59875 struct ctl_table_header ***subheader, struct ctl_table_set *set,
59876 struct ctl_table *table)
59877 {
59878- struct ctl_table *ctl_table_arg = NULL;
59879- struct ctl_table *entry, *files;
59880+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
59881+ struct ctl_table *entry;
59882 int nr_files = 0;
59883 int nr_dirs = 0;
59884 int err = -ENOMEM;
59885@@ -1320,10 +1354,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
59886 nr_files++;
59887 }
59888
59889- files = table;
59890 /* If there are mixed files and directories we need a new table */
59891 if (nr_dirs && nr_files) {
59892- struct ctl_table *new;
59893+ ctl_table_no_const *new;
59894 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
59895 GFP_KERNEL);
59896 if (!files)
59897@@ -1341,7 +1374,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
59898 /* Register everything except a directory full of subdirectories */
59899 if (nr_files || !nr_dirs) {
59900 struct ctl_table_header *header;
59901- header = __register_sysctl_table(set, path, files);
59902+ header = __register_sysctl_table(set, path, files ? files : table);
59903 if (!header) {
59904 kfree(ctl_table_arg);
59905 goto out;
59906diff --git a/fs/proc/root.c b/fs/proc/root.c
59907index 41a6ea9..23eaa92 100644
59908--- a/fs/proc/root.c
59909+++ b/fs/proc/root.c
59910@@ -182,7 +182,15 @@ void __init proc_root_init(void)
59911 #ifdef CONFIG_PROC_DEVICETREE
59912 proc_device_tree_init();
59913 #endif
59914+#ifdef CONFIG_GRKERNSEC_PROC_ADD
59915+#ifdef CONFIG_GRKERNSEC_PROC_USER
59916+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
59917+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59918+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
59919+#endif
59920+#else
59921 proc_mkdir("bus", NULL);
59922+#endif
59923 proc_sys_init();
59924 }
59925
59926diff --git a/fs/proc/self.c b/fs/proc/self.c
59927index 6b6a993..807cccc 100644
59928--- a/fs/proc/self.c
59929+++ b/fs/proc/self.c
59930@@ -39,7 +39,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
59931 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
59932 void *cookie)
59933 {
59934- char *s = nd_get_link(nd);
59935+ const char *s = nd_get_link(nd);
59936 if (!IS_ERR(s))
59937 kfree(s);
59938 }
59939diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
59940index 65fc60a..350cc48 100644
59941--- a/fs/proc/task_mmu.c
59942+++ b/fs/proc/task_mmu.c
59943@@ -11,12 +11,19 @@
59944 #include <linux/rmap.h>
59945 #include <linux/swap.h>
59946 #include <linux/swapops.h>
59947+#include <linux/grsecurity.h>
59948
59949 #include <asm/elf.h>
59950 #include <asm/uaccess.h>
59951 #include <asm/tlbflush.h>
59952 #include "internal.h"
59953
59954+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59955+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
59956+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
59957+ _mm->pax_flags & MF_PAX_SEGMEXEC))
59958+#endif
59959+
59960 void task_mem(struct seq_file *m, struct mm_struct *mm)
59961 {
59962 unsigned long data, text, lib, swap;
59963@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
59964 "VmExe:\t%8lu kB\n"
59965 "VmLib:\t%8lu kB\n"
59966 "VmPTE:\t%8lu kB\n"
59967- "VmSwap:\t%8lu kB\n",
59968- hiwater_vm << (PAGE_SHIFT-10),
59969+ "VmSwap:\t%8lu kB\n"
59970+
59971+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59972+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
59973+#endif
59974+
59975+ ,hiwater_vm << (PAGE_SHIFT-10),
59976 total_vm << (PAGE_SHIFT-10),
59977 mm->locked_vm << (PAGE_SHIFT-10),
59978 mm->pinned_vm << (PAGE_SHIFT-10),
59979@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
59980 data << (PAGE_SHIFT-10),
59981 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
59982 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
59983- swap << (PAGE_SHIFT-10));
59984+ swap << (PAGE_SHIFT-10)
59985+
59986+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59987+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59988+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
59989+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
59990+#else
59991+ , mm->context.user_cs_base
59992+ , mm->context.user_cs_limit
59993+#endif
59994+#endif
59995+
59996+ );
59997 }
59998
59999 unsigned long task_vsize(struct mm_struct *mm)
60000@@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
60001 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
60002 }
60003
60004- /* We don't show the stack guard page in /proc/maps */
60005+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60006+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
60007+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
60008+#else
60009 start = vma->vm_start;
60010- if (stack_guard_page_start(vma, start))
60011- start += PAGE_SIZE;
60012 end = vma->vm_end;
60013- if (stack_guard_page_end(vma, end))
60014- end -= PAGE_SIZE;
60015+#endif
60016
60017 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
60018 start,
60019@@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
60020 flags & VM_WRITE ? 'w' : '-',
60021 flags & VM_EXEC ? 'x' : '-',
60022 flags & VM_MAYSHARE ? 's' : 'p',
60023+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60024+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
60025+#else
60026 pgoff,
60027+#endif
60028 MAJOR(dev), MINOR(dev), ino, &len);
60029
60030 /*
60031@@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
60032 */
60033 if (file) {
60034 pad_len_spaces(m, len);
60035- seq_path(m, &file->f_path, "\n");
60036+ seq_path(m, &file->f_path, "\n\\");
60037 goto done;
60038 }
60039
60040@@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
60041 * Thread stack in /proc/PID/task/TID/maps or
60042 * the main process stack.
60043 */
60044- if (!is_pid || (vma->vm_start <= mm->start_stack &&
60045- vma->vm_end >= mm->start_stack)) {
60046+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
60047+ (vma->vm_start <= mm->start_stack &&
60048+ vma->vm_end >= mm->start_stack)) {
60049 name = "[stack]";
60050 } else {
60051 /* Thread stack in /proc/PID/maps */
60052@@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
60053 struct proc_maps_private *priv = m->private;
60054 struct task_struct *task = priv->task;
60055
60056+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60057+ if (current->exec_id != m->exec_id) {
60058+ gr_log_badprocpid("maps");
60059+ return 0;
60060+ }
60061+#endif
60062+
60063 show_map_vma(m, vma, is_pid);
60064
60065 if (m->count < m->size) /* vma is copied successfully */
60066@@ -589,12 +625,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
60067 .private = &mss,
60068 };
60069
60070+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60071+ if (current->exec_id != m->exec_id) {
60072+ gr_log_badprocpid("smaps");
60073+ return 0;
60074+ }
60075+#endif
60076 memset(&mss, 0, sizeof mss);
60077- mss.vma = vma;
60078- /* mmap_sem is held in m_start */
60079- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
60080- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
60081-
60082+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60083+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
60084+#endif
60085+ mss.vma = vma;
60086+ /* mmap_sem is held in m_start */
60087+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
60088+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
60089+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60090+ }
60091+#endif
60092 show_map_vma(m, vma, is_pid);
60093
60094 seq_printf(m,
60095@@ -612,7 +659,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
60096 "KernelPageSize: %8lu kB\n"
60097 "MMUPageSize: %8lu kB\n"
60098 "Locked: %8lu kB\n",
60099+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60100+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
60101+#else
60102 (vma->vm_end - vma->vm_start) >> 10,
60103+#endif
60104 mss.resident >> 10,
60105 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
60106 mss.shared_clean >> 10,
60107@@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
60108 int n;
60109 char buffer[50];
60110
60111+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60112+ if (current->exec_id != m->exec_id) {
60113+ gr_log_badprocpid("numa_maps");
60114+ return 0;
60115+ }
60116+#endif
60117+
60118 if (!mm)
60119 return 0;
60120
60121@@ -1281,11 +1339,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
60122 mpol_to_str(buffer, sizeof(buffer), pol);
60123 mpol_cond_put(pol);
60124
60125+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60126+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
60127+#else
60128 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
60129+#endif
60130
60131 if (file) {
60132 seq_printf(m, " file=");
60133- seq_path(m, &file->f_path, "\n\t= ");
60134+ seq_path(m, &file->f_path, "\n\t\\= ");
60135 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
60136 seq_printf(m, " heap");
60137 } else {
60138diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
60139index 56123a6..5a2f6ec 100644
60140--- a/fs/proc/task_nommu.c
60141+++ b/fs/proc/task_nommu.c
60142@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
60143 else
60144 bytes += kobjsize(mm);
60145
60146- if (current->fs && current->fs->users > 1)
60147+ if (current->fs && atomic_read(&current->fs->users) > 1)
60148 sbytes += kobjsize(current->fs);
60149 else
60150 bytes += kobjsize(current->fs);
60151@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
60152
60153 if (file) {
60154 pad_len_spaces(m, len);
60155- seq_path(m, &file->f_path, "");
60156+ seq_path(m, &file->f_path, "\n\\");
60157 } else if (mm) {
60158 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
60159
60160diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
60161index 17f7e08..e4b1529 100644
60162--- a/fs/proc/vmcore.c
60163+++ b/fs/proc/vmcore.c
60164@@ -99,9 +99,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
60165 nr_bytes = count;
60166
60167 /* If pfn is not ram, return zeros for sparse dump files */
60168- if (pfn_is_ram(pfn) == 0)
60169- memset(buf, 0, nr_bytes);
60170- else {
60171+ if (pfn_is_ram(pfn) == 0) {
60172+ if (userbuf) {
60173+ if (clear_user((char __force_user *)buf, nr_bytes))
60174+ return -EFAULT;
60175+ } else
60176+ memset(buf, 0, nr_bytes);
60177+ } else {
60178 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
60179 offset, userbuf);
60180 if (tmp < 0)
60181@@ -186,7 +190,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
60182 if (tsz > nr_bytes)
60183 tsz = nr_bytes;
60184
60185- tmp = read_from_oldmem(buffer, tsz, &start, 1);
60186+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, 1);
60187 if (tmp < 0)
60188 return tmp;
60189 buflen -= tsz;
60190diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
60191index b00fcc9..e0c6381 100644
60192--- a/fs/qnx6/qnx6.h
60193+++ b/fs/qnx6/qnx6.h
60194@@ -74,7 +74,7 @@ enum {
60195 BYTESEX_BE,
60196 };
60197
60198-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
60199+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
60200 {
60201 if (sbi->s_bytesex == BYTESEX_LE)
60202 return le64_to_cpu((__force __le64)n);
60203@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
60204 return (__force __fs64)cpu_to_be64(n);
60205 }
60206
60207-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
60208+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
60209 {
60210 if (sbi->s_bytesex == BYTESEX_LE)
60211 return le32_to_cpu((__force __le32)n);
60212diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
60213index 16e8abb..2dcf914 100644
60214--- a/fs/quota/netlink.c
60215+++ b/fs/quota/netlink.c
60216@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
60217 void quota_send_warning(struct kqid qid, dev_t dev,
60218 const char warntype)
60219 {
60220- static atomic_t seq;
60221+ static atomic_unchecked_t seq;
60222 struct sk_buff *skb;
60223 void *msg_head;
60224 int ret;
60225@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
60226 "VFS: Not enough memory to send quota warning.\n");
60227 return;
60228 }
60229- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
60230+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
60231 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
60232 if (!msg_head) {
60233 printk(KERN_ERR
60234diff --git a/fs/read_write.c b/fs/read_write.c
60235index 2cefa41..c7e2fe0 100644
60236--- a/fs/read_write.c
60237+++ b/fs/read_write.c
60238@@ -411,7 +411,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
60239
60240 old_fs = get_fs();
60241 set_fs(get_ds());
60242- p = (__force const char __user *)buf;
60243+ p = (const char __force_user *)buf;
60244 if (count > MAX_RW_COUNT)
60245 count = MAX_RW_COUNT;
60246 if (file->f_op->write)
60247diff --git a/fs/readdir.c b/fs/readdir.c
60248index fee38e0..12fdf47 100644
60249--- a/fs/readdir.c
60250+++ b/fs/readdir.c
60251@@ -17,6 +17,7 @@
60252 #include <linux/security.h>
60253 #include <linux/syscalls.h>
60254 #include <linux/unistd.h>
60255+#include <linux/namei.h>
60256
60257 #include <asm/uaccess.h>
60258
60259@@ -67,6 +68,7 @@ struct old_linux_dirent {
60260
60261 struct readdir_callback {
60262 struct old_linux_dirent __user * dirent;
60263+ struct file * file;
60264 int result;
60265 };
60266
60267@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
60268 buf->result = -EOVERFLOW;
60269 return -EOVERFLOW;
60270 }
60271+
60272+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60273+ return 0;
60274+
60275 buf->result++;
60276 dirent = buf->dirent;
60277 if (!access_ok(VERIFY_WRITE, dirent,
60278@@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
60279
60280 buf.result = 0;
60281 buf.dirent = dirent;
60282+ buf.file = f.file;
60283
60284 error = vfs_readdir(f.file, fillonedir, &buf);
60285 if (buf.result)
60286@@ -139,6 +146,7 @@ struct linux_dirent {
60287 struct getdents_callback {
60288 struct linux_dirent __user * current_dir;
60289 struct linux_dirent __user * previous;
60290+ struct file * file;
60291 int count;
60292 int error;
60293 };
60294@@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
60295 buf->error = -EOVERFLOW;
60296 return -EOVERFLOW;
60297 }
60298+
60299+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60300+ return 0;
60301+
60302 dirent = buf->previous;
60303 if (dirent) {
60304 if (__put_user(offset, &dirent->d_off))
60305@@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
60306 buf.previous = NULL;
60307 buf.count = count;
60308 buf.error = 0;
60309+ buf.file = f.file;
60310
60311 error = vfs_readdir(f.file, filldir, &buf);
60312 if (error >= 0)
60313@@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
60314 struct getdents_callback64 {
60315 struct linux_dirent64 __user * current_dir;
60316 struct linux_dirent64 __user * previous;
60317+ struct file *file;
60318 int count;
60319 int error;
60320 };
60321@@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
60322 buf->error = -EINVAL; /* only used if we fail.. */
60323 if (reclen > buf->count)
60324 return -EINVAL;
60325+
60326+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60327+ return 0;
60328+
60329 dirent = buf->previous;
60330 if (dirent) {
60331 if (__put_user(offset, &dirent->d_off))
60332@@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
60333
60334 buf.current_dir = dirent;
60335 buf.previous = NULL;
60336+ buf.file = f.file;
60337 buf.count = count;
60338 buf.error = 0;
60339
60340@@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
60341 error = buf.error;
60342 lastdirent = buf.previous;
60343 if (lastdirent) {
60344- typeof(lastdirent->d_off) d_off = f.file->f_pos;
60345+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
60346 if (__put_user(d_off, &lastdirent->d_off))
60347 error = -EFAULT;
60348 else
60349diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
60350index 2b7882b..1c5ef48 100644
60351--- a/fs/reiserfs/do_balan.c
60352+++ b/fs/reiserfs/do_balan.c
60353@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
60354 return;
60355 }
60356
60357- atomic_inc(&(fs_generation(tb->tb_sb)));
60358+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
60359 do_balance_starts(tb);
60360
60361 /* balance leaf returns 0 except if combining L R and S into
60362diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
60363index 1d48974..2f8f4e0 100644
60364--- a/fs/reiserfs/procfs.c
60365+++ b/fs/reiserfs/procfs.c
60366@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
60367 "SMALL_TAILS " : "NO_TAILS ",
60368 replay_only(sb) ? "REPLAY_ONLY " : "",
60369 convert_reiserfs(sb) ? "CONV " : "",
60370- atomic_read(&r->s_generation_counter),
60371+ atomic_read_unchecked(&r->s_generation_counter),
60372 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
60373 SF(s_do_balance), SF(s_unneeded_left_neighbor),
60374 SF(s_good_search_by_key_reada), SF(s_bmaps),
60375diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
60376index 157e474..65a6114 100644
60377--- a/fs/reiserfs/reiserfs.h
60378+++ b/fs/reiserfs/reiserfs.h
60379@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
60380 /* Comment? -Hans */
60381 wait_queue_head_t s_wait;
60382 /* To be obsoleted soon by per buffer seals.. -Hans */
60383- atomic_t s_generation_counter; // increased by one every time the
60384+ atomic_unchecked_t s_generation_counter; // increased by one every time the
60385 // tree gets re-balanced
60386 unsigned long s_properties; /* File system properties. Currently holds
60387 on-disk FS format */
60388@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
60389 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60390
60391 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60392-#define get_generation(s) atomic_read (&fs_generation(s))
60393+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60394 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60395 #define __fs_changed(gen,s) (gen != get_generation (s))
60396 #define fs_changed(gen,s) \
60397diff --git a/fs/select.c b/fs/select.c
60398index 8c1c96c..a0f9b6d 100644
60399--- a/fs/select.c
60400+++ b/fs/select.c
60401@@ -20,6 +20,7 @@
60402 #include <linux/export.h>
60403 #include <linux/slab.h>
60404 #include <linux/poll.h>
60405+#include <linux/security.h>
60406 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
60407 #include <linux/file.h>
60408 #include <linux/fdtable.h>
60409@@ -827,6 +828,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
60410 struct poll_list *walk = head;
60411 unsigned long todo = nfds;
60412
60413+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
60414 if (nfds > rlimit(RLIMIT_NOFILE))
60415 return -EINVAL;
60416
60417diff --git a/fs/seq_file.c b/fs/seq_file.c
60418index 774c1eb..b67582a 100644
60419--- a/fs/seq_file.c
60420+++ b/fs/seq_file.c
60421@@ -10,6 +10,7 @@
60422 #include <linux/seq_file.h>
60423 #include <linux/slab.h>
60424 #include <linux/cred.h>
60425+#include <linux/sched.h>
60426
60427 #include <asm/uaccess.h>
60428 #include <asm/page.h>
60429@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
60430 #ifdef CONFIG_USER_NS
60431 p->user_ns = file->f_cred->user_ns;
60432 #endif
60433+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60434+ p->exec_id = current->exec_id;
60435+#endif
60436
60437 /*
60438 * Wrappers around seq_open(e.g. swaps_open) need to be
60439@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
60440 return 0;
60441 }
60442 if (!m->buf) {
60443- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
60444+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
60445 if (!m->buf)
60446 return -ENOMEM;
60447 }
60448@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
60449 Eoverflow:
60450 m->op->stop(m, p);
60451 kfree(m->buf);
60452- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
60453+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
60454 return !m->buf ? -ENOMEM : -EAGAIN;
60455 }
60456
60457@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
60458
60459 /* grab buffer if we didn't have one */
60460 if (!m->buf) {
60461- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
60462+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
60463 if (!m->buf)
60464 goto Enomem;
60465 }
60466@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
60467 goto Fill;
60468 m->op->stop(m, p);
60469 kfree(m->buf);
60470- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
60471+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
60472 if (!m->buf)
60473 goto Enomem;
60474 m->count = 0;
60475@@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
60476 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
60477 void *data)
60478 {
60479- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
60480+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
60481 int res = -ENOMEM;
60482
60483 if (op) {
60484diff --git a/fs/splice.c b/fs/splice.c
60485index d37431d..81c3044 100644
60486--- a/fs/splice.c
60487+++ b/fs/splice.c
60488@@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
60489 pipe_lock(pipe);
60490
60491 for (;;) {
60492- if (!pipe->readers) {
60493+ if (!atomic_read(&pipe->readers)) {
60494 send_sig(SIGPIPE, current, 0);
60495 if (!ret)
60496 ret = -EPIPE;
60497@@ -219,7 +219,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
60498 page_nr++;
60499 ret += buf->len;
60500
60501- if (pipe->files)
60502+ if (atomic_read(&pipe->files))
60503 do_wakeup = 1;
60504
60505 if (!--spd->nr_pages)
60506@@ -250,9 +250,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
60507 do_wakeup = 0;
60508 }
60509
60510- pipe->waiting_writers++;
60511+ atomic_inc(&pipe->waiting_writers);
60512 pipe_wait(pipe);
60513- pipe->waiting_writers--;
60514+ atomic_dec(&pipe->waiting_writers);
60515 }
60516
60517 pipe_unlock(pipe);
60518@@ -565,7 +565,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
60519 old_fs = get_fs();
60520 set_fs(get_ds());
60521 /* The cast to a user pointer is valid due to the set_fs() */
60522- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
60523+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
60524 set_fs(old_fs);
60525
60526 return res;
60527@@ -580,7 +580,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
60528 old_fs = get_fs();
60529 set_fs(get_ds());
60530 /* The cast to a user pointer is valid due to the set_fs() */
60531- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
60532+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
60533 set_fs(old_fs);
60534
60535 return res;
60536@@ -633,7 +633,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
60537 goto err;
60538
60539 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
60540- vec[i].iov_base = (void __user *) page_address(page);
60541+ vec[i].iov_base = (void __force_user *) page_address(page);
60542 vec[i].iov_len = this_len;
60543 spd.pages[i] = page;
60544 spd.nr_pages++;
60545@@ -829,7 +829,7 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
60546 ops->release(pipe, buf);
60547 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
60548 pipe->nrbufs--;
60549- if (pipe->files)
60550+ if (atomic_read(&pipe->files))
60551 sd->need_wakeup = true;
60552 }
60553
60554@@ -854,10 +854,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
60555 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
60556 {
60557 while (!pipe->nrbufs) {
60558- if (!pipe->writers)
60559+ if (!atomic_read(&pipe->writers))
60560 return 0;
60561
60562- if (!pipe->waiting_writers && sd->num_spliced)
60563+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
60564 return 0;
60565
60566 if (sd->flags & SPLICE_F_NONBLOCK)
60567@@ -1193,7 +1193,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
60568 * out of the pipe right after the splice_to_pipe(). So set
60569 * PIPE_READERS appropriately.
60570 */
60571- pipe->readers = 1;
60572+ atomic_set(&pipe->readers, 1);
60573
60574 current->splice_pipe = pipe;
60575 }
60576@@ -1769,9 +1769,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
60577 ret = -ERESTARTSYS;
60578 break;
60579 }
60580- if (!pipe->writers)
60581+ if (!atomic_read(&pipe->writers))
60582 break;
60583- if (!pipe->waiting_writers) {
60584+ if (!atomic_read(&pipe->waiting_writers)) {
60585 if (flags & SPLICE_F_NONBLOCK) {
60586 ret = -EAGAIN;
60587 break;
60588@@ -1803,7 +1803,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
60589 pipe_lock(pipe);
60590
60591 while (pipe->nrbufs >= pipe->buffers) {
60592- if (!pipe->readers) {
60593+ if (!atomic_read(&pipe->readers)) {
60594 send_sig(SIGPIPE, current, 0);
60595 ret = -EPIPE;
60596 break;
60597@@ -1816,9 +1816,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
60598 ret = -ERESTARTSYS;
60599 break;
60600 }
60601- pipe->waiting_writers++;
60602+ atomic_inc(&pipe->waiting_writers);
60603 pipe_wait(pipe);
60604- pipe->waiting_writers--;
60605+ atomic_dec(&pipe->waiting_writers);
60606 }
60607
60608 pipe_unlock(pipe);
60609@@ -1854,14 +1854,14 @@ retry:
60610 pipe_double_lock(ipipe, opipe);
60611
60612 do {
60613- if (!opipe->readers) {
60614+ if (!atomic_read(&opipe->readers)) {
60615 send_sig(SIGPIPE, current, 0);
60616 if (!ret)
60617 ret = -EPIPE;
60618 break;
60619 }
60620
60621- if (!ipipe->nrbufs && !ipipe->writers)
60622+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
60623 break;
60624
60625 /*
60626@@ -1958,7 +1958,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
60627 pipe_double_lock(ipipe, opipe);
60628
60629 do {
60630- if (!opipe->readers) {
60631+ if (!atomic_read(&opipe->readers)) {
60632 send_sig(SIGPIPE, current, 0);
60633 if (!ret)
60634 ret = -EPIPE;
60635@@ -2003,7 +2003,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
60636 * return EAGAIN if we have the potential of some data in the
60637 * future, otherwise just return 0
60638 */
60639- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
60640+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
60641 ret = -EAGAIN;
60642
60643 pipe_unlock(ipipe);
60644diff --git a/fs/stat.c b/fs/stat.c
60645index 04ce1ac..a13dd1e 100644
60646--- a/fs/stat.c
60647+++ b/fs/stat.c
60648@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
60649 stat->gid = inode->i_gid;
60650 stat->rdev = inode->i_rdev;
60651 stat->size = i_size_read(inode);
60652- stat->atime = inode->i_atime;
60653- stat->mtime = inode->i_mtime;
60654+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
60655+ stat->atime = inode->i_ctime;
60656+ stat->mtime = inode->i_ctime;
60657+ } else {
60658+ stat->atime = inode->i_atime;
60659+ stat->mtime = inode->i_mtime;
60660+ }
60661 stat->ctime = inode->i_ctime;
60662 stat->blksize = (1 << inode->i_blkbits);
60663 stat->blocks = inode->i_blocks;
60664@@ -46,8 +51,14 @@ int vfs_getattr(struct path *path, struct kstat *stat)
60665 if (retval)
60666 return retval;
60667
60668- if (inode->i_op->getattr)
60669- return inode->i_op->getattr(path->mnt, path->dentry, stat);
60670+ if (inode->i_op->getattr) {
60671+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
60672+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
60673+ stat->atime = stat->ctime;
60674+ stat->mtime = stat->ctime;
60675+ }
60676+ return retval;
60677+ }
60678
60679 generic_fillattr(inode, stat);
60680 return 0;
60681diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
60682index 15c68f9..36a8b3e 100644
60683--- a/fs/sysfs/bin.c
60684+++ b/fs/sysfs/bin.c
60685@@ -235,13 +235,13 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
60686 return ret;
60687 }
60688
60689-static int bin_access(struct vm_area_struct *vma, unsigned long addr,
60690- void *buf, int len, int write)
60691+static ssize_t bin_access(struct vm_area_struct *vma, unsigned long addr,
60692+ void *buf, size_t len, int write)
60693 {
60694 struct file *file = vma->vm_file;
60695 struct bin_buffer *bb = file->private_data;
60696 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
60697- int ret;
60698+ ssize_t ret;
60699
60700 if (!bb->vm_ops)
60701 return -EINVAL;
60702diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
60703index e8e0e71..79c28ac5 100644
60704--- a/fs/sysfs/dir.c
60705+++ b/fs/sysfs/dir.c
60706@@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
60707 *
60708 * Returns 31 bit hash of ns + name (so it fits in an off_t )
60709 */
60710-static unsigned int sysfs_name_hash(const void *ns, const char *name)
60711+static unsigned int sysfs_name_hash(const void *ns, const unsigned char *name)
60712 {
60713 unsigned long hash = init_name_hash();
60714 unsigned int len = strlen(name);
60715@@ -679,6 +679,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
60716 struct sysfs_dirent *sd;
60717 int rc;
60718
60719+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60720+ const char *parent_name = parent_sd->s_name;
60721+
60722+ mode = S_IFDIR | S_IRWXU;
60723+
60724+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
60725+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
60726+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
60727+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
60728+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
60729+#endif
60730+
60731 /* allocate */
60732 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
60733 if (!sd)
60734diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
60735index 602f56d..6853db8 100644
60736--- a/fs/sysfs/file.c
60737+++ b/fs/sysfs/file.c
60738@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
60739
60740 struct sysfs_open_dirent {
60741 atomic_t refcnt;
60742- atomic_t event;
60743+ atomic_unchecked_t event;
60744 wait_queue_head_t poll;
60745 struct list_head buffers; /* goes through sysfs_buffer.list */
60746 };
60747@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
60748 if (!sysfs_get_active(attr_sd))
60749 return -ENODEV;
60750
60751- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
60752+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
60753 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
60754
60755 sysfs_put_active(attr_sd);
60756@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
60757 return -ENOMEM;
60758
60759 atomic_set(&new_od->refcnt, 0);
60760- atomic_set(&new_od->event, 1);
60761+ atomic_set_unchecked(&new_od->event, 1);
60762 init_waitqueue_head(&new_od->poll);
60763 INIT_LIST_HEAD(&new_od->buffers);
60764 goto retry;
60765@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
60766
60767 sysfs_put_active(attr_sd);
60768
60769- if (buffer->event != atomic_read(&od->event))
60770+ if (buffer->event != atomic_read_unchecked(&od->event))
60771 goto trigger;
60772
60773 return DEFAULT_POLLMASK;
60774@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
60775
60776 od = sd->s_attr.open;
60777 if (od) {
60778- atomic_inc(&od->event);
60779+ atomic_inc_unchecked(&od->event);
60780 wake_up_interruptible(&od->poll);
60781 }
60782
60783diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
60784index 8c940df..25b733e 100644
60785--- a/fs/sysfs/symlink.c
60786+++ b/fs/sysfs/symlink.c
60787@@ -305,7 +305,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
60788
60789 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
60790 {
60791- char *page = nd_get_link(nd);
60792+ const char *page = nd_get_link(nd);
60793 if (!IS_ERR(page))
60794 free_page((unsigned long)page);
60795 }
60796diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
60797index 69d4889..a810bd4 100644
60798--- a/fs/sysv/sysv.h
60799+++ b/fs/sysv/sysv.h
60800@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
60801 #endif
60802 }
60803
60804-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
60805+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
60806 {
60807 if (sbi->s_bytesex == BYTESEX_PDP)
60808 return PDP_swab((__force __u32)n);
60809diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
60810index e18b988..f1d4ad0f 100644
60811--- a/fs/ubifs/io.c
60812+++ b/fs/ubifs/io.c
60813@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
60814 return err;
60815 }
60816
60817-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
60818+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
60819 {
60820 int err;
60821
60822diff --git a/fs/udf/misc.c b/fs/udf/misc.c
60823index c175b4d..8f36a16 100644
60824--- a/fs/udf/misc.c
60825+++ b/fs/udf/misc.c
60826@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
60827
60828 u8 udf_tag_checksum(const struct tag *t)
60829 {
60830- u8 *data = (u8 *)t;
60831+ const u8 *data = (const u8 *)t;
60832 u8 checksum = 0;
60833 int i;
60834 for (i = 0; i < sizeof(struct tag); ++i)
60835diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
60836index 8d974c4..b82f6ec 100644
60837--- a/fs/ufs/swab.h
60838+++ b/fs/ufs/swab.h
60839@@ -22,7 +22,7 @@ enum {
60840 BYTESEX_BE
60841 };
60842
60843-static inline u64
60844+static inline u64 __intentional_overflow(-1)
60845 fs64_to_cpu(struct super_block *sbp, __fs64 n)
60846 {
60847 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
60848@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
60849 return (__force __fs64)cpu_to_be64(n);
60850 }
60851
60852-static inline u32
60853+static inline u32 __intentional_overflow(-1)
60854 fs32_to_cpu(struct super_block *sbp, __fs32 n)
60855 {
60856 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
60857diff --git a/fs/utimes.c b/fs/utimes.c
60858index f4fb7ec..3fe03c0 100644
60859--- a/fs/utimes.c
60860+++ b/fs/utimes.c
60861@@ -1,6 +1,7 @@
60862 #include <linux/compiler.h>
60863 #include <linux/file.h>
60864 #include <linux/fs.h>
60865+#include <linux/security.h>
60866 #include <linux/linkage.h>
60867 #include <linux/mount.h>
60868 #include <linux/namei.h>
60869@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
60870 goto mnt_drop_write_and_out;
60871 }
60872 }
60873+
60874+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
60875+ error = -EACCES;
60876+ goto mnt_drop_write_and_out;
60877+ }
60878+
60879 mutex_lock(&inode->i_mutex);
60880 error = notify_change(path->dentry, &newattrs);
60881 mutex_unlock(&inode->i_mutex);
60882diff --git a/fs/xattr.c b/fs/xattr.c
60883index 3377dff..4d074d9 100644
60884--- a/fs/xattr.c
60885+++ b/fs/xattr.c
60886@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
60887 return rc;
60888 }
60889
60890+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
60891+ssize_t
60892+pax_getxattr(struct dentry *dentry, void *value, size_t size)
60893+{
60894+ struct inode *inode = dentry->d_inode;
60895+ ssize_t error;
60896+
60897+ error = inode_permission(inode, MAY_EXEC);
60898+ if (error)
60899+ return error;
60900+
60901+ if (inode->i_op->getxattr)
60902+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
60903+ else
60904+ error = -EOPNOTSUPP;
60905+
60906+ return error;
60907+}
60908+EXPORT_SYMBOL(pax_getxattr);
60909+#endif
60910+
60911 ssize_t
60912 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
60913 {
60914@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
60915 * Extended attribute SET operations
60916 */
60917 static long
60918-setxattr(struct dentry *d, const char __user *name, const void __user *value,
60919+setxattr(struct path *path, const char __user *name, const void __user *value,
60920 size_t size, int flags)
60921 {
60922 int error;
60923@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
60924 posix_acl_fix_xattr_from_user(kvalue, size);
60925 }
60926
60927- error = vfs_setxattr(d, kname, kvalue, size, flags);
60928+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
60929+ error = -EACCES;
60930+ goto out;
60931+ }
60932+
60933+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
60934 out:
60935 if (vvalue)
60936 vfree(vvalue);
60937@@ -377,7 +403,7 @@ retry:
60938 return error;
60939 error = mnt_want_write(path.mnt);
60940 if (!error) {
60941- error = setxattr(path.dentry, name, value, size, flags);
60942+ error = setxattr(&path, name, value, size, flags);
60943 mnt_drop_write(path.mnt);
60944 }
60945 path_put(&path);
60946@@ -401,7 +427,7 @@ retry:
60947 return error;
60948 error = mnt_want_write(path.mnt);
60949 if (!error) {
60950- error = setxattr(path.dentry, name, value, size, flags);
60951+ error = setxattr(&path, name, value, size, flags);
60952 mnt_drop_write(path.mnt);
60953 }
60954 path_put(&path);
60955@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
60956 const void __user *,value, size_t, size, int, flags)
60957 {
60958 struct fd f = fdget(fd);
60959- struct dentry *dentry;
60960 int error = -EBADF;
60961
60962 if (!f.file)
60963 return error;
60964- dentry = f.file->f_path.dentry;
60965- audit_inode(NULL, dentry, 0);
60966+ audit_inode(NULL, f.file->f_path.dentry, 0);
60967 error = mnt_want_write_file(f.file);
60968 if (!error) {
60969- error = setxattr(dentry, name, value, size, flags);
60970+ error = setxattr(&f.file->f_path, name, value, size, flags);
60971 mnt_drop_write_file(f.file);
60972 }
60973 fdput(f);
60974diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
60975index 9fbea87..6b19972 100644
60976--- a/fs/xattr_acl.c
60977+++ b/fs/xattr_acl.c
60978@@ -76,8 +76,8 @@ struct posix_acl *
60979 posix_acl_from_xattr(struct user_namespace *user_ns,
60980 const void *value, size_t size)
60981 {
60982- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
60983- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
60984+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
60985+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
60986 int count;
60987 struct posix_acl *acl;
60988 struct posix_acl_entry *acl_e;
60989diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
60990index 8904284..ee0e14b 100644
60991--- a/fs/xfs/xfs_bmap.c
60992+++ b/fs/xfs/xfs_bmap.c
60993@@ -765,7 +765,7 @@ xfs_bmap_validate_ret(
60994
60995 #else
60996 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
60997-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
60998+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
60999 #endif /* DEBUG */
61000
61001 /*
61002diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
61003index 6157424..ac98f6d 100644
61004--- a/fs/xfs/xfs_dir2_sf.c
61005+++ b/fs/xfs/xfs_dir2_sf.c
61006@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
61007 }
61008
61009 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
61010- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
61011+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
61012+ char name[sfep->namelen];
61013+ memcpy(name, sfep->name, sfep->namelen);
61014+ if (filldir(dirent, name, sfep->namelen,
61015+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
61016+ *offset = off & 0x7fffffff;
61017+ return 0;
61018+ }
61019+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
61020 off & 0x7fffffff, ino, DT_UNKNOWN)) {
61021 *offset = off & 0x7fffffff;
61022 return 0;
61023diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
61024index 5e99968..45bd327 100644
61025--- a/fs/xfs/xfs_ioctl.c
61026+++ b/fs/xfs/xfs_ioctl.c
61027@@ -127,7 +127,7 @@ xfs_find_handle(
61028 }
61029
61030 error = -EFAULT;
61031- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
61032+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
61033 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
61034 goto out_put;
61035
61036diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
61037index ca9ecaa..60100c7 100644
61038--- a/fs/xfs/xfs_iops.c
61039+++ b/fs/xfs/xfs_iops.c
61040@@ -395,7 +395,7 @@ xfs_vn_put_link(
61041 struct nameidata *nd,
61042 void *p)
61043 {
61044- char *s = nd_get_link(nd);
61045+ const char *s = nd_get_link(nd);
61046
61047 if (!IS_ERR(s))
61048 kfree(s);
61049diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
61050new file mode 100644
61051index 0000000..76e84b9
61052--- /dev/null
61053+++ b/grsecurity/Kconfig
61054@@ -0,0 +1,1063 @@
61055+#
61056+# grecurity configuration
61057+#
61058+menu "Memory Protections"
61059+depends on GRKERNSEC
61060+
61061+config GRKERNSEC_KMEM
61062+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
61063+ default y if GRKERNSEC_CONFIG_AUTO
61064+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
61065+ help
61066+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
61067+ be written to or read from to modify or leak the contents of the running
61068+ kernel. /dev/port will also not be allowed to be opened and support
61069+ for /dev/cpu/*/msr will be removed. If you have module
61070+ support disabled, enabling this will close up five ways that are
61071+ currently used to insert malicious code into the running kernel.
61072+
61073+ Even with all these features enabled, we still highly recommend that
61074+ you use the RBAC system, as it is still possible for an attacker to
61075+ modify the running kernel through privileged I/O granted by ioperm/iopl.
61076+
61077+ If you are not using XFree86, you may be able to stop this additional
61078+ case by enabling the 'Disable privileged I/O' option. Though nothing
61079+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
61080+ but only to video memory, which is the only writing we allow in this
61081+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
61082+ not be allowed to mprotect it with PROT_WRITE later.
61083+ Enabling this feature will prevent the "cpupower" and "powertop" tools
61084+ from working.
61085+
61086+ It is highly recommended that you say Y here if you meet all the
61087+ conditions above.
61088+
61089+config GRKERNSEC_VM86
61090+ bool "Restrict VM86 mode"
61091+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
61092+ depends on X86_32
61093+
61094+ help
61095+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
61096+ make use of a special execution mode on 32bit x86 processors called
61097+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
61098+ video cards and will still work with this option enabled. The purpose
61099+ of the option is to prevent exploitation of emulation errors in
61100+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
61101+ Nearly all users should be able to enable this option.
61102+
61103+config GRKERNSEC_IO
61104+ bool "Disable privileged I/O"
61105+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
61106+ depends on X86
61107+ select RTC_CLASS
61108+ select RTC_INTF_DEV
61109+ select RTC_DRV_CMOS
61110+
61111+ help
61112+ If you say Y here, all ioperm and iopl calls will return an error.
61113+ Ioperm and iopl can be used to modify the running kernel.
61114+ Unfortunately, some programs need this access to operate properly,
61115+ the most notable of which are XFree86 and hwclock. hwclock can be
61116+ remedied by having RTC support in the kernel, so real-time
61117+ clock support is enabled if this option is enabled, to ensure
61118+ that hwclock operates correctly. XFree86 still will not
61119+ operate correctly with this option enabled, so DO NOT CHOOSE Y
61120+ IF YOU USE XFree86. If you use XFree86 and you still want to
61121+ protect your kernel against modification, use the RBAC system.
61122+
61123+config GRKERNSEC_JIT_HARDEN
61124+ bool "Harden BPF JIT against spray attacks"
61125+ default y if GRKERNSEC_CONFIG_AUTO
61126+ depends on BPF_JIT
61127+ help
61128+ If you say Y here, the native code generated by the kernel's Berkeley
61129+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
61130+ attacks that attempt to fit attacker-beneficial instructions in
61131+ 32bit immediate fields of JIT-generated native instructions. The
61132+ attacker will generally aim to cause an unintended instruction sequence
61133+ of JIT-generated native code to execute by jumping into the middle of
61134+ a generated instruction. This feature effectively randomizes the 32bit
61135+ immediate constants present in the generated code to thwart such attacks.
61136+
61137+ If you're using KERNEXEC, it's recommended that you enable this option
61138+ to supplement the hardening of the kernel.
61139+
61140+config GRKERNSEC_PERF_HARDEN
61141+ bool "Disable unprivileged PERF_EVENTS usage by default"
61142+ default y if GRKERNSEC_CONFIG_AUTO
61143+ depends on PERF_EVENTS
61144+ help
61145+ If you say Y here, the range of acceptable values for the
61146+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
61147+ default to a new value: 3. When the sysctl is set to this value, no
61148+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
61149+
61150+ Though PERF_EVENTS can be used legitimately for performance monitoring
61151+ and low-level application profiling, it is forced on regardless of
61152+ configuration, has been at fault for several vulnerabilities, and
61153+ creates new opportunities for side channels and other information leaks.
61154+
61155+ This feature puts PERF_EVENTS into a secure default state and permits
61156+ the administrator to change out of it temporarily if unprivileged
61157+ application profiling is needed.
61158+
61159+config GRKERNSEC_RAND_THREADSTACK
61160+ bool "Insert random gaps between thread stacks"
61161+ default y if GRKERNSEC_CONFIG_AUTO
61162+ depends on PAX_RANDMMAP && !PPC
61163+ help
61164+ If you say Y here, a random-sized gap will be enforced between allocated
61165+ thread stacks. Glibc's NPTL and other threading libraries that
61166+ pass MAP_STACK to the kernel for thread stack allocation are supported.
61167+ The implementation currently provides 8 bits of entropy for the gap.
61168+
61169+ Many distributions do not compile threaded remote services with the
61170+ -fstack-check argument to GCC, causing the variable-sized stack-based
61171+ allocator, alloca(), to not probe the stack on allocation. This
61172+ permits an unbounded alloca() to skip over any guard page and potentially
61173+ modify another thread's stack reliably. An enforced random gap
61174+ reduces the reliability of such an attack and increases the chance
61175+ that such a read/write to another thread's stack instead lands in
61176+ an unmapped area, causing a crash and triggering grsecurity's
61177+ anti-bruteforcing logic.
61178+
61179+config GRKERNSEC_PROC_MEMMAP
61180+ bool "Harden ASLR against information leaks and entropy reduction"
61181+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
61182+ depends on PAX_NOEXEC || PAX_ASLR
61183+ help
61184+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
61185+ give no information about the addresses of its mappings if
61186+ PaX features that rely on random addresses are enabled on the task.
61187+ In addition to sanitizing this information and disabling other
61188+ dangerous sources of information, this option causes reads of sensitive
61189+ /proc/<pid> entries where the file descriptor was opened in a different
61190+ task than the one performing the read. Such attempts are logged.
61191+ This option also limits argv/env strings for suid/sgid binaries
61192+ to 512KB to prevent a complete exhaustion of the stack entropy provided
61193+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
61194+ binaries to prevent alternative mmap layouts from being abused.
61195+
61196+ If you use PaX it is essential that you say Y here as it closes up
61197+ several holes that make full ASLR useless locally.
61198+
61199+config GRKERNSEC_BRUTE
61200+ bool "Deter exploit bruteforcing"
61201+ default y if GRKERNSEC_CONFIG_AUTO
61202+ help
61203+ If you say Y here, attempts to bruteforce exploits against forking
61204+ daemons such as apache or sshd, as well as against suid/sgid binaries
61205+ will be deterred. When a child of a forking daemon is killed by PaX
61206+ or crashes due to an illegal instruction or other suspicious signal,
61207+ the parent process will be delayed 30 seconds upon every subsequent
61208+ fork until the administrator is able to assess the situation and
61209+ restart the daemon.
61210+ In the suid/sgid case, the attempt is logged, the user has all their
61211+ existing instances of the suid/sgid binary terminated and will
61212+ be unable to execute any suid/sgid binaries for 15 minutes.
61213+
61214+ It is recommended that you also enable signal logging in the auditing
61215+ section so that logs are generated when a process triggers a suspicious
61216+ signal.
61217+ If the sysctl option is enabled, a sysctl option with name
61218+ "deter_bruteforce" is created.
61219+
61220+
61221+config GRKERNSEC_MODHARDEN
61222+ bool "Harden module auto-loading"
61223+ default y if GRKERNSEC_CONFIG_AUTO
61224+ depends on MODULES
61225+ help
61226+ If you say Y here, module auto-loading in response to use of some
61227+ feature implemented by an unloaded module will be restricted to
61228+ root users. Enabling this option helps defend against attacks
61229+ by unprivileged users who abuse the auto-loading behavior to
61230+ cause a vulnerable module to load that is then exploited.
61231+
61232+ If this option prevents a legitimate use of auto-loading for a
61233+ non-root user, the administrator can execute modprobe manually
61234+ with the exact name of the module mentioned in the alert log.
61235+ Alternatively, the administrator can add the module to the list
61236+ of modules loaded at boot by modifying init scripts.
61237+
61238+ Modification of init scripts will most likely be needed on
61239+ Ubuntu servers with encrypted home directory support enabled,
61240+ as the first non-root user logging in will cause the ecb(aes),
61241+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
61242+
61243+config GRKERNSEC_HIDESYM
61244+ bool "Hide kernel symbols"
61245+ default y if GRKERNSEC_CONFIG_AUTO
61246+ select PAX_USERCOPY_SLABS
61247+ help
61248+ If you say Y here, getting information on loaded modules, and
61249+ displaying all kernel symbols through a syscall will be restricted
61250+ to users with CAP_SYS_MODULE. For software compatibility reasons,
61251+ /proc/kallsyms will be restricted to the root user. The RBAC
61252+ system can hide that entry even from root.
61253+
61254+ This option also prevents leaking of kernel addresses through
61255+ several /proc entries.
61256+
61257+ Note that this option is only effective provided the following
61258+ conditions are met:
61259+ 1) The kernel using grsecurity is not precompiled by some distribution
61260+ 2) You have also enabled GRKERNSEC_DMESG
61261+ 3) You are using the RBAC system and hiding other files such as your
61262+ kernel image and System.map. Alternatively, enabling this option
61263+ causes the permissions on /boot, /lib/modules, and the kernel
61264+ source directory to change at compile time to prevent
61265+ reading by non-root users.
61266+ If the above conditions are met, this option will aid in providing a
61267+ useful protection against local kernel exploitation of overflows
61268+ and arbitrary read/write vulnerabilities.
61269+
61270+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
61271+ in addition to this feature.
61272+
61273+config GRKERNSEC_KERN_LOCKOUT
61274+ bool "Active kernel exploit response"
61275+ default y if GRKERNSEC_CONFIG_AUTO
61276+ depends on X86 || ARM || PPC || SPARC
61277+ help
61278+ If you say Y here, when a PaX alert is triggered due to suspicious
61279+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
61280+ or an OOPS occurs due to bad memory accesses, instead of just
61281+ terminating the offending process (and potentially allowing
61282+ a subsequent exploit from the same user), we will take one of two
61283+ actions:
61284+ If the user was root, we will panic the system
61285+ If the user was non-root, we will log the attempt, terminate
61286+ all processes owned by the user, then prevent them from creating
61287+ any new processes until the system is restarted
61288+ This deters repeated kernel exploitation/bruteforcing attempts
61289+ and is useful for later forensics.
61290+
61291+endmenu
61292+menu "Role Based Access Control Options"
61293+depends on GRKERNSEC
61294+
61295+config GRKERNSEC_RBAC_DEBUG
61296+ bool
61297+
61298+config GRKERNSEC_NO_RBAC
61299+ bool "Disable RBAC system"
61300+ help
61301+ If you say Y here, the /dev/grsec device will be removed from the kernel,
61302+ preventing the RBAC system from being enabled. You should only say Y
61303+ here if you have no intention of using the RBAC system, so as to prevent
61304+ an attacker with root access from misusing the RBAC system to hide files
61305+ and processes when loadable module support and /dev/[k]mem have been
61306+ locked down.
61307+
61308+config GRKERNSEC_ACL_HIDEKERN
61309+ bool "Hide kernel processes"
61310+ help
61311+ If you say Y here, all kernel threads will be hidden to all
61312+ processes but those whose subject has the "view hidden processes"
61313+ flag.
61314+
61315+config GRKERNSEC_ACL_MAXTRIES
61316+ int "Maximum tries before password lockout"
61317+ default 3
61318+ help
61319+ This option enforces the maximum number of times a user can attempt
61320+ to authorize themselves with the grsecurity RBAC system before being
61321+ denied the ability to attempt authorization again for a specified time.
61322+ The lower the number, the harder it will be to brute-force a password.
61323+
61324+config GRKERNSEC_ACL_TIMEOUT
61325+ int "Time to wait after max password tries, in seconds"
61326+ default 30
61327+ help
61328+ This option specifies the time the user must wait after attempting to
61329+ authorize to the RBAC system with the maximum number of invalid
61330+ passwords. The higher the number, the harder it will be to brute-force
61331+ a password.
61332+
61333+endmenu
61334+menu "Filesystem Protections"
61335+depends on GRKERNSEC
61336+
61337+config GRKERNSEC_PROC
61338+ bool "Proc restrictions"
61339+ default y if GRKERNSEC_CONFIG_AUTO
61340+ help
61341+ If you say Y here, the permissions of the /proc filesystem
61342+ will be altered to enhance system security and privacy. You MUST
61343+ choose either a user only restriction or a user and group restriction.
61344+ Depending upon the option you choose, you can either restrict users to
61345+ see only the processes they themselves run, or choose a group that can
61346+ view all processes and files normally restricted to root if you choose
61347+ the "restrict to user only" option. NOTE: If you're running identd or
61348+ ntpd as a non-root user, you will have to run it as the group you
61349+ specify here.
61350+
61351+config GRKERNSEC_PROC_USER
61352+ bool "Restrict /proc to user only"
61353+ depends on GRKERNSEC_PROC
61354+ help
61355+ If you say Y here, non-root users will only be able to view their own
61356+ processes, and restricts them from viewing network-related information,
61357+ and viewing kernel symbol and module information.
61358+
61359+config GRKERNSEC_PROC_USERGROUP
61360+ bool "Allow special group"
61361+ default y if GRKERNSEC_CONFIG_AUTO
61362+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
61363+ help
61364+ If you say Y here, you will be able to select a group that will be
61365+ able to view all processes and network-related information. If you've
61366+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
61367+ remain hidden. This option is useful if you want to run identd as
61368+ a non-root user. The group you select may also be chosen at boot time
61369+ via "grsec_proc_gid=" on the kernel commandline.
61370+
61371+config GRKERNSEC_PROC_GID
61372+ int "GID for special group"
61373+ depends on GRKERNSEC_PROC_USERGROUP
61374+ default 1001
61375+
61376+config GRKERNSEC_PROC_ADD
61377+ bool "Additional restrictions"
61378+ default y if GRKERNSEC_CONFIG_AUTO
61379+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
61380+ help
61381+ If you say Y here, additional restrictions will be placed on
61382+ /proc that keep normal users from viewing device information and
61383+ slabinfo information that could be useful for exploits.
61384+
61385+config GRKERNSEC_LINK
61386+ bool "Linking restrictions"
61387+ default y if GRKERNSEC_CONFIG_AUTO
61388+ help
61389+ If you say Y here, /tmp race exploits will be prevented, since users
61390+ will no longer be able to follow symlinks owned by other users in
61391+ world-writable +t directories (e.g. /tmp), unless the owner of the
61392+ symlink is the owner of the directory. users will also not be
61393+ able to hardlink to files they do not own. If the sysctl option is
61394+ enabled, a sysctl option with name "linking_restrictions" is created.
61395+
61396+config GRKERNSEC_SYMLINKOWN
61397+ bool "Kernel-enforced SymlinksIfOwnerMatch"
61398+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
61399+ help
61400+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
61401+ that prevents it from being used as a security feature. As Apache
61402+ verifies the symlink by performing a stat() against the target of
61403+ the symlink before it is followed, an attacker can setup a symlink
61404+ to point to a same-owned file, then replace the symlink with one
61405+ that targets another user's file just after Apache "validates" the
61406+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
61407+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
61408+ will be in place for the group you specify. If the sysctl option
61409+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
61410+ created.
61411+
61412+config GRKERNSEC_SYMLINKOWN_GID
61413+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
61414+ depends on GRKERNSEC_SYMLINKOWN
61415+ default 1006
61416+ help
61417+ Setting this GID determines what group kernel-enforced
61418+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
61419+ is enabled, a sysctl option with name "symlinkown_gid" is created.
61420+
61421+config GRKERNSEC_FIFO
61422+ bool "FIFO restrictions"
61423+ default y if GRKERNSEC_CONFIG_AUTO
61424+ help
61425+ If you say Y here, users will not be able to write to FIFOs they don't
61426+ own in world-writable +t directories (e.g. /tmp), unless the owner of
61427+ the FIFO is the same owner of the directory it's held in. If the sysctl
61428+ option is enabled, a sysctl option with name "fifo_restrictions" is
61429+ created.
61430+
61431+config GRKERNSEC_SYSFS_RESTRICT
61432+ bool "Sysfs/debugfs restriction"
61433+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
61434+ depends on SYSFS
61435+ help
61436+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
61437+ any filesystem normally mounted under it (e.g. debugfs) will be
61438+ mostly accessible only by root. These filesystems generally provide access
61439+ to hardware and debug information that isn't appropriate for unprivileged
61440+ users of the system. Sysfs and debugfs have also become a large source
61441+ of new vulnerabilities, ranging from infoleaks to local compromise.
61442+ There has been very little oversight with an eye toward security involved
61443+ in adding new exporters of information to these filesystems, so their
61444+ use is discouraged.
61445+ For reasons of compatibility, a few directories have been whitelisted
61446+ for access by non-root users:
61447+ /sys/fs/selinux
61448+ /sys/fs/fuse
61449+ /sys/devices/system/cpu
61450+
61451+config GRKERNSEC_ROFS
61452+ bool "Runtime read-only mount protection"
61453+ help
61454+ If you say Y here, a sysctl option with name "romount_protect" will
61455+ be created. By setting this option to 1 at runtime, filesystems
61456+ will be protected in the following ways:
61457+ * No new writable mounts will be allowed
61458+ * Existing read-only mounts won't be able to be remounted read/write
61459+ * Write operations will be denied on all block devices
61460+ This option acts independently of grsec_lock: once it is set to 1,
61461+ it cannot be turned off. Therefore, please be mindful of the resulting
61462+ behavior if this option is enabled in an init script on a read-only
61463+ filesystem. This feature is mainly intended for secure embedded systems.
61464+
61465+config GRKERNSEC_DEVICE_SIDECHANNEL
61466+ bool "Eliminate stat/notify-based device sidechannels"
61467+ default y if GRKERNSEC_CONFIG_AUTO
61468+ help
61469+ If you say Y here, timing analyses on block or character
61470+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
61471+ will be thwarted for unprivileged users. If a process without
61472+ CAP_MKNOD stats such a device, the last access and last modify times
61473+ will match the device's create time. No access or modify events
61474+ will be triggered through inotify/dnotify/fanotify for such devices.
61475+ This feature will prevent attacks that may at a minimum
61476+ allow an attacker to determine the administrator's password length.
61477+
61478+config GRKERNSEC_CHROOT
61479+ bool "Chroot jail restrictions"
61480+ default y if GRKERNSEC_CONFIG_AUTO
61481+ help
61482+ If you say Y here, you will be able to choose several options that will
61483+ make breaking out of a chrooted jail much more difficult. If you
61484+ encounter no software incompatibilities with the following options, it
61485+ is recommended that you enable each one.
61486+
61487+config GRKERNSEC_CHROOT_MOUNT
61488+ bool "Deny mounts"
61489+ default y if GRKERNSEC_CONFIG_AUTO
61490+ depends on GRKERNSEC_CHROOT
61491+ help
61492+ If you say Y here, processes inside a chroot will not be able to
61493+ mount or remount filesystems. If the sysctl option is enabled, a
61494+ sysctl option with name "chroot_deny_mount" is created.
61495+
61496+config GRKERNSEC_CHROOT_DOUBLE
61497+ bool "Deny double-chroots"
61498+ default y if GRKERNSEC_CONFIG_AUTO
61499+ depends on GRKERNSEC_CHROOT
61500+ help
61501+ If you say Y here, processes inside a chroot will not be able to chroot
61502+ again outside the chroot. This is a widely used method of breaking
61503+ out of a chroot jail and should not be allowed. If the sysctl
61504+ option is enabled, a sysctl option with name
61505+ "chroot_deny_chroot" is created.
61506+
61507+config GRKERNSEC_CHROOT_PIVOT
61508+ bool "Deny pivot_root in chroot"
61509+ default y if GRKERNSEC_CONFIG_AUTO
61510+ depends on GRKERNSEC_CHROOT
61511+ help
61512+ If you say Y here, processes inside a chroot will not be able to use
61513+ a function called pivot_root() that was introduced in Linux 2.3.41. It
61514+ works similar to chroot in that it changes the root filesystem. This
61515+ function could be misused in a chrooted process to attempt to break out
61516+ of the chroot, and therefore should not be allowed. If the sysctl
61517+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
61518+ created.
61519+
61520+config GRKERNSEC_CHROOT_CHDIR
61521+ bool "Enforce chdir(\"/\") on all chroots"
61522+ default y if GRKERNSEC_CONFIG_AUTO
61523+ depends on GRKERNSEC_CHROOT
61524+ help
61525+ If you say Y here, the current working directory of all newly-chrooted
61526+ applications will be set to the the root directory of the chroot.
61527+ The man page on chroot(2) states:
61528+ Note that this call does not change the current working
61529+ directory, so that `.' can be outside the tree rooted at
61530+ `/'. In particular, the super-user can escape from a
61531+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
61532+
61533+ It is recommended that you say Y here, since it's not known to break
61534+ any software. If the sysctl option is enabled, a sysctl option with
61535+ name "chroot_enforce_chdir" is created.
61536+
61537+config GRKERNSEC_CHROOT_CHMOD
61538+ bool "Deny (f)chmod +s"
61539+ default y if GRKERNSEC_CONFIG_AUTO
61540+ depends on GRKERNSEC_CHROOT
61541+ help
61542+ If you say Y here, processes inside a chroot will not be able to chmod
61543+ or fchmod files to make them have suid or sgid bits. This protects
61544+ against another published method of breaking a chroot. If the sysctl
61545+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
61546+ created.
61547+
61548+config GRKERNSEC_CHROOT_FCHDIR
61549+ bool "Deny fchdir out of chroot"
61550+ default y if GRKERNSEC_CONFIG_AUTO
61551+ depends on GRKERNSEC_CHROOT
61552+ help
61553+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
61554+ to a file descriptor of the chrooting process that points to a directory
61555+ outside the filesystem will be stopped. If the sysctl option
61556+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
61557+
61558+config GRKERNSEC_CHROOT_MKNOD
61559+ bool "Deny mknod"
61560+ default y if GRKERNSEC_CONFIG_AUTO
61561+ depends on GRKERNSEC_CHROOT
61562+ help
61563+ If you say Y here, processes inside a chroot will not be allowed to
61564+ mknod. The problem with using mknod inside a chroot is that it
61565+ would allow an attacker to create a device entry that is the same
61566+	  as one on the physical root of your system, which could be
61567+	  anything from the console device to a device for your hard drive (which
61568+ they could then use to wipe the drive or steal data). It is recommended
61569+ that you say Y here, unless you run into software incompatibilities.
61570+ If the sysctl option is enabled, a sysctl option with name
61571+ "chroot_deny_mknod" is created.
61572+
61573+config GRKERNSEC_CHROOT_SHMAT
61574+ bool "Deny shmat() out of chroot"
61575+ default y if GRKERNSEC_CONFIG_AUTO
61576+ depends on GRKERNSEC_CHROOT
61577+ help
61578+ If you say Y here, processes inside a chroot will not be able to attach
61579+ to shared memory segments that were created outside of the chroot jail.
61580+ It is recommended that you say Y here. If the sysctl option is enabled,
61581+ a sysctl option with name "chroot_deny_shmat" is created.
61582+
61583+config GRKERNSEC_CHROOT_UNIX
61584+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
61585+ default y if GRKERNSEC_CONFIG_AUTO
61586+ depends on GRKERNSEC_CHROOT
61587+ help
61588+ If you say Y here, processes inside a chroot will not be able to
61589+ connect to abstract (meaning not belonging to a filesystem) Unix
61590+ domain sockets that were bound outside of a chroot. It is recommended
61591+ that you say Y here. If the sysctl option is enabled, a sysctl option
61592+ with name "chroot_deny_unix" is created.
61593+
61594+config GRKERNSEC_CHROOT_FINDTASK
61595+ bool "Protect outside processes"
61596+ default y if GRKERNSEC_CONFIG_AUTO
61597+ depends on GRKERNSEC_CHROOT
61598+ help
61599+ If you say Y here, processes inside a chroot will not be able to
61600+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
61601+ getsid, or view any process outside of the chroot. If the sysctl
61602+ option is enabled, a sysctl option with name "chroot_findtask" is
61603+ created.
61604+
61605+config GRKERNSEC_CHROOT_NICE
61606+ bool "Restrict priority changes"
61607+ default y if GRKERNSEC_CONFIG_AUTO
61608+ depends on GRKERNSEC_CHROOT
61609+ help
61610+ If you say Y here, processes inside a chroot will not be able to raise
61611+ the priority of processes in the chroot, or alter the priority of
61612+ processes outside the chroot. This provides more security than simply
61613+ removing CAP_SYS_NICE from the process' capability set. If the
61614+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
61615+ is created.
61616+
61617+config GRKERNSEC_CHROOT_SYSCTL
61618+ bool "Deny sysctl writes"
61619+ default y if GRKERNSEC_CONFIG_AUTO
61620+ depends on GRKERNSEC_CHROOT
61621+ help
61622+ If you say Y here, an attacker in a chroot will not be able to
61623+ write to sysctl entries, either by sysctl(2) or through a /proc
61624+ interface. It is strongly recommended that you say Y here. If the
61625+ sysctl option is enabled, a sysctl option with name
61626+ "chroot_deny_sysctl" is created.
61627+
61628+config GRKERNSEC_CHROOT_CAPS
61629+ bool "Capability restrictions"
61630+ default y if GRKERNSEC_CONFIG_AUTO
61631+ depends on GRKERNSEC_CHROOT
61632+ help
61633+ If you say Y here, the capabilities on all processes within a
61634+ chroot jail will be lowered to stop module insertion, raw i/o,
61635+ system and net admin tasks, rebooting the system, modifying immutable
61636+ files, modifying IPC owned by another, and changing the system time.
61637+ This is left an option because it can break some apps. Disable this
61638+ if your chrooted apps are having problems performing those kinds of
61639+ tasks. If the sysctl option is enabled, a sysctl option with
61640+ name "chroot_caps" is created.
61641+
61642+config GRKERNSEC_CHROOT_INITRD
61643+ bool "Exempt initrd tasks from restrictions"
61644+ default y if GRKERNSEC_CONFIG_AUTO
61645+ depends on GRKERNSEC_CHROOT && BLK_DEV_RAM
61646+ help
61647+ If you say Y here, tasks started prior to init will be exempted from
61648+ grsecurity's chroot restrictions. This option is mainly meant to
61649+ resolve Plymouth's performing privileged operations unnecessarily
61650+ in a chroot.
61651+
61652+endmenu
61653+menu "Kernel Auditing"
61654+depends on GRKERNSEC
61655+
61656+config GRKERNSEC_AUDIT_GROUP
61657+ bool "Single group for auditing"
61658+ help
61659+ If you say Y here, the exec and chdir logging features will only operate
61660+ on a group you specify. This option is recommended if you only want to
61661+ watch certain users instead of having a large amount of logs from the
61662+ entire system. If the sysctl option is enabled, a sysctl option with
61663+ name "audit_group" is created.
61664+
61665+config GRKERNSEC_AUDIT_GID
61666+ int "GID for auditing"
61667+ depends on GRKERNSEC_AUDIT_GROUP
61668+ default 1007
61669+
61670+config GRKERNSEC_EXECLOG
61671+ bool "Exec logging"
61672+ help
61673+ If you say Y here, all execve() calls will be logged (since the
61674+ other exec*() calls are frontends to execve(), all execution
61675+ will be logged). Useful for shell-servers that like to keep track
61676+ of their users. If the sysctl option is enabled, a sysctl option with
61677+ name "exec_logging" is created.
61678+ WARNING: This option when enabled will produce a LOT of logs, especially
61679+ on an active system.
61680+
61681+config GRKERNSEC_RESLOG
61682+ bool "Resource logging"
61683+ default y if GRKERNSEC_CONFIG_AUTO
61684+ help
61685+ If you say Y here, all attempts to overstep resource limits will
61686+ be logged with the resource name, the requested size, and the current
61687+ limit. It is highly recommended that you say Y here. If the sysctl
61688+ option is enabled, a sysctl option with name "resource_logging" is
61689+ created. If the RBAC system is enabled, the sysctl value is ignored.
61690+
61691+config GRKERNSEC_CHROOT_EXECLOG
61692+ bool "Log execs within chroot"
61693+ help
61694+ If you say Y here, all executions inside a chroot jail will be logged
61695+ to syslog. This can cause a large amount of logs if certain
61696+	  applications (e.g. djb's daemontools) are installed on the system, and
61697+ is therefore left as an option. If the sysctl option is enabled, a
61698+ sysctl option with name "chroot_execlog" is created.
61699+
61700+config GRKERNSEC_AUDIT_PTRACE
61701+ bool "Ptrace logging"
61702+ help
61703+ If you say Y here, all attempts to attach to a process via ptrace
61704+ will be logged. If the sysctl option is enabled, a sysctl option
61705+ with name "audit_ptrace" is created.
61706+
61707+config GRKERNSEC_AUDIT_CHDIR
61708+ bool "Chdir logging"
61709+ help
61710+ If you say Y here, all chdir() calls will be logged. If the sysctl
61711+ option is enabled, a sysctl option with name "audit_chdir" is created.
61712+
61713+config GRKERNSEC_AUDIT_MOUNT
61714+ bool "(Un)Mount logging"
61715+ help
61716+ If you say Y here, all mounts and unmounts will be logged. If the
61717+ sysctl option is enabled, a sysctl option with name "audit_mount" is
61718+ created.
61719+
61720+config GRKERNSEC_SIGNAL
61721+ bool "Signal logging"
61722+ default y if GRKERNSEC_CONFIG_AUTO
61723+ help
61724+ If you say Y here, certain important signals will be logged, such as
61725+	  SIGSEGV, which will as a result inform you of when an error in a program
61726+ occurred, which in some cases could mean a possible exploit attempt.
61727+ If the sysctl option is enabled, a sysctl option with name
61728+ "signal_logging" is created.
61729+
61730+config GRKERNSEC_FORKFAIL
61731+ bool "Fork failure logging"
61732+ help
61733+ If you say Y here, all failed fork() attempts will be logged.
61734+ This could suggest a fork bomb, or someone attempting to overstep
61735+ their process limit. If the sysctl option is enabled, a sysctl option
61736+ with name "forkfail_logging" is created.
61737+
61738+config GRKERNSEC_TIME
61739+ bool "Time change logging"
61740+ default y if GRKERNSEC_CONFIG_AUTO
61741+ help
61742+ If you say Y here, any changes of the system clock will be logged.
61743+ If the sysctl option is enabled, a sysctl option with name
61744+ "timechange_logging" is created.
61745+
61746+config GRKERNSEC_PROC_IPADDR
61747+ bool "/proc/<pid>/ipaddr support"
61748+ default y if GRKERNSEC_CONFIG_AUTO
61749+ help
61750+ If you say Y here, a new entry will be added to each /proc/<pid>
61751+ directory that contains the IP address of the person using the task.
61752+ The IP is carried across local TCP and AF_UNIX stream sockets.
61753+ This information can be useful for IDS/IPSes to perform remote response
61754+ to a local attack. The entry is readable by only the owner of the
61755+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
61756+ the RBAC system), and thus does not create privacy concerns.
61757+
61758+config GRKERNSEC_RWXMAP_LOG
61759+ bool 'Denied RWX mmap/mprotect logging'
61760+ default y if GRKERNSEC_CONFIG_AUTO
61761+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
61762+ help
61763+ If you say Y here, calls to mmap() and mprotect() with explicit
61764+ usage of PROT_WRITE and PROT_EXEC together will be logged when
61765+ denied by the PAX_MPROTECT feature. This feature will also
61766+ log other problematic scenarios that can occur when PAX_MPROTECT
61767+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
61768+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
61769+ is created.
61770+
61771+endmenu
61772+
61773+menu "Executable Protections"
61774+depends on GRKERNSEC
61775+
61776+config GRKERNSEC_DMESG
61777+ bool "Dmesg(8) restriction"
61778+ default y if GRKERNSEC_CONFIG_AUTO
61779+ help
61780+ If you say Y here, non-root users will not be able to use dmesg(8)
61781+ to view the contents of the kernel's circular log buffer.
61782+ The kernel's log buffer often contains kernel addresses and other
61783+ identifying information useful to an attacker in fingerprinting a
61784+ system for a targeted exploit.
61785+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
61786+ created.
61787+
61788+config GRKERNSEC_HARDEN_PTRACE
61789+ bool "Deter ptrace-based process snooping"
61790+ default y if GRKERNSEC_CONFIG_AUTO
61791+ help
61792+ If you say Y here, TTY sniffers and other malicious monitoring
61793+ programs implemented through ptrace will be defeated. If you
61794+ have been using the RBAC system, this option has already been
61795+ enabled for several years for all users, with the ability to make
61796+ fine-grained exceptions.
61797+
61798+ This option only affects the ability of non-root users to ptrace
61799+	  processes that are not a descendant of the ptracing process.
61800+ This means that strace ./binary and gdb ./binary will still work,
61801+ but attaching to arbitrary processes will not. If the sysctl
61802+ option is enabled, a sysctl option with name "harden_ptrace" is
61803+ created.
61804+
61805+config GRKERNSEC_PTRACE_READEXEC
61806+ bool "Require read access to ptrace sensitive binaries"
61807+ default y if GRKERNSEC_CONFIG_AUTO
61808+ help
61809+ If you say Y here, unprivileged users will not be able to ptrace unreadable
61810+ binaries. This option is useful in environments that
61811+ remove the read bits (e.g. file mode 4711) from suid binaries to
61812+ prevent infoleaking of their contents. This option adds
61813+ consistency to the use of that file mode, as the binary could normally
61814+ be read out when run without privileges while ptracing.
61815+
61816+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
61817+ is created.
61818+
61819+config GRKERNSEC_SETXID
61820+ bool "Enforce consistent multithreaded privileges"
61821+ default y if GRKERNSEC_CONFIG_AUTO
61822+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
61823+ help
61824+ If you say Y here, a change from a root uid to a non-root uid
61825+ in a multithreaded application will cause the resulting uids,
61826+ gids, supplementary groups, and capabilities in that thread
61827+ to be propagated to the other threads of the process. In most
61828+ cases this is unnecessary, as glibc will emulate this behavior
61829+ on behalf of the application. Other libcs do not act in the
61830+ same way, allowing the other threads of the process to continue
61831+ running with root privileges. If the sysctl option is enabled,
61832+ a sysctl option with name "consistent_setxid" is created.
61833+
61834+config GRKERNSEC_TPE
61835+ bool "Trusted Path Execution (TPE)"
61836+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
61837+ help
61838+ If you say Y here, you will be able to choose a gid to add to the
61839+ supplementary groups of users you want to mark as "untrusted."
61840+ These users will not be able to execute any files that are not in
61841+ root-owned directories writable only by root. If the sysctl option
61842+ is enabled, a sysctl option with name "tpe" is created.
61843+
61844+config GRKERNSEC_TPE_ALL
61845+ bool "Partially restrict all non-root users"
61846+ depends on GRKERNSEC_TPE
61847+ help
61848+ If you say Y here, all non-root users will be covered under
61849+ a weaker TPE restriction. This is separate from, and in addition to,
61850+ the main TPE options that you have selected elsewhere. Thus, if a
61851+ "trusted" GID is chosen, this restriction applies to even that GID.
61852+ Under this restriction, all non-root users will only be allowed to
61853+ execute files in directories they own that are not group or
61854+ world-writable, or in directories owned by root and writable only by
61855+ root. If the sysctl option is enabled, a sysctl option with name
61856+ "tpe_restrict_all" is created.
61857+
61858+config GRKERNSEC_TPE_INVERT
61859+ bool "Invert GID option"
61860+ depends on GRKERNSEC_TPE
61861+ help
61862+ If you say Y here, the group you specify in the TPE configuration will
61863+ decide what group TPE restrictions will be *disabled* for. This
61864+ option is useful if you want TPE restrictions to be applied to most
61865+ users on the system. If the sysctl option is enabled, a sysctl option
61866+ with name "tpe_invert" is created. Unlike other sysctl options, this
61867+ entry will default to on for backward-compatibility.
61868+
61869+config GRKERNSEC_TPE_GID
61870+ int
61871+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
61872+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
61873+
61874+config GRKERNSEC_TPE_UNTRUSTED_GID
61875+ int "GID for TPE-untrusted users"
61876+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
61877+ default 1005
61878+ help
61879+ Setting this GID determines what group TPE restrictions will be
61880+ *enabled* for. If the sysctl option is enabled, a sysctl option
61881+ with name "tpe_gid" is created.
61882+
61883+config GRKERNSEC_TPE_TRUSTED_GID
61884+ int "GID for TPE-trusted users"
61885+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
61886+ default 1005
61887+ help
61888+ Setting this GID determines what group TPE restrictions will be
61889+ *disabled* for. If the sysctl option is enabled, a sysctl option
61890+ with name "tpe_gid" is created.
61891+
61892+endmenu
61893+menu "Network Protections"
61894+depends on GRKERNSEC
61895+
61896+config GRKERNSEC_RANDNET
61897+ bool "Larger entropy pools"
61898+ default y if GRKERNSEC_CONFIG_AUTO
61899+ help
61900+ If you say Y here, the entropy pools used for many features of Linux
61901+ and grsecurity will be doubled in size. Since several grsecurity
61902+ features use additional randomness, it is recommended that you say Y
61903+ here. Saying Y here has a similar effect as modifying
61904+ /proc/sys/kernel/random/poolsize.
61905+
61906+config GRKERNSEC_BLACKHOLE
61907+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
61908+ default y if GRKERNSEC_CONFIG_AUTO
61909+ depends on NET
61910+ help
61911+ If you say Y here, neither TCP resets nor ICMP
61912+ destination-unreachable packets will be sent in response to packets
61913+ sent to ports for which no associated listening process exists.
61914+ This feature supports both IPV4 and IPV6 and exempts the
61915+ loopback interface from blackholing. Enabling this feature
61916+ makes a host more resilient to DoS attacks and reduces network
61917+ visibility against scanners.
61918+
61919+ The blackhole feature as-implemented is equivalent to the FreeBSD
61920+ blackhole feature, as it prevents RST responses to all packets, not
61921+ just SYNs. Under most application behavior this causes no
61922+ problems, but applications (like haproxy) may not close certain
61923+ connections in a way that cleanly terminates them on the remote
61924+ end, leaving the remote host in LAST_ACK state. Because of this
61925+ side-effect and to prevent intentional LAST_ACK DoSes, this
61926+ feature also adds automatic mitigation against such attacks.
61927+ The mitigation drastically reduces the amount of time a socket
61928+ can spend in LAST_ACK state. If you're using haproxy and not
61929+ all servers it connects to have this option enabled, consider
61930+ disabling this feature on the haproxy host.
61931+
61932+ If the sysctl option is enabled, two sysctl options with names
61933+ "ip_blackhole" and "lastack_retries" will be created.
61934+ While "ip_blackhole" takes the standard zero/non-zero on/off
61935+ toggle, "lastack_retries" uses the same kinds of values as
61936+ "tcp_retries1" and "tcp_retries2". The default value of 4
61937+ prevents a socket from lasting more than 45 seconds in LAST_ACK
61938+ state.
61939+
61940+config GRKERNSEC_NO_SIMULT_CONNECT
61941+ bool "Disable TCP Simultaneous Connect"
61942+ default y if GRKERNSEC_CONFIG_AUTO
61943+ depends on NET
61944+ help
61945+ If you say Y here, a feature by Willy Tarreau will be enabled that
61946+ removes a weakness in Linux's strict implementation of TCP that
61947+ allows two clients to connect to each other without either entering
61948+ a listening state. The weakness allows an attacker to easily prevent
61949+ a client from connecting to a known server provided the source port
61950+ for the connection is guessed correctly.
61951+
61952+ As the weakness could be used to prevent an antivirus or IPS from
61953+ fetching updates, or prevent an SSL gateway from fetching a CRL,
61954+ it should be eliminated by enabling this option. Though Linux is
61955+ one of few operating systems supporting simultaneous connect, it
61956+ has no legitimate use in practice and is rarely supported by firewalls.
61957+
61958+config GRKERNSEC_SOCKET
61959+ bool "Socket restrictions"
61960+ depends on NET
61961+ help
61962+ If you say Y here, you will be able to choose from several options.
61963+ If you assign a GID on your system and add it to the supplementary
61964+ groups of users you want to restrict socket access to, this patch
61965+ will perform up to three things, based on the option(s) you choose.
61966+
61967+config GRKERNSEC_SOCKET_ALL
61968+ bool "Deny any sockets to group"
61969+ depends on GRKERNSEC_SOCKET
61970+ help
61971+	  If you say Y here, you will be able to choose a GID whose users will
61972+ be unable to connect to other hosts from your machine or run server
61973+ applications from your machine. If the sysctl option is enabled, a
61974+ sysctl option with name "socket_all" is created.
61975+
61976+config GRKERNSEC_SOCKET_ALL_GID
61977+ int "GID to deny all sockets for"
61978+ depends on GRKERNSEC_SOCKET_ALL
61979+ default 1004
61980+ help
61981+ Here you can choose the GID to disable socket access for. Remember to
61982+ add the users you want socket access disabled for to the GID
61983+ specified here. If the sysctl option is enabled, a sysctl option
61984+ with name "socket_all_gid" is created.
61985+
61986+config GRKERNSEC_SOCKET_CLIENT
61987+ bool "Deny client sockets to group"
61988+ depends on GRKERNSEC_SOCKET
61989+ help
61990+	  If you say Y here, you will be able to choose a GID whose users will
61991+ be unable to connect to other hosts from your machine, but will be
61992+ able to run servers. If this option is enabled, all users in the group
61993+ you specify will have to use passive mode when initiating ftp transfers
61994+ from the shell on your machine. If the sysctl option is enabled, a
61995+ sysctl option with name "socket_client" is created.
61996+
61997+config GRKERNSEC_SOCKET_CLIENT_GID
61998+ int "GID to deny client sockets for"
61999+ depends on GRKERNSEC_SOCKET_CLIENT
62000+ default 1003
62001+ help
62002+ Here you can choose the GID to disable client socket access for.
62003+ Remember to add the users you want client socket access disabled for to
62004+ the GID specified here. If the sysctl option is enabled, a sysctl
62005+ option with name "socket_client_gid" is created.
62006+
62007+config GRKERNSEC_SOCKET_SERVER
62008+ bool "Deny server sockets to group"
62009+ depends on GRKERNSEC_SOCKET
62010+ help
62011+	  If you say Y here, you will be able to choose a GID whose users will
62012+ be unable to run server applications from your machine. If the sysctl
62013+ option is enabled, a sysctl option with name "socket_server" is created.
62014+
62015+config GRKERNSEC_SOCKET_SERVER_GID
62016+ int "GID to deny server sockets for"
62017+ depends on GRKERNSEC_SOCKET_SERVER
62018+ default 1002
62019+ help
62020+ Here you can choose the GID to disable server socket access for.
62021+ Remember to add the users you want server socket access disabled for to
62022+ the GID specified here. If the sysctl option is enabled, a sysctl
62023+ option with name "socket_server_gid" is created.
62024+
62025+endmenu
62026+
62027+menu "Physical Protections"
62028+depends on GRKERNSEC
62029+
62030+config GRKERNSEC_DENYUSB
62031+ bool "Deny new USB connections after toggle"
62032+ default y if GRKERNSEC_CONFIG_AUTO
62033+ help
62034+ If you say Y here, a new sysctl option with name "deny_new_usb"
62035+ will be created. Setting its value to 1 will prevent any new
62036+ USB devices from being recognized by the OS. Any attempted USB
62037+ device insertion will be logged. This option is intended to be
62038+ used against custom USB devices designed to exploit vulnerabilities
62039+ in various USB device drivers.
62040+
62041+ For greatest effectiveness, this sysctl should be set after any
62042+ relevant init scripts. Once set, it cannot be unset.
62043+
62044+endmenu
62045+
62046+menu "Sysctl Support"
62047+depends on GRKERNSEC && SYSCTL
62048+
62049+config GRKERNSEC_SYSCTL
62050+ bool "Sysctl support"
62051+ default y if GRKERNSEC_CONFIG_AUTO
62052+ help
62053+ If you say Y here, you will be able to change the options that
62054+ grsecurity runs with at bootup, without having to recompile your
62055+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
62056+ to enable (1) or disable (0) various features. All the sysctl entries
62057+ are mutable until the "grsec_lock" entry is set to a non-zero value.
62058+ All features enabled in the kernel configuration are disabled at boot
62059+ if you do not say Y to the "Turn on features by default" option.
62060+ All options should be set at startup, and the grsec_lock entry should
62061+ be set to a non-zero value after all the options are set.
62062+ *THIS IS EXTREMELY IMPORTANT*
62063+
62064+config GRKERNSEC_SYSCTL_DISTRO
62065+ bool "Extra sysctl support for distro makers (READ HELP)"
62066+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
62067+ help
62068+ If you say Y here, additional sysctl options will be created
62069+ for features that affect processes running as root. Therefore,
62070+ it is critical when using this option that the grsec_lock entry be
62071+ enabled after boot. Only distros with prebuilt kernel packages
62072+ with this option enabled that can ensure grsec_lock is enabled
62073+ after boot should use this option.
62074+ *Failure to set grsec_lock after boot makes all grsec features
62075+ this option covers useless*
62076+
62077+ Currently this option creates the following sysctl entries:
62078+ "Disable Privileged I/O": "disable_priv_io"
62079+
62080+config GRKERNSEC_SYSCTL_ON
62081+ bool "Turn on features by default"
62082+ default y if GRKERNSEC_CONFIG_AUTO
62083+ depends on GRKERNSEC_SYSCTL
62084+ help
62085+ If you say Y here, instead of having all features enabled in the
62086+ kernel configuration disabled at boot time, the features will be
62087+ enabled at boot time. It is recommended you say Y here unless
62088+ there is some reason you would want all sysctl-tunable features to
62089+ be disabled by default. As mentioned elsewhere, it is important
62090+ to enable the grsec_lock entry once you have finished modifying
62091+ the sysctl entries.
62092+
62093+endmenu
62094+menu "Logging Options"
62095+depends on GRKERNSEC
62096+
62097+config GRKERNSEC_FLOODTIME
62098+ int "Seconds in between log messages (minimum)"
62099+ default 10
62100+ help
62101+ This option allows you to enforce the number of seconds between
62102+ grsecurity log messages. The default should be suitable for most
62103+ people, however, if you choose to change it, choose a value small enough
62104+ to allow informative logs to be produced, but large enough to
62105+ prevent flooding.
62106+
62107+config GRKERNSEC_FLOODBURST
62108+ int "Number of messages in a burst (maximum)"
62109+ default 6
62110+ help
62111+ This option allows you to choose the maximum number of messages allowed
62112+ within the flood time interval you chose in a separate option. The
62113+ default should be suitable for most people, however if you find that
62114+ many of your logs are being interpreted as flooding, you may want to
62115+ raise this value.
62116+
62117+endmenu
62118diff --git a/grsecurity/Makefile b/grsecurity/Makefile
62119new file mode 100644
62120index 0000000..b0b77d5
62121--- /dev/null
62122+++ b/grsecurity/Makefile
62123@@ -0,0 +1,43 @@
62124+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
62125+# during 2001-2009 it has been completely redesigned by Brad Spengler
62126+# into an RBAC system
62127+#
62128+# All code in this directory and various hooks inserted throughout the kernel
62129+# are copyright Brad Spengler - Open Source Security, Inc., and released
62130+# under the GPL v2 or higher
62131+
62132+KBUILD_CFLAGS += -Werror
62133+
62134+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
62135+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
62136+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
62137+ grsec_usb.o
62138+
62139+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
62140+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
62141+ gracl_learn.o grsec_log.o
62142+ifdef CONFIG_COMPAT
62143+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
62144+endif
62145+
62146+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
62147+
62148+ifdef CONFIG_NET
62149+obj-y += grsec_sock.o
62150+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
62151+endif
62152+
62153+ifndef CONFIG_GRKERNSEC
62154+obj-y += grsec_disabled.o
62155+endif
62156+
62157+ifdef CONFIG_GRKERNSEC_HIDESYM
62158+extra-y := grsec_hidesym.o
62159+$(obj)/grsec_hidesym.o:
62160+ @-chmod -f 500 /boot
62161+ @-chmod -f 500 /lib/modules
62162+ @-chmod -f 500 /lib64/modules
62163+ @-chmod -f 500 /lib32/modules
62164+ @-chmod -f 700 .
62165+ @echo ' grsec: protected kernel image paths'
62166+endif
62167diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
62168new file mode 100644
62169index 0000000..c0793fd
62170--- /dev/null
62171+++ b/grsecurity/gracl.c
62172@@ -0,0 +1,4178 @@
62173+#include <linux/kernel.h>
62174+#include <linux/module.h>
62175+#include <linux/sched.h>
62176+#include <linux/mm.h>
62177+#include <linux/file.h>
62178+#include <linux/fs.h>
62179+#include <linux/namei.h>
62180+#include <linux/mount.h>
62181+#include <linux/tty.h>
62182+#include <linux/proc_fs.h>
62183+#include <linux/lglock.h>
62184+#include <linux/slab.h>
62185+#include <linux/vmalloc.h>
62186+#include <linux/types.h>
62187+#include <linux/sysctl.h>
62188+#include <linux/netdevice.h>
62189+#include <linux/ptrace.h>
62190+#include <linux/gracl.h>
62191+#include <linux/gralloc.h>
62192+#include <linux/security.h>
62193+#include <linux/grinternal.h>
62194+#include <linux/pid_namespace.h>
62195+#include <linux/stop_machine.h>
62196+#include <linux/fdtable.h>
62197+#include <linux/percpu.h>
62198+#include <linux/lglock.h>
62199+#include <linux/hugetlb.h>
62200+#include <linux/posix-timers.h>
62201+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
62202+#include <linux/magic.h>
62203+#include <linux/pagemap.h>
62204+#include "../fs/btrfs/async-thread.h"
62205+#include "../fs/btrfs/ctree.h"
62206+#include "../fs/btrfs/btrfs_inode.h"
62207+#endif
62208+#include "../fs/mount.h"
62209+
62210+#include <asm/uaccess.h>
62211+#include <asm/errno.h>
62212+#include <asm/mman.h>
62213+
62214+extern struct lglock vfsmount_lock;
62215+
62216+static struct acl_role_db acl_role_set;
62217+static struct name_db name_set;
62218+static struct inodev_db inodev_set;
62219+
62220+/* for keeping track of userspace pointers used for subjects, so we
62221+ can share references in the kernel as well
62222+*/
62223+
62224+static struct path real_root;
62225+
62226+static struct acl_subj_map_db subj_map_set;
62227+
62228+static struct acl_role_label *default_role;
62229+
62230+static struct acl_role_label *role_list;
62231+
62232+static u16 acl_sp_role_value;
62233+
62234+extern char *gr_shared_page[4];
62235+static DEFINE_MUTEX(gr_dev_mutex);
62236+DEFINE_RWLOCK(gr_inode_lock);
62237+
62238+struct gr_arg *gr_usermode;
62239+
62240+static unsigned int gr_status __read_only = GR_STATUS_INIT;
62241+
62242+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
62243+extern void gr_clear_learn_entries(void);
62244+
62245+unsigned char *gr_system_salt;
62246+unsigned char *gr_system_sum;
62247+
62248+static struct sprole_pw **acl_special_roles = NULL;
62249+static __u16 num_sprole_pws = 0;
62250+
62251+static struct acl_role_label *kernel_role = NULL;
62252+
62253+static unsigned int gr_auth_attempts = 0;
62254+static unsigned long gr_auth_expires = 0UL;
62255+
62256+#ifdef CONFIG_NET
62257+extern struct vfsmount *sock_mnt;
62258+#endif
62259+
62260+extern struct vfsmount *pipe_mnt;
62261+extern struct vfsmount *shm_mnt;
62262+
62263+#ifdef CONFIG_HUGETLBFS
62264+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
62265+#endif
62266+
62267+static struct acl_object_label *fakefs_obj_rw;
62268+static struct acl_object_label *fakefs_obj_rwx;
62269+
62270+extern int gr_init_uidset(void);
62271+extern void gr_free_uidset(void);
62272+extern void gr_remove_uid(uid_t uid);
62273+extern int gr_find_uid(uid_t uid);
62274+
62275+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
62276+{
62277+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
62278+ return -EFAULT;
62279+
62280+ return 0;
62281+}
62282+
62283+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
62284+{
62285+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
62286+ return -EFAULT;
62287+
62288+ return 0;
62289+}
62290+
62291+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
62292+{
62293+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
62294+ return -EFAULT;
62295+
62296+ return 0;
62297+}
62298+
62299+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
62300+{
62301+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
62302+ return -EFAULT;
62303+
62304+ return 0;
62305+}
62306+
62307+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
62308+{
62309+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
62310+ return -EFAULT;
62311+
62312+ return 0;
62313+}
62314+
62315+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
62316+{
62317+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
62318+ return -EFAULT;
62319+
62320+ return 0;
62321+}
62322+
62323+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
62324+{
62325+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
62326+ return -EFAULT;
62327+
62328+ return 0;
62329+}
62330+
62331+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
62332+{
62333+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
62334+ return -EFAULT;
62335+
62336+ return 0;
62337+}
62338+
62339+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
62340+{
62341+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
62342+ return -EFAULT;
62343+
62344+ return 0;
62345+}
62346+
62347+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
62348+{
62349+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
62350+ return -EFAULT;
62351+
62352+ if ((uwrap->version != GRSECURITY_VERSION) || (uwrap->size != sizeof(struct gr_arg)))
62353+ return -EINVAL;
62354+
62355+ return 0;
62356+}
62357+
62358+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
62359+{
62360+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
62361+ return -EFAULT;
62362+
62363+ return 0;
62364+}
62365+
62366+static size_t get_gr_arg_wrapper_size_normal(void)
62367+{
62368+ return sizeof(struct gr_arg_wrapper);
62369+}
62370+
62371+#ifdef CONFIG_COMPAT
62372+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
62373+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
62374+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
62375+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
62376+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
62377+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
62378+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
62379+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
62380+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
62381+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
62382+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
62383+extern size_t get_gr_arg_wrapper_size_compat(void);
62384+
62385+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
62386+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
62387+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
62388+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
62389+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
62390+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
62391+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
62392+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
62393+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
62394+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
62395+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
62396+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
62397+
62398+#else
62399+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
62400+#define copy_gr_arg copy_gr_arg_normal
62401+#define copy_gr_hash_struct copy_gr_hash_struct_normal
62402+#define copy_acl_object_label copy_acl_object_label_normal
62403+#define copy_acl_subject_label copy_acl_subject_label_normal
62404+#define copy_acl_role_label copy_acl_role_label_normal
62405+#define copy_acl_ip_label copy_acl_ip_label_normal
62406+#define copy_pointer_from_array copy_pointer_from_array_normal
62407+#define copy_sprole_pw copy_sprole_pw_normal
62408+#define copy_role_transition copy_role_transition_normal
62409+#define copy_role_allowed_ip copy_role_allowed_ip_normal
62410+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
62411+#endif
62412+
62413+__inline__ int
62414+gr_acl_is_enabled(void)
62415+{
62416+ return (gr_status & GR_READY);
62417+}
62418+
62419+static inline dev_t __get_dev(const struct dentry *dentry)
62420+{
62421+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
62422+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
62423+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
62424+ else
62425+#endif
62426+ return dentry->d_sb->s_dev;
62427+}
62428+
62429+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
62430+{
62431+ return __get_dev(dentry);
62432+}
62433+
62434+static char gr_task_roletype_to_char(struct task_struct *task)
62435+{
62436+ switch (task->role->roletype &
62437+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
62438+ GR_ROLE_SPECIAL)) {
62439+ case GR_ROLE_DEFAULT:
62440+ return 'D';
62441+ case GR_ROLE_USER:
62442+ return 'U';
62443+ case GR_ROLE_GROUP:
62444+ return 'G';
62445+ case GR_ROLE_SPECIAL:
62446+ return 'S';
62447+ }
62448+
62449+ return 'X';
62450+}
62451+
62452+char gr_roletype_to_char(void)
62453+{
62454+ return gr_task_roletype_to_char(current);
62455+}
62456+
62457+__inline__ int
62458+gr_acl_tpe_check(void)
62459+{
62460+ if (unlikely(!(gr_status & GR_READY)))
62461+ return 0;
62462+ if (current->role->roletype & GR_ROLE_TPE)
62463+ return 1;
62464+ else
62465+ return 0;
62466+}
62467+
62468+int
62469+gr_handle_rawio(const struct inode *inode)
62470+{
62471+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62472+ if (inode && S_ISBLK(inode->i_mode) &&
62473+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
62474+ !capable(CAP_SYS_RAWIO))
62475+ return 1;
62476+#endif
62477+ return 0;
62478+}
62479+
62480+static int
62481+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
62482+{
62483+ if (likely(lena != lenb))
62484+ return 0;
62485+
62486+ return !memcmp(a, b, lena);
62487+}
62488+
62489+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
62490+{
62491+ *buflen -= namelen;
62492+ if (*buflen < 0)
62493+ return -ENAMETOOLONG;
62494+ *buffer -= namelen;
62495+ memcpy(*buffer, str, namelen);
62496+ return 0;
62497+}
62498+
62499+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
62500+{
62501+ return prepend(buffer, buflen, name->name, name->len);
62502+}
62503+
62504+static int prepend_path(const struct path *path, struct path *root,
62505+ char **buffer, int *buflen)
62506+{
62507+ struct dentry *dentry = path->dentry;
62508+ struct vfsmount *vfsmnt = path->mnt;
62509+ struct mount *mnt = real_mount(vfsmnt);
62510+ bool slash = false;
62511+ int error = 0;
62512+
62513+ while (dentry != root->dentry || vfsmnt != root->mnt) {
62514+ struct dentry * parent;
62515+
62516+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
62517+ /* Global root? */
62518+ if (!mnt_has_parent(mnt)) {
62519+ goto out;
62520+ }
62521+ dentry = mnt->mnt_mountpoint;
62522+ mnt = mnt->mnt_parent;
62523+ vfsmnt = &mnt->mnt;
62524+ continue;
62525+ }
62526+ parent = dentry->d_parent;
62527+ prefetch(parent);
62528+ spin_lock(&dentry->d_lock);
62529+ error = prepend_name(buffer, buflen, &dentry->d_name);
62530+ spin_unlock(&dentry->d_lock);
62531+ if (!error)
62532+ error = prepend(buffer, buflen, "/", 1);
62533+ if (error)
62534+ break;
62535+
62536+ slash = true;
62537+ dentry = parent;
62538+ }
62539+
62540+out:
62541+ if (!error && !slash)
62542+ error = prepend(buffer, buflen, "/", 1);
62543+
62544+ return error;
62545+}
62546+
62547+/* this must be called with vfsmount_lock and rename_lock held */
62548+
62549+static char *__our_d_path(const struct path *path, struct path *root,
62550+ char *buf, int buflen)
62551+{
62552+ char *res = buf + buflen;
62553+ int error;
62554+
62555+ prepend(&res, &buflen, "\0", 1);
62556+ error = prepend_path(path, root, &res, &buflen);
62557+ if (error)
62558+ return ERR_PTR(error);
62559+
62560+ return res;
62561+}
62562+
62563+static char *
62564+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
62565+{
62566+ char *retval;
62567+
62568+ retval = __our_d_path(path, root, buf, buflen);
62569+ if (unlikely(IS_ERR(retval)))
62570+ retval = strcpy(buf, "<path too long>");
62571+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
62572+ retval[1] = '\0';
62573+
62574+ return retval;
62575+}
62576+
62577+static char *
62578+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
62579+ char *buf, int buflen)
62580+{
62581+ struct path path;
62582+ char *res;
62583+
62584+ path.dentry = (struct dentry *)dentry;
62585+ path.mnt = (struct vfsmount *)vfsmnt;
62586+
62587+ /* we can use real_root.dentry, real_root.mnt, because this is only called
62588+ by the RBAC system */
62589+ res = gen_full_path(&path, &real_root, buf, buflen);
62590+
62591+ return res;
62592+}
62593+
62594+static char *
62595+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
62596+ char *buf, int buflen)
62597+{
62598+ char *res;
62599+ struct path path;
62600+ struct path root;
62601+ struct task_struct *reaper = init_pid_ns.child_reaper;
62602+
62603+ path.dentry = (struct dentry *)dentry;
62604+ path.mnt = (struct vfsmount *)vfsmnt;
62605+
62606+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
62607+ get_fs_root(reaper->fs, &root);
62608+
62609+ br_read_lock(&vfsmount_lock);
62610+ write_seqlock(&rename_lock);
62611+ res = gen_full_path(&path, &root, buf, buflen);
62612+ write_sequnlock(&rename_lock);
62613+ br_read_unlock(&vfsmount_lock);
62614+
62615+ path_put(&root);
62616+ return res;
62617+}
62618+
62619+static char *
62620+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
62621+{
62622+ char *ret;
62623+ br_read_lock(&vfsmount_lock);
62624+ write_seqlock(&rename_lock);
62625+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
62626+ PAGE_SIZE);
62627+ write_sequnlock(&rename_lock);
62628+ br_read_unlock(&vfsmount_lock);
62629+ return ret;
62630+}
62631+
62632+static char *
62633+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
62634+{
62635+ char *ret;
62636+ char *buf;
62637+ int buflen;
62638+
62639+ br_read_lock(&vfsmount_lock);
62640+ write_seqlock(&rename_lock);
62641+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
62642+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
62643+ buflen = (int)(ret - buf);
62644+ if (buflen >= 5)
62645+ prepend(&ret, &buflen, "/proc", 5);
62646+ else
62647+ ret = strcpy(buf, "<path too long>");
62648+ write_sequnlock(&rename_lock);
62649+ br_read_unlock(&vfsmount_lock);
62650+ return ret;
62651+}
62652+
62653+char *
62654+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
62655+{
62656+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
62657+ PAGE_SIZE);
62658+}
62659+
62660+char *
62661+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
62662+{
62663+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
62664+ PAGE_SIZE);
62665+}
62666+
62667+char *
62668+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
62669+{
62670+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
62671+ PAGE_SIZE);
62672+}
62673+
62674+char *
62675+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
62676+{
62677+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
62678+ PAGE_SIZE);
62679+}
62680+
62681+char *
62682+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
62683+{
62684+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
62685+ PAGE_SIZE);
62686+}
62687+
62688+__inline__ __u32
62689+to_gr_audit(const __u32 reqmode)
62690+{
62691+ /* masks off auditable permission flags, then shifts them to create
62692+ auditing flags, and adds the special case of append auditing if
62693+ we're requesting write */
62694+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
62695+}
62696+
62697+struct acl_subject_label *
62698+lookup_subject_map(const struct acl_subject_label *userp)
62699+{
62700+ unsigned int index = gr_shash(userp, subj_map_set.s_size);
62701+ struct subject_map *match;
62702+
62703+ match = subj_map_set.s_hash[index];
62704+
62705+ while (match && match->user != userp)
62706+ match = match->next;
62707+
62708+ if (match != NULL)
62709+ return match->kernel;
62710+ else
62711+ return NULL;
62712+}
62713+
62714+static void
62715+insert_subj_map_entry(struct subject_map *subjmap)
62716+{
62717+ unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
62718+ struct subject_map **curr;
62719+
62720+ subjmap->prev = NULL;
62721+
62722+ curr = &subj_map_set.s_hash[index];
62723+ if (*curr != NULL)
62724+ (*curr)->prev = subjmap;
62725+
62726+ subjmap->next = *curr;
62727+ *curr = subjmap;
62728+
62729+ return;
62730+}
62731+
62732+static struct acl_role_label *
62733+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
62734+ const gid_t gid)
62735+{
62736+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
62737+ struct acl_role_label *match;
62738+ struct role_allowed_ip *ipp;
62739+ unsigned int x;
62740+ u32 curr_ip = task->signal->curr_ip;
62741+
62742+ task->signal->saved_ip = curr_ip;
62743+
62744+ match = acl_role_set.r_hash[index];
62745+
62746+ while (match) {
62747+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
62748+ for (x = 0; x < match->domain_child_num; x++) {
62749+ if (match->domain_children[x] == uid)
62750+ goto found;
62751+ }
62752+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
62753+ break;
62754+ match = match->next;
62755+ }
62756+found:
62757+ if (match == NULL) {
62758+ try_group:
62759+ index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
62760+ match = acl_role_set.r_hash[index];
62761+
62762+ while (match) {
62763+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
62764+ for (x = 0; x < match->domain_child_num; x++) {
62765+ if (match->domain_children[x] == gid)
62766+ goto found2;
62767+ }
62768+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
62769+ break;
62770+ match = match->next;
62771+ }
62772+found2:
62773+ if (match == NULL)
62774+ match = default_role;
62775+ if (match->allowed_ips == NULL)
62776+ return match;
62777+ else {
62778+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
62779+ if (likely
62780+ ((ntohl(curr_ip) & ipp->netmask) ==
62781+ (ntohl(ipp->addr) & ipp->netmask)))
62782+ return match;
62783+ }
62784+ match = default_role;
62785+ }
62786+ } else if (match->allowed_ips == NULL) {
62787+ return match;
62788+ } else {
62789+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
62790+ if (likely
62791+ ((ntohl(curr_ip) & ipp->netmask) ==
62792+ (ntohl(ipp->addr) & ipp->netmask)))
62793+ return match;
62794+ }
62795+ goto try_group;
62796+ }
62797+
62798+ return match;
62799+}
62800+
62801+struct acl_subject_label *
62802+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
62803+ const struct acl_role_label *role)
62804+{
62805+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
62806+ struct acl_subject_label *match;
62807+
62808+ match = role->subj_hash[index];
62809+
62810+ while (match && (match->inode != ino || match->device != dev ||
62811+ (match->mode & GR_DELETED))) {
62812+ match = match->next;
62813+ }
62814+
62815+ if (match && !(match->mode & GR_DELETED))
62816+ return match;
62817+ else
62818+ return NULL;
62819+}
62820+
62821+struct acl_subject_label *
62822+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
62823+ const struct acl_role_label *role)
62824+{
62825+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
62826+ struct acl_subject_label *match;
62827+
62828+ match = role->subj_hash[index];
62829+
62830+ while (match && (match->inode != ino || match->device != dev ||
62831+ !(match->mode & GR_DELETED))) {
62832+ match = match->next;
62833+ }
62834+
62835+ if (match && (match->mode & GR_DELETED))
62836+ return match;
62837+ else
62838+ return NULL;
62839+}
62840+
62841+static struct acl_object_label *
62842+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
62843+ const struct acl_subject_label *subj)
62844+{
62845+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
62846+ struct acl_object_label *match;
62847+
62848+ match = subj->obj_hash[index];
62849+
62850+ while (match && (match->inode != ino || match->device != dev ||
62851+ (match->mode & GR_DELETED))) {
62852+ match = match->next;
62853+ }
62854+
62855+ if (match && !(match->mode & GR_DELETED))
62856+ return match;
62857+ else
62858+ return NULL;
62859+}
62860+
62861+static struct acl_object_label *
62862+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
62863+ const struct acl_subject_label *subj)
62864+{
62865+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
62866+ struct acl_object_label *match;
62867+
62868+ match = subj->obj_hash[index];
62869+
62870+ while (match && (match->inode != ino || match->device != dev ||
62871+ !(match->mode & GR_DELETED))) {
62872+ match = match->next;
62873+ }
62874+
62875+ if (match && (match->mode & GR_DELETED))
62876+ return match;
62877+
62878+ match = subj->obj_hash[index];
62879+
62880+ while (match && (match->inode != ino || match->device != dev ||
62881+ (match->mode & GR_DELETED))) {
62882+ match = match->next;
62883+ }
62884+
62885+ if (match && !(match->mode & GR_DELETED))
62886+ return match;
62887+ else
62888+ return NULL;
62889+}
62890+
62891+static struct name_entry *
62892+lookup_name_entry(const char *name)
62893+{
62894+ unsigned int len = strlen(name);
62895+ unsigned int key = full_name_hash(name, len);
62896+ unsigned int index = key % name_set.n_size;
62897+ struct name_entry *match;
62898+
62899+ match = name_set.n_hash[index];
62900+
62901+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
62902+ match = match->next;
62903+
62904+ return match;
62905+}
62906+
62907+static struct name_entry *
62908+lookup_name_entry_create(const char *name)
62909+{
62910+ unsigned int len = strlen(name);
62911+ unsigned int key = full_name_hash(name, len);
62912+ unsigned int index = key % name_set.n_size;
62913+ struct name_entry *match;
62914+
62915+ match = name_set.n_hash[index];
62916+
62917+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
62918+ !match->deleted))
62919+ match = match->next;
62920+
62921+ if (match && match->deleted)
62922+ return match;
62923+
62924+ match = name_set.n_hash[index];
62925+
62926+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
62927+ match->deleted))
62928+ match = match->next;
62929+
62930+ if (match && !match->deleted)
62931+ return match;
62932+ else
62933+ return NULL;
62934+}
62935+
62936+static struct inodev_entry *
62937+lookup_inodev_entry(const ino_t ino, const dev_t dev)
62938+{
62939+ unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
62940+ struct inodev_entry *match;
62941+
62942+ match = inodev_set.i_hash[index];
62943+
62944+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
62945+ match = match->next;
62946+
62947+ return match;
62948+}
62949+
62950+static void
62951+insert_inodev_entry(struct inodev_entry *entry)
62952+{
62953+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
62954+ inodev_set.i_size);
62955+ struct inodev_entry **curr;
62956+
62957+ entry->prev = NULL;
62958+
62959+ curr = &inodev_set.i_hash[index];
62960+ if (*curr != NULL)
62961+ (*curr)->prev = entry;
62962+
62963+ entry->next = *curr;
62964+ *curr = entry;
62965+
62966+ return;
62967+}
62968+
62969+static void
62970+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
62971+{
62972+ unsigned int index =
62973+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
62974+ struct acl_role_label **curr;
62975+ struct acl_role_label *tmp, *tmp2;
62976+
62977+ curr = &acl_role_set.r_hash[index];
62978+
62979+ /* simple case, slot is empty, just set it to our role */
62980+ if (*curr == NULL) {
62981+ *curr = role;
62982+ } else {
62983+ /* example:
62984+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
62985+ 2 -> 3
62986+ */
62987+ /* first check to see if we can already be reached via this slot */
62988+ tmp = *curr;
62989+ while (tmp && tmp != role)
62990+ tmp = tmp->next;
62991+ if (tmp == role) {
62992+ /* we don't need to add ourselves to this slot's chain */
62993+ return;
62994+ }
62995+ /* we need to add ourselves to this chain, two cases */
62996+ if (role->next == NULL) {
62997+ /* simple case, append the current chain to our role */
62998+ role->next = *curr;
62999+ *curr = role;
63000+ } else {
63001+ /* 1 -> 2 -> 3 -> 4
63002+ 2 -> 3 -> 4
63003+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
63004+ */
63005+ /* trickier case: walk our role's chain until we find
63006+ the role for the start of the current slot's chain */
63007+ tmp = role;
63008+ tmp2 = *curr;
63009+ while (tmp->next && tmp->next != tmp2)
63010+ tmp = tmp->next;
63011+ if (tmp->next == tmp2) {
63012+ /* from example above, we found 3, so just
63013+ replace this slot's chain with ours */
63014+ *curr = role;
63015+ } else {
63016+ /* we didn't find a subset of our role's chain
63017+ in the current slot's chain, so append their
63018+ chain to ours, and set us as the first role in
63019+ the slot's chain
63020+
63021+ we could fold this case with the case above,
63022+ but making it explicit for clarity
63023+ */
63024+ tmp->next = tmp2;
63025+ *curr = role;
63026+ }
63027+ }
63028+ }
63029+
63030+ return;
63031+}
63032+
63033+static void
63034+insert_acl_role_label(struct acl_role_label *role)
63035+{
63036+ int i;
63037+
63038+ if (role_list == NULL) {
63039+ role_list = role;
63040+ role->prev = NULL;
63041+ } else {
63042+ role->prev = role_list;
63043+ role_list = role;
63044+ }
63045+
63046+ /* used for hash chains */
63047+ role->next = NULL;
63048+
63049+ if (role->roletype & GR_ROLE_DOMAIN) {
63050+ for (i = 0; i < role->domain_child_num; i++)
63051+ __insert_acl_role_label(role, role->domain_children[i]);
63052+ } else
63053+ __insert_acl_role_label(role, role->uidgid);
63054+}
63055+
63056+static int
63057+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
63058+{
63059+ struct name_entry **curr, *nentry;
63060+ struct inodev_entry *ientry;
63061+ unsigned int len = strlen(name);
63062+ unsigned int key = full_name_hash(name, len);
63063+ unsigned int index = key % name_set.n_size;
63064+
63065+ curr = &name_set.n_hash[index];
63066+
63067+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
63068+ curr = &((*curr)->next);
63069+
63070+ if (*curr != NULL)
63071+ return 1;
63072+
63073+ nentry = acl_alloc(sizeof (struct name_entry));
63074+ if (nentry == NULL)
63075+ return 0;
63076+ ientry = acl_alloc(sizeof (struct inodev_entry));
63077+ if (ientry == NULL)
63078+ return 0;
63079+ ientry->nentry = nentry;
63080+
63081+ nentry->key = key;
63082+ nentry->name = name;
63083+ nentry->inode = inode;
63084+ nentry->device = device;
63085+ nentry->len = len;
63086+ nentry->deleted = deleted;
63087+
63088+ nentry->prev = NULL;
63089+ curr = &name_set.n_hash[index];
63090+ if (*curr != NULL)
63091+ (*curr)->prev = nentry;
63092+ nentry->next = *curr;
63093+ *curr = nentry;
63094+
63095+ /* insert us into the table searchable by inode/dev */
63096+ insert_inodev_entry(ientry);
63097+
63098+ return 1;
63099+}
63100+
63101+static void
63102+insert_acl_obj_label(struct acl_object_label *obj,
63103+ struct acl_subject_label *subj)
63104+{
63105+ unsigned int index =
63106+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
63107+ struct acl_object_label **curr;
63108+
63109+
63110+ obj->prev = NULL;
63111+
63112+ curr = &subj->obj_hash[index];
63113+ if (*curr != NULL)
63114+ (*curr)->prev = obj;
63115+
63116+ obj->next = *curr;
63117+ *curr = obj;
63118+
63119+ return;
63120+}
63121+
63122+static void
63123+insert_acl_subj_label(struct acl_subject_label *obj,
63124+ struct acl_role_label *role)
63125+{
63126+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
63127+ struct acl_subject_label **curr;
63128+
63129+ obj->prev = NULL;
63130+
63131+ curr = &role->subj_hash[index];
63132+ if (*curr != NULL)
63133+ (*curr)->prev = obj;
63134+
63135+ obj->next = *curr;
63136+ *curr = obj;
63137+
63138+ return;
63139+}
63140+
63141+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
63142+
63143+static void *
63144+create_table(__u32 * len, int elementsize)
63145+{
63146+ unsigned int table_sizes[] = {
63147+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
63148+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
63149+ 4194301, 8388593, 16777213, 33554393, 67108859
63150+ };
63151+ void *newtable = NULL;
63152+ unsigned int pwr = 0;
63153+
63154+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
63155+ table_sizes[pwr] <= *len)
63156+ pwr++;
63157+
63158+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
63159+ return newtable;
63160+
63161+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
63162+ newtable =
63163+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
63164+ else
63165+ newtable = vmalloc(table_sizes[pwr] * elementsize);
63166+
63167+ *len = table_sizes[pwr];
63168+
63169+ return newtable;
63170+}
63171+
63172+static int
63173+init_variables(const struct gr_arg *arg)
63174+{
63175+ struct task_struct *reaper = init_pid_ns.child_reaper;
63176+ unsigned int stacksize;
63177+
63178+ subj_map_set.s_size = arg->role_db.num_subjects;
63179+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
63180+ name_set.n_size = arg->role_db.num_objects;
63181+ inodev_set.i_size = arg->role_db.num_objects;
63182+
63183+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
63184+ !name_set.n_size || !inodev_set.i_size)
63185+ return 1;
63186+
63187+ if (!gr_init_uidset())
63188+ return 1;
63189+
63190+ /* set up the stack that holds allocation info */
63191+
63192+ stacksize = arg->role_db.num_pointers + 5;
63193+
63194+ if (!acl_alloc_stack_init(stacksize))
63195+ return 1;
63196+
63197+ /* grab reference for the real root dentry and vfsmount */
63198+ get_fs_root(reaper->fs, &real_root);
63199+
63200+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
63201+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
63202+#endif
63203+
63204+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
63205+ if (fakefs_obj_rw == NULL)
63206+ return 1;
63207+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
63208+
63209+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
63210+ if (fakefs_obj_rwx == NULL)
63211+ return 1;
63212+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
63213+
63214+ subj_map_set.s_hash =
63215+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
63216+ acl_role_set.r_hash =
63217+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
63218+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
63219+ inodev_set.i_hash =
63220+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
63221+
63222+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
63223+ !name_set.n_hash || !inodev_set.i_hash)
63224+ return 1;
63225+
63226+ memset(subj_map_set.s_hash, 0,
63227+ sizeof(struct subject_map *) * subj_map_set.s_size);
63228+ memset(acl_role_set.r_hash, 0,
63229+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
63230+ memset(name_set.n_hash, 0,
63231+ sizeof (struct name_entry *) * name_set.n_size);
63232+ memset(inodev_set.i_hash, 0,
63233+ sizeof (struct inodev_entry *) * inodev_set.i_size);
63234+
63235+ return 0;
63236+}
63237+
63238+/* free information not needed after startup
63239+ currently contains user->kernel pointer mappings for subjects
63240+*/
63241+
63242+static void
63243+free_init_variables(void)
63244+{
63245+ __u32 i;
63246+
63247+ if (subj_map_set.s_hash) {
63248+ for (i = 0; i < subj_map_set.s_size; i++) {
63249+ if (subj_map_set.s_hash[i]) {
63250+ kfree(subj_map_set.s_hash[i]);
63251+ subj_map_set.s_hash[i] = NULL;
63252+ }
63253+ }
63254+
63255+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
63256+ PAGE_SIZE)
63257+ kfree(subj_map_set.s_hash);
63258+ else
63259+ vfree(subj_map_set.s_hash);
63260+ }
63261+
63262+ return;
63263+}
63264+
63265+static void
63266+free_variables(void)
63267+{
63268+ struct acl_subject_label *s;
63269+ struct acl_role_label *r;
63270+ struct task_struct *task, *task2;
63271+ unsigned int x;
63272+
63273+ gr_clear_learn_entries();
63274+
63275+ read_lock(&tasklist_lock);
63276+ do_each_thread(task2, task) {
63277+ task->acl_sp_role = 0;
63278+ task->acl_role_id = 0;
63279+ task->acl = NULL;
63280+ task->role = NULL;
63281+ } while_each_thread(task2, task);
63282+ read_unlock(&tasklist_lock);
63283+
63284+ /* release the reference to the real root dentry and vfsmount */
63285+ path_put(&real_root);
63286+ memset(&real_root, 0, sizeof(real_root));
63287+
63288+ /* free all object hash tables */
63289+
63290+ FOR_EACH_ROLE_START(r)
63291+ if (r->subj_hash == NULL)
63292+ goto next_role;
63293+ FOR_EACH_SUBJECT_START(r, s, x)
63294+ if (s->obj_hash == NULL)
63295+ break;
63296+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
63297+ kfree(s->obj_hash);
63298+ else
63299+ vfree(s->obj_hash);
63300+ FOR_EACH_SUBJECT_END(s, x)
63301+ FOR_EACH_NESTED_SUBJECT_START(r, s)
63302+ if (s->obj_hash == NULL)
63303+ break;
63304+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
63305+ kfree(s->obj_hash);
63306+ else
63307+ vfree(s->obj_hash);
63308+ FOR_EACH_NESTED_SUBJECT_END(s)
63309+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
63310+ kfree(r->subj_hash);
63311+ else
63312+ vfree(r->subj_hash);
63313+ r->subj_hash = NULL;
63314+next_role:
63315+ FOR_EACH_ROLE_END(r)
63316+
63317+ acl_free_all();
63318+
63319+ if (acl_role_set.r_hash) {
63320+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
63321+ PAGE_SIZE)
63322+ kfree(acl_role_set.r_hash);
63323+ else
63324+ vfree(acl_role_set.r_hash);
63325+ }
63326+ if (name_set.n_hash) {
63327+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
63328+ PAGE_SIZE)
63329+ kfree(name_set.n_hash);
63330+ else
63331+ vfree(name_set.n_hash);
63332+ }
63333+
63334+ if (inodev_set.i_hash) {
63335+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
63336+ PAGE_SIZE)
63337+ kfree(inodev_set.i_hash);
63338+ else
63339+ vfree(inodev_set.i_hash);
63340+ }
63341+
63342+ gr_free_uidset();
63343+
63344+ memset(&name_set, 0, sizeof (struct name_db));
63345+ memset(&inodev_set, 0, sizeof (struct inodev_db));
63346+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
63347+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
63348+
63349+ default_role = NULL;
63350+ kernel_role = NULL;
63351+ role_list = NULL;
63352+
63353+ return;
63354+}
63355+
63356+static struct acl_subject_label *
63357+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
63358+
63359+static int alloc_and_copy_string(char **name, unsigned int maxlen)
63360+{
63361+ unsigned int len = strnlen_user(*name, maxlen);
63362+ char *tmp;
63363+
63364+ if (!len || len >= maxlen)
63365+ return -EINVAL;
63366+
63367+ if ((tmp = (char *) acl_alloc(len)) == NULL)
63368+ return -ENOMEM;
63369+
63370+ if (copy_from_user(tmp, *name, len))
63371+ return -EFAULT;
63372+
63373+ tmp[len-1] = '\0';
63374+ *name = tmp;
63375+
63376+ return 0;
63377+}
63378+
63379+static int
63380+copy_user_glob(struct acl_object_label *obj)
63381+{
63382+ struct acl_object_label *g_tmp, **guser;
63383+ int error;
63384+
63385+ if (obj->globbed == NULL)
63386+ return 0;
63387+
63388+ guser = &obj->globbed;
63389+ while (*guser) {
63390+ g_tmp = (struct acl_object_label *)
63391+ acl_alloc(sizeof (struct acl_object_label));
63392+ if (g_tmp == NULL)
63393+ return -ENOMEM;
63394+
63395+ if (copy_acl_object_label(g_tmp, *guser))
63396+ return -EFAULT;
63397+
63398+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
63399+ if (error)
63400+ return error;
63401+
63402+ *guser = g_tmp;
63403+ guser = &(g_tmp->next);
63404+ }
63405+
63406+ return 0;
63407+}
63408+
63409+static int
63410+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
63411+ struct acl_role_label *role)
63412+{
63413+ struct acl_object_label *o_tmp;
63414+ int ret;
63415+
63416+ while (userp) {
63417+ if ((o_tmp = (struct acl_object_label *)
63418+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
63419+ return -ENOMEM;
63420+
63421+ if (copy_acl_object_label(o_tmp, userp))
63422+ return -EFAULT;
63423+
63424+ userp = o_tmp->prev;
63425+
63426+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
63427+ if (ret)
63428+ return ret;
63429+
63430+ insert_acl_obj_label(o_tmp, subj);
63431+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
63432+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
63433+ return -ENOMEM;
63434+
63435+ ret = copy_user_glob(o_tmp);
63436+ if (ret)
63437+ return ret;
63438+
63439+ if (o_tmp->nested) {
63440+ int already_copied;
63441+
63442+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
63443+ if (IS_ERR(o_tmp->nested))
63444+ return PTR_ERR(o_tmp->nested);
63445+
63446+ /* insert into nested subject list if we haven't copied this one yet
63447+ to prevent duplicate entries */
63448+ if (!already_copied) {
63449+ o_tmp->nested->next = role->hash->first;
63450+ role->hash->first = o_tmp->nested;
63451+ }
63452+ }
63453+ }
63454+
63455+ return 0;
63456+}
63457+
63458+static __u32
63459+count_user_subjs(struct acl_subject_label *userp)
63460+{
63461+ struct acl_subject_label s_tmp;
63462+ __u32 num = 0;
63463+
63464+ while (userp) {
63465+ if (copy_acl_subject_label(&s_tmp, userp))
63466+ break;
63467+
63468+ userp = s_tmp.prev;
63469+ }
63470+
63471+ return num;
63472+}
63473+
63474+static int
63475+copy_user_allowedips(struct acl_role_label *rolep)
63476+{
63477+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
63478+
63479+ ruserip = rolep->allowed_ips;
63480+
63481+ while (ruserip) {
63482+ rlast = rtmp;
63483+
63484+ if ((rtmp = (struct role_allowed_ip *)
63485+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
63486+ return -ENOMEM;
63487+
63488+ if (copy_role_allowed_ip(rtmp, ruserip))
63489+ return -EFAULT;
63490+
63491+ ruserip = rtmp->prev;
63492+
63493+ if (!rlast) {
63494+ rtmp->prev = NULL;
63495+ rolep->allowed_ips = rtmp;
63496+ } else {
63497+ rlast->next = rtmp;
63498+ rtmp->prev = rlast;
63499+ }
63500+
63501+ if (!ruserip)
63502+ rtmp->next = NULL;
63503+ }
63504+
63505+ return 0;
63506+}
63507+
63508+static int
63509+copy_user_transitions(struct acl_role_label *rolep)
63510+{
63511+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
63512+ int error;
63513+
63514+ rusertp = rolep->transitions;
63515+
63516+ while (rusertp) {
63517+ rlast = rtmp;
63518+
63519+ if ((rtmp = (struct role_transition *)
63520+ acl_alloc(sizeof (struct role_transition))) == NULL)
63521+ return -ENOMEM;
63522+
63523+ if (copy_role_transition(rtmp, rusertp))
63524+ return -EFAULT;
63525+
63526+ rusertp = rtmp->prev;
63527+
63528+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
63529+ if (error)
63530+ return error;
63531+
63532+ if (!rlast) {
63533+ rtmp->prev = NULL;
63534+ rolep->transitions = rtmp;
63535+ } else {
63536+ rlast->next = rtmp;
63537+ rtmp->prev = rlast;
63538+ }
63539+
63540+ if (!rusertp)
63541+ rtmp->next = NULL;
63542+ }
63543+
63544+ return 0;
63545+}
63546+
63547+static __u32 count_user_objs(const struct acl_object_label __user *userp)
63548+{
63549+ struct acl_object_label o_tmp;
63550+ __u32 num = 0;
63551+
63552+ while (userp) {
63553+ if (copy_acl_object_label(&o_tmp, userp))
63554+ break;
63555+
63556+ userp = o_tmp.prev;
63557+ num++;
63558+ }
63559+
63560+ return num;
63561+}
63562+
63563+static struct acl_subject_label *
63564+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
63565+{
63566+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
63567+ __u32 num_objs;
63568+ struct acl_ip_label **i_tmp, *i_utmp2;
63569+ struct gr_hash_struct ghash;
63570+ struct subject_map *subjmap;
63571+ unsigned int i_num;
63572+ int err;
63573+
63574+ if (already_copied != NULL)
63575+ *already_copied = 0;
63576+
63577+ s_tmp = lookup_subject_map(userp);
63578+
63579+ /* we've already copied this subject into the kernel, just return
63580+ the reference to it, and don't copy it over again
63581+ */
63582+ if (s_tmp) {
63583+ if (already_copied != NULL)
63584+ *already_copied = 1;
63585+ return(s_tmp);
63586+ }
63587+
63588+ if ((s_tmp = (struct acl_subject_label *)
63589+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
63590+ return ERR_PTR(-ENOMEM);
63591+
63592+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
63593+ if (subjmap == NULL)
63594+ return ERR_PTR(-ENOMEM);
63595+
63596+ subjmap->user = userp;
63597+ subjmap->kernel = s_tmp;
63598+ insert_subj_map_entry(subjmap);
63599+
63600+ if (copy_acl_subject_label(s_tmp, userp))
63601+ return ERR_PTR(-EFAULT);
63602+
63603+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
63604+ if (err)
63605+ return ERR_PTR(err);
63606+
63607+ if (!strcmp(s_tmp->filename, "/"))
63608+ role->root_label = s_tmp;
63609+
63610+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
63611+ return ERR_PTR(-EFAULT);
63612+
63613+ /* copy user and group transition tables */
63614+
63615+ if (s_tmp->user_trans_num) {
63616+ uid_t *uidlist;
63617+
63618+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
63619+ if (uidlist == NULL)
63620+ return ERR_PTR(-ENOMEM);
63621+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
63622+ return ERR_PTR(-EFAULT);
63623+
63624+ s_tmp->user_transitions = uidlist;
63625+ }
63626+
63627+ if (s_tmp->group_trans_num) {
63628+ gid_t *gidlist;
63629+
63630+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
63631+ if (gidlist == NULL)
63632+ return ERR_PTR(-ENOMEM);
63633+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
63634+ return ERR_PTR(-EFAULT);
63635+
63636+ s_tmp->group_transitions = gidlist;
63637+ }
63638+
63639+ /* set up object hash table */
63640+ num_objs = count_user_objs(ghash.first);
63641+
63642+ s_tmp->obj_hash_size = num_objs;
63643+ s_tmp->obj_hash =
63644+ (struct acl_object_label **)
63645+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
63646+
63647+ if (!s_tmp->obj_hash)
63648+ return ERR_PTR(-ENOMEM);
63649+
63650+ memset(s_tmp->obj_hash, 0,
63651+ s_tmp->obj_hash_size *
63652+ sizeof (struct acl_object_label *));
63653+
63654+ /* add in objects */
63655+ err = copy_user_objs(ghash.first, s_tmp, role);
63656+
63657+ if (err)
63658+ return ERR_PTR(err);
63659+
63660+ /* set pointer for parent subject */
63661+ if (s_tmp->parent_subject) {
63662+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
63663+
63664+ if (IS_ERR(s_tmp2))
63665+ return s_tmp2;
63666+
63667+ s_tmp->parent_subject = s_tmp2;
63668+ }
63669+
63670+ /* add in ip acls */
63671+
63672+ if (!s_tmp->ip_num) {
63673+ s_tmp->ips = NULL;
63674+ goto insert;
63675+ }
63676+
63677+ i_tmp =
63678+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
63679+ sizeof (struct acl_ip_label *));
63680+
63681+ if (!i_tmp)
63682+ return ERR_PTR(-ENOMEM);
63683+
63684+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
63685+ *(i_tmp + i_num) =
63686+ (struct acl_ip_label *)
63687+ acl_alloc(sizeof (struct acl_ip_label));
63688+ if (!*(i_tmp + i_num))
63689+ return ERR_PTR(-ENOMEM);
63690+
63691+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
63692+ return ERR_PTR(-EFAULT);
63693+
63694+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
63695+ return ERR_PTR(-EFAULT);
63696+
63697+ if ((*(i_tmp + i_num))->iface == NULL)
63698+ continue;
63699+
63700+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
63701+ if (err)
63702+ return ERR_PTR(err);
63703+ }
63704+
63705+ s_tmp->ips = i_tmp;
63706+
63707+insert:
63708+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
63709+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
63710+ return ERR_PTR(-ENOMEM);
63711+
63712+ return s_tmp;
63713+}
63714+
63715+static int
63716+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
63717+{
63718+ struct acl_subject_label s_pre;
63719+ struct acl_subject_label * ret;
63720+ int err;
63721+
63722+ while (userp) {
63723+ if (copy_acl_subject_label(&s_pre, userp))
63724+ return -EFAULT;
63725+
63726+ ret = do_copy_user_subj(userp, role, NULL);
63727+
63728+ err = PTR_ERR(ret);
63729+ if (IS_ERR(ret))
63730+ return err;
63731+
63732+ insert_acl_subj_label(ret, role);
63733+
63734+ userp = s_pre.prev;
63735+ }
63736+
63737+ return 0;
63738+}
63739+
63740+static int
63741+copy_user_acl(struct gr_arg *arg)
63742+{
63743+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
63744+ struct acl_subject_label *subj_list;
63745+ struct sprole_pw *sptmp;
63746+ struct gr_hash_struct *ghash;
63747+ uid_t *domainlist;
63748+ unsigned int r_num;
63749+ int err = 0;
63750+ __u16 i;
63751+ __u32 num_subjs;
63752+
63753+ /* we need a default and kernel role */
63754+ if (arg->role_db.num_roles < 2)
63755+ return -EINVAL;
63756+
63757+ /* copy special role authentication info from userspace */
63758+
63759+ num_sprole_pws = arg->num_sprole_pws;
63760+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
63761+
63762+ if (!acl_special_roles && num_sprole_pws)
63763+ return -ENOMEM;
63764+
63765+ for (i = 0; i < num_sprole_pws; i++) {
63766+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
63767+ if (!sptmp)
63768+ return -ENOMEM;
63769+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
63770+ return -EFAULT;
63771+
63772+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
63773+ if (err)
63774+ return err;
63775+
63776+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
63777+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
63778+#endif
63779+
63780+ acl_special_roles[i] = sptmp;
63781+ }
63782+
63783+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
63784+
63785+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
63786+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
63787+
63788+ if (!r_tmp)
63789+ return -ENOMEM;
63790+
63791+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
63792+ return -EFAULT;
63793+
63794+ if (copy_acl_role_label(r_tmp, r_utmp2))
63795+ return -EFAULT;
63796+
63797+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
63798+ if (err)
63799+ return err;
63800+
63801+ if (!strcmp(r_tmp->rolename, "default")
63802+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
63803+ default_role = r_tmp;
63804+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
63805+ kernel_role = r_tmp;
63806+ }
63807+
63808+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
63809+ return -ENOMEM;
63810+
63811+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
63812+ return -EFAULT;
63813+
63814+ r_tmp->hash = ghash;
63815+
63816+ num_subjs = count_user_subjs(r_tmp->hash->first);
63817+
63818+ r_tmp->subj_hash_size = num_subjs;
63819+ r_tmp->subj_hash =
63820+ (struct acl_subject_label **)
63821+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
63822+
63823+ if (!r_tmp->subj_hash)
63824+ return -ENOMEM;
63825+
63826+ err = copy_user_allowedips(r_tmp);
63827+ if (err)
63828+ return err;
63829+
63830+ /* copy domain info */
63831+ if (r_tmp->domain_children != NULL) {
63832+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
63833+ if (domainlist == NULL)
63834+ return -ENOMEM;
63835+
63836+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
63837+ return -EFAULT;
63838+
63839+ r_tmp->domain_children = domainlist;
63840+ }
63841+
63842+ err = copy_user_transitions(r_tmp);
63843+ if (err)
63844+ return err;
63845+
63846+ memset(r_tmp->subj_hash, 0,
63847+ r_tmp->subj_hash_size *
63848+ sizeof (struct acl_subject_label *));
63849+
63850+ /* acquire the list of subjects, then NULL out
63851+ the list prior to parsing the subjects for this role,
63852+ as during this parsing the list is replaced with a list
63853+ of *nested* subjects for the role
63854+ */
63855+ subj_list = r_tmp->hash->first;
63856+
63857+ /* set nested subject list to null */
63858+ r_tmp->hash->first = NULL;
63859+
63860+ err = copy_user_subjs(subj_list, r_tmp);
63861+
63862+ if (err)
63863+ return err;
63864+
63865+ insert_acl_role_label(r_tmp);
63866+ }
63867+
63868+ if (default_role == NULL || kernel_role == NULL)
63869+ return -EINVAL;
63870+
63871+ return err;
63872+}
63873+
63874+static int
63875+gracl_init(struct gr_arg *args)
63876+{
63877+ int error = 0;
63878+
63879+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
63880+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
63881+
63882+ if (init_variables(args)) {
63883+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
63884+ error = -ENOMEM;
63885+ free_variables();
63886+ goto out;
63887+ }
63888+
63889+ error = copy_user_acl(args);
63890+ free_init_variables();
63891+ if (error) {
63892+ free_variables();
63893+ goto out;
63894+ }
63895+
63896+ if ((error = gr_set_acls(0))) {
63897+ free_variables();
63898+ goto out;
63899+ }
63900+
63901+ pax_open_kernel();
63902+ gr_status |= GR_READY;
63903+ pax_close_kernel();
63904+
63905+ out:
63906+ return error;
63907+}
63908+
63909+/* derived from glibc fnmatch() 0: match, 1: no match*/
63910+
63911+static int
63912+glob_match(const char *p, const char *n)
63913+{
63914+ char c;
63915+
63916+ while ((c = *p++) != '\0') {
63917+ switch (c) {
63918+ case '?':
63919+ if (*n == '\0')
63920+ return 1;
63921+ else if (*n == '/')
63922+ return 1;
63923+ break;
63924+ case '\\':
63925+ if (*n != c)
63926+ return 1;
63927+ break;
63928+ case '*':
63929+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
63930+ if (*n == '/')
63931+ return 1;
63932+ else if (c == '?') {
63933+ if (*n == '\0')
63934+ return 1;
63935+ else
63936+ ++n;
63937+ }
63938+ }
63939+ if (c == '\0') {
63940+ return 0;
63941+ } else {
63942+ const char *endp;
63943+
63944+ if ((endp = strchr(n, '/')) == NULL)
63945+ endp = n + strlen(n);
63946+
63947+ if (c == '[') {
63948+ for (--p; n < endp; ++n)
63949+ if (!glob_match(p, n))
63950+ return 0;
63951+ } else if (c == '/') {
63952+ while (*n != '\0' && *n != '/')
63953+ ++n;
63954+ if (*n == '/' && !glob_match(p, n + 1))
63955+ return 0;
63956+ } else {
63957+ for (--p; n < endp; ++n)
63958+ if (*n == c && !glob_match(p, n))
63959+ return 0;
63960+ }
63961+
63962+ return 1;
63963+ }
63964+ case '[':
63965+ {
63966+ int not;
63967+ char cold;
63968+
63969+ if (*n == '\0' || *n == '/')
63970+ return 1;
63971+
63972+ not = (*p == '!' || *p == '^');
63973+ if (not)
63974+ ++p;
63975+
63976+ c = *p++;
63977+ for (;;) {
63978+ unsigned char fn = (unsigned char)*n;
63979+
63980+ if (c == '\0')
63981+ return 1;
63982+ else {
63983+ if (c == fn)
63984+ goto matched;
63985+ cold = c;
63986+ c = *p++;
63987+
63988+ if (c == '-' && *p != ']') {
63989+ unsigned char cend = *p++;
63990+
63991+ if (cend == '\0')
63992+ return 1;
63993+
63994+ if (cold <= fn && fn <= cend)
63995+ goto matched;
63996+
63997+ c = *p++;
63998+ }
63999+ }
64000+
64001+ if (c == ']')
64002+ break;
64003+ }
64004+ if (!not)
64005+ return 1;
64006+ break;
64007+ matched:
64008+ while (c != ']') {
64009+ if (c == '\0')
64010+ return 1;
64011+
64012+ c = *p++;
64013+ }
64014+ if (not)
64015+ return 1;
64016+ }
64017+ break;
64018+ default:
64019+ if (c != *n)
64020+ return 1;
64021+ }
64022+
64023+ ++n;
64024+ }
64025+
64026+ if (*n == '\0')
64027+ return 0;
64028+
64029+ if (*n == '/')
64030+ return 0;
64031+
64032+ return 1;
64033+}
64034+
64035+static struct acl_object_label *
64036+chk_glob_label(struct acl_object_label *globbed,
64037+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
64038+{
64039+ struct acl_object_label *tmp;
64040+
64041+ if (*path == NULL)
64042+ *path = gr_to_filename_nolock(dentry, mnt);
64043+
64044+ tmp = globbed;
64045+
64046+ while (tmp) {
64047+ if (!glob_match(tmp->filename, *path))
64048+ return tmp;
64049+ tmp = tmp->next;
64050+ }
64051+
64052+ return NULL;
64053+}
64054+
64055+static struct acl_object_label *
64056+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
64057+ const ino_t curr_ino, const dev_t curr_dev,
64058+ const struct acl_subject_label *subj, char **path, const int checkglob)
64059+{
64060+ struct acl_subject_label *tmpsubj;
64061+ struct acl_object_label *retval;
64062+ struct acl_object_label *retval2;
64063+
64064+ tmpsubj = (struct acl_subject_label *) subj;
64065+ read_lock(&gr_inode_lock);
64066+ do {
64067+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
64068+ if (retval) {
64069+ if (checkglob && retval->globbed) {
64070+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
64071+ if (retval2)
64072+ retval = retval2;
64073+ }
64074+ break;
64075+ }
64076+ } while ((tmpsubj = tmpsubj->parent_subject));
64077+ read_unlock(&gr_inode_lock);
64078+
64079+ return retval;
64080+}
64081+
64082+static __inline__ struct acl_object_label *
64083+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
64084+ struct dentry *curr_dentry,
64085+ const struct acl_subject_label *subj, char **path, const int checkglob)
64086+{
64087+ int newglob = checkglob;
64088+ ino_t inode;
64089+ dev_t device;
64090+
64091+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
64092+ as we don't want a / * rule to match instead of the / object
64093+ don't do this for create lookups that call this function though, since they're looking up
64094+ on the parent and thus need globbing checks on all paths
64095+ */
64096+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
64097+ newglob = GR_NO_GLOB;
64098+
64099+ spin_lock(&curr_dentry->d_lock);
64100+ inode = curr_dentry->d_inode->i_ino;
64101+ device = __get_dev(curr_dentry);
64102+ spin_unlock(&curr_dentry->d_lock);
64103+
64104+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
64105+}
64106+
64107+#ifdef CONFIG_HUGETLBFS
64108+static inline bool
64109+is_hugetlbfs_mnt(const struct vfsmount *mnt)
64110+{
64111+ int i;
64112+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
64113+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
64114+ return true;
64115+ }
64116+
64117+ return false;
64118+}
64119+#endif
64120+
64121+static struct acl_object_label *
64122+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64123+ const struct acl_subject_label *subj, char *path, const int checkglob)
64124+{
64125+ struct dentry *dentry = (struct dentry *) l_dentry;
64126+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
64127+ struct mount *real_mnt = real_mount(mnt);
64128+ struct acl_object_label *retval;
64129+ struct dentry *parent;
64130+
64131+ br_read_lock(&vfsmount_lock);
64132+ write_seqlock(&rename_lock);
64133+
64134+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
64135+#ifdef CONFIG_NET
64136+ mnt == sock_mnt ||
64137+#endif
64138+#ifdef CONFIG_HUGETLBFS
64139+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
64140+#endif
64141+ /* ignore Eric Biederman */
64142+ IS_PRIVATE(l_dentry->d_inode))) {
64143+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
64144+ goto out;
64145+ }
64146+
64147+ for (;;) {
64148+ if (dentry == real_root.dentry && mnt == real_root.mnt)
64149+ break;
64150+
64151+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
64152+ if (!mnt_has_parent(real_mnt))
64153+ break;
64154+
64155+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
64156+ if (retval != NULL)
64157+ goto out;
64158+
64159+ dentry = real_mnt->mnt_mountpoint;
64160+ real_mnt = real_mnt->mnt_parent;
64161+ mnt = &real_mnt->mnt;
64162+ continue;
64163+ }
64164+
64165+ parent = dentry->d_parent;
64166+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
64167+ if (retval != NULL)
64168+ goto out;
64169+
64170+ dentry = parent;
64171+ }
64172+
64173+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
64174+
64175+ /* real_root is pinned so we don't have to hold a reference */
64176+ if (retval == NULL)
64177+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
64178+out:
64179+ write_sequnlock(&rename_lock);
64180+ br_read_unlock(&vfsmount_lock);
64181+
64182+ BUG_ON(retval == NULL);
64183+
64184+ return retval;
64185+}
64186+
64187+static __inline__ struct acl_object_label *
64188+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64189+ const struct acl_subject_label *subj)
64190+{
64191+ char *path = NULL;
64192+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
64193+}
64194+
64195+static __inline__ struct acl_object_label *
64196+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64197+ const struct acl_subject_label *subj)
64198+{
64199+ char *path = NULL;
64200+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
64201+}
64202+
64203+static __inline__ struct acl_object_label *
64204+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64205+ const struct acl_subject_label *subj, char *path)
64206+{
64207+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
64208+}
64209+
64210+static struct acl_subject_label *
64211+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64212+ const struct acl_role_label *role)
64213+{
64214+ struct dentry *dentry = (struct dentry *) l_dentry;
64215+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
64216+ struct mount *real_mnt = real_mount(mnt);
64217+ struct acl_subject_label *retval;
64218+ struct dentry *parent;
64219+
64220+ br_read_lock(&vfsmount_lock);
64221+ write_seqlock(&rename_lock);
64222+
64223+ for (;;) {
64224+ if (dentry == real_root.dentry && mnt == real_root.mnt)
64225+ break;
64226+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
64227+ if (!mnt_has_parent(real_mnt))
64228+ break;
64229+
64230+ spin_lock(&dentry->d_lock);
64231+ read_lock(&gr_inode_lock);
64232+ retval =
64233+ lookup_acl_subj_label(dentry->d_inode->i_ino,
64234+ __get_dev(dentry), role);
64235+ read_unlock(&gr_inode_lock);
64236+ spin_unlock(&dentry->d_lock);
64237+ if (retval != NULL)
64238+ goto out;
64239+
64240+ dentry = real_mnt->mnt_mountpoint;
64241+ real_mnt = real_mnt->mnt_parent;
64242+ mnt = &real_mnt->mnt;
64243+ continue;
64244+ }
64245+
64246+ spin_lock(&dentry->d_lock);
64247+ read_lock(&gr_inode_lock);
64248+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
64249+ __get_dev(dentry), role);
64250+ read_unlock(&gr_inode_lock);
64251+ parent = dentry->d_parent;
64252+ spin_unlock(&dentry->d_lock);
64253+
64254+ if (retval != NULL)
64255+ goto out;
64256+
64257+ dentry = parent;
64258+ }
64259+
64260+ spin_lock(&dentry->d_lock);
64261+ read_lock(&gr_inode_lock);
64262+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
64263+ __get_dev(dentry), role);
64264+ read_unlock(&gr_inode_lock);
64265+ spin_unlock(&dentry->d_lock);
64266+
64267+ if (unlikely(retval == NULL)) {
64268+ /* real_root is pinned, we don't need to hold a reference */
64269+ read_lock(&gr_inode_lock);
64270+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
64271+ __get_dev(real_root.dentry), role);
64272+ read_unlock(&gr_inode_lock);
64273+ }
64274+out:
64275+ write_sequnlock(&rename_lock);
64276+ br_read_unlock(&vfsmount_lock);
64277+
64278+ BUG_ON(retval == NULL);
64279+
64280+ return retval;
64281+}
64282+
64283+static void
64284+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
64285+{
64286+ struct task_struct *task = current;
64287+ const struct cred *cred = current_cred();
64288+
64289+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
64290+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
64291+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
64292+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
64293+
64294+ return;
64295+}
64296+
64297+static void
64298+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
64299+{
64300+ struct task_struct *task = current;
64301+ const struct cred *cred = current_cred();
64302+
64303+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
64304+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
64305+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
64306+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
64307+
64308+ return;
64309+}
64310+
64311+static void
64312+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
64313+{
64314+ struct task_struct *task = current;
64315+ const struct cred *cred = current_cred();
64316+
64317+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
64318+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
64319+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
64320+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
64321+
64322+ return;
64323+}
64324+
64325+__u32
64326+gr_search_file(const struct dentry * dentry, const __u32 mode,
64327+ const struct vfsmount * mnt)
64328+{
64329+ __u32 retval = mode;
64330+ struct acl_subject_label *curracl;
64331+ struct acl_object_label *currobj;
64332+
64333+ if (unlikely(!(gr_status & GR_READY)))
64334+ return (mode & ~GR_AUDITS);
64335+
64336+ curracl = current->acl;
64337+
64338+ currobj = chk_obj_label(dentry, mnt, curracl);
64339+ retval = currobj->mode & mode;
64340+
64341+ /* if we're opening a specified transfer file for writing
64342+ (e.g. /dev/initctl), then transfer our role to init
64343+ */
64344+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
64345+ current->role->roletype & GR_ROLE_PERSIST)) {
64346+ struct task_struct *task = init_pid_ns.child_reaper;
64347+
64348+ if (task->role != current->role) {
64349+ task->acl_sp_role = 0;
64350+ task->acl_role_id = current->acl_role_id;
64351+ task->role = current->role;
64352+ rcu_read_lock();
64353+ read_lock(&grsec_exec_file_lock);
64354+ gr_apply_subject_to_task(task);
64355+ read_unlock(&grsec_exec_file_lock);
64356+ rcu_read_unlock();
64357+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
64358+ }
64359+ }
64360+
64361+ if (unlikely
64362+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
64363+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
64364+ __u32 new_mode = mode;
64365+
64366+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
64367+
64368+ retval = new_mode;
64369+
64370+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
64371+ new_mode |= GR_INHERIT;
64372+
64373+ if (!(mode & GR_NOLEARN))
64374+ gr_log_learn(dentry, mnt, new_mode);
64375+ }
64376+
64377+ return retval;
64378+}
64379+
64380+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
64381+ const struct dentry *parent,
64382+ const struct vfsmount *mnt)
64383+{
64384+ struct name_entry *match;
64385+ struct acl_object_label *matchpo;
64386+ struct acl_subject_label *curracl;
64387+ char *path;
64388+
64389+ if (unlikely(!(gr_status & GR_READY)))
64390+ return NULL;
64391+
64392+ preempt_disable();
64393+ path = gr_to_filename_rbac(new_dentry, mnt);
64394+ match = lookup_name_entry_create(path);
64395+
64396+ curracl = current->acl;
64397+
64398+ if (match) {
64399+ read_lock(&gr_inode_lock);
64400+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
64401+ read_unlock(&gr_inode_lock);
64402+
64403+ if (matchpo) {
64404+ preempt_enable();
64405+ return matchpo;
64406+ }
64407+ }
64408+
64409+ // lookup parent
64410+
64411+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
64412+
64413+ preempt_enable();
64414+ return matchpo;
64415+}
64416+
64417+__u32
64418+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
64419+ const struct vfsmount * mnt, const __u32 mode)
64420+{
64421+ struct acl_object_label *matchpo;
64422+ __u32 retval;
64423+
64424+ if (unlikely(!(gr_status & GR_READY)))
64425+ return (mode & ~GR_AUDITS);
64426+
64427+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
64428+
64429+ retval = matchpo->mode & mode;
64430+
64431+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
64432+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
64433+ __u32 new_mode = mode;
64434+
64435+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
64436+
64437+ gr_log_learn(new_dentry, mnt, new_mode);
64438+ return new_mode;
64439+ }
64440+
64441+ return retval;
64442+}
64443+
64444+__u32
64445+gr_check_link(const struct dentry * new_dentry,
64446+ const struct dentry * parent_dentry,
64447+ const struct vfsmount * parent_mnt,
64448+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
64449+{
64450+ struct acl_object_label *obj;
64451+ __u32 oldmode, newmode;
64452+ __u32 needmode;
64453+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
64454+ GR_DELETE | GR_INHERIT;
64455+
64456+ if (unlikely(!(gr_status & GR_READY)))
64457+ return (GR_CREATE | GR_LINK);
64458+
64459+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
64460+ oldmode = obj->mode;
64461+
64462+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
64463+ newmode = obj->mode;
64464+
64465+ needmode = newmode & checkmodes;
64466+
64467+ // old name for hardlink must have at least the permissions of the new name
64468+ if ((oldmode & needmode) != needmode)
64469+ goto bad;
64470+
64471+ // if old name had restrictions/auditing, make sure the new name does as well
64472+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
64473+
64474+ // don't allow hardlinking of suid/sgid/fcapped files without permission
64475+ if (is_privileged_binary(old_dentry))
64476+ needmode |= GR_SETID;
64477+
64478+ if ((newmode & needmode) != needmode)
64479+ goto bad;
64480+
64481+ // enforce minimum permissions
64482+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
64483+ return newmode;
64484+bad:
64485+ needmode = oldmode;
64486+ if (is_privileged_binary(old_dentry))
64487+ needmode |= GR_SETID;
64488+
64489+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
64490+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
64491+ return (GR_CREATE | GR_LINK);
64492+ } else if (newmode & GR_SUPPRESS)
64493+ return GR_SUPPRESS;
64494+ else
64495+ return 0;
64496+}
64497+
64498+int
64499+gr_check_hidden_task(const struct task_struct *task)
64500+{
64501+ if (unlikely(!(gr_status & GR_READY)))
64502+ return 0;
64503+
64504+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
64505+ return 1;
64506+
64507+ return 0;
64508+}
64509+
64510+int
64511+gr_check_protected_task(const struct task_struct *task)
64512+{
64513+ if (unlikely(!(gr_status & GR_READY) || !task))
64514+ return 0;
64515+
64516+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
64517+ task->acl != current->acl)
64518+ return 1;
64519+
64520+ return 0;
64521+}
64522+
64523+int
64524+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
64525+{
64526+ struct task_struct *p;
64527+ int ret = 0;
64528+
64529+ if (unlikely(!(gr_status & GR_READY) || !pid))
64530+ return ret;
64531+
64532+ read_lock(&tasklist_lock);
64533+ do_each_pid_task(pid, type, p) {
64534+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
64535+ p->acl != current->acl) {
64536+ ret = 1;
64537+ goto out;
64538+ }
64539+ } while_each_pid_task(pid, type, p);
64540+out:
64541+ read_unlock(&tasklist_lock);
64542+
64543+ return ret;
64544+}
64545+
64546+void
64547+gr_copy_label(struct task_struct *tsk)
64548+{
64549+ tsk->signal->used_accept = 0;
64550+ tsk->acl_sp_role = 0;
64551+ tsk->acl_role_id = current->acl_role_id;
64552+ tsk->acl = current->acl;
64553+ tsk->role = current->role;
64554+ tsk->signal->curr_ip = current->signal->curr_ip;
64555+ tsk->signal->saved_ip = current->signal->saved_ip;
64556+ if (current->exec_file)
64557+ get_file(current->exec_file);
64558+ tsk->exec_file = current->exec_file;
64559+ tsk->is_writable = current->is_writable;
64560+ if (unlikely(current->signal->used_accept)) {
64561+ current->signal->curr_ip = 0;
64562+ current->signal->saved_ip = 0;
64563+ }
64564+
64565+ return;
64566+}
64567+
64568+static void
64569+gr_set_proc_res(struct task_struct *task)
64570+{
64571+ struct acl_subject_label *proc;
64572+ unsigned short i;
64573+
64574+ proc = task->acl;
64575+
64576+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
64577+ return;
64578+
64579+ for (i = 0; i < RLIM_NLIMITS; i++) {
64580+ if (!(proc->resmask & (1U << i)))
64581+ continue;
64582+
64583+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
64584+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
64585+
64586+ if (i == RLIMIT_CPU)
64587+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
64588+ }
64589+
64590+ return;
64591+}
64592+
64593+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
64594+
64595+int
64596+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
64597+{
64598+ unsigned int i;
64599+ __u16 num;
64600+ uid_t *uidlist;
64601+ uid_t curuid;
64602+ int realok = 0;
64603+ int effectiveok = 0;
64604+ int fsok = 0;
64605+ uid_t globalreal, globaleffective, globalfs;
64606+
64607+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
64608+ struct user_struct *user;
64609+
64610+ if (!uid_valid(real))
64611+ goto skipit;
64612+
64613+ /* find user based on global namespace */
64614+
64615+ globalreal = GR_GLOBAL_UID(real);
64616+
64617+ user = find_user(make_kuid(&init_user_ns, globalreal));
64618+ if (user == NULL)
64619+ goto skipit;
64620+
64621+ if (gr_process_kernel_setuid_ban(user)) {
64622+ /* for find_user */
64623+ free_uid(user);
64624+ return 1;
64625+ }
64626+
64627+ /* for find_user */
64628+ free_uid(user);
64629+
64630+skipit:
64631+#endif
64632+
64633+ if (unlikely(!(gr_status & GR_READY)))
64634+ return 0;
64635+
64636+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
64637+ gr_log_learn_uid_change(real, effective, fs);
64638+
64639+ num = current->acl->user_trans_num;
64640+ uidlist = current->acl->user_transitions;
64641+
64642+ if (uidlist == NULL)
64643+ return 0;
64644+
64645+ if (!uid_valid(real)) {
64646+ realok = 1;
64647+ globalreal = (uid_t)-1;
64648+ } else {
64649+ globalreal = GR_GLOBAL_UID(real);
64650+ }
64651+ if (!uid_valid(effective)) {
64652+ effectiveok = 1;
64653+ globaleffective = (uid_t)-1;
64654+ } else {
64655+ globaleffective = GR_GLOBAL_UID(effective);
64656+ }
64657+ if (!uid_valid(fs)) {
64658+ fsok = 1;
64659+ globalfs = (uid_t)-1;
64660+ } else {
64661+ globalfs = GR_GLOBAL_UID(fs);
64662+ }
64663+
64664+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
64665+ for (i = 0; i < num; i++) {
64666+ curuid = uidlist[i];
64667+ if (globalreal == curuid)
64668+ realok = 1;
64669+ if (globaleffective == curuid)
64670+ effectiveok = 1;
64671+ if (globalfs == curuid)
64672+ fsok = 1;
64673+ }
64674+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
64675+ for (i = 0; i < num; i++) {
64676+ curuid = uidlist[i];
64677+ if (globalreal == curuid)
64678+ break;
64679+ if (globaleffective == curuid)
64680+ break;
64681+ if (globalfs == curuid)
64682+ break;
64683+ }
64684+ /* not in deny list */
64685+ if (i == num) {
64686+ realok = 1;
64687+ effectiveok = 1;
64688+ fsok = 1;
64689+ }
64690+ }
64691+
64692+ if (realok && effectiveok && fsok)
64693+ return 0;
64694+ else {
64695+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
64696+ return 1;
64697+ }
64698+}
64699+
64700+int
64701+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
64702+{
64703+ unsigned int i;
64704+ __u16 num;
64705+ gid_t *gidlist;
64706+ gid_t curgid;
64707+ int realok = 0;
64708+ int effectiveok = 0;
64709+ int fsok = 0;
64710+ gid_t globalreal, globaleffective, globalfs;
64711+
64712+ if (unlikely(!(gr_status & GR_READY)))
64713+ return 0;
64714+
64715+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
64716+ gr_log_learn_gid_change(real, effective, fs);
64717+
64718+ num = current->acl->group_trans_num;
64719+ gidlist = current->acl->group_transitions;
64720+
64721+ if (gidlist == NULL)
64722+ return 0;
64723+
64724+ if (!gid_valid(real)) {
64725+ realok = 1;
64726+ globalreal = (gid_t)-1;
64727+ } else {
64728+ globalreal = GR_GLOBAL_GID(real);
64729+ }
64730+ if (!gid_valid(effective)) {
64731+ effectiveok = 1;
64732+ globaleffective = (gid_t)-1;
64733+ } else {
64734+ globaleffective = GR_GLOBAL_GID(effective);
64735+ }
64736+ if (!gid_valid(fs)) {
64737+ fsok = 1;
64738+ globalfs = (gid_t)-1;
64739+ } else {
64740+ globalfs = GR_GLOBAL_GID(fs);
64741+ }
64742+
64743+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
64744+ for (i = 0; i < num; i++) {
64745+ curgid = gidlist[i];
64746+ if (globalreal == curgid)
64747+ realok = 1;
64748+ if (globaleffective == curgid)
64749+ effectiveok = 1;
64750+ if (globalfs == curgid)
64751+ fsok = 1;
64752+ }
64753+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
64754+ for (i = 0; i < num; i++) {
64755+ curgid = gidlist[i];
64756+ if (globalreal == curgid)
64757+ break;
64758+ if (globaleffective == curgid)
64759+ break;
64760+ if (globalfs == curgid)
64761+ break;
64762+ }
64763+ /* not in deny list */
64764+ if (i == num) {
64765+ realok = 1;
64766+ effectiveok = 1;
64767+ fsok = 1;
64768+ }
64769+ }
64770+
64771+ if (realok && effectiveok && fsok)
64772+ return 0;
64773+ else {
64774+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
64775+ return 1;
64776+ }
64777+}
64778+
64779+extern int gr_acl_is_capable(const int cap);
64780+
64781+void
64782+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
64783+{
64784+ struct acl_role_label *role = task->role;
64785+ struct acl_subject_label *subj = NULL;
64786+ struct acl_object_label *obj;
64787+ struct file *filp;
64788+ uid_t uid;
64789+ gid_t gid;
64790+
64791+ if (unlikely(!(gr_status & GR_READY)))
64792+ return;
64793+
64794+ uid = GR_GLOBAL_UID(kuid);
64795+ gid = GR_GLOBAL_GID(kgid);
64796+
64797+ filp = task->exec_file;
64798+
64799+ /* kernel process, we'll give them the kernel role */
64800+ if (unlikely(!filp)) {
64801+ task->role = kernel_role;
64802+ task->acl = kernel_role->root_label;
64803+ return;
64804+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
64805+ role = lookup_acl_role_label(task, uid, gid);
64806+
64807+ /* don't change the role if we're not a privileged process */
64808+ if (role && task->role != role &&
64809+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
64810+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
64811+ return;
64812+
64813+ /* perform subject lookup in possibly new role
64814+ we can use this result below in the case where role == task->role
64815+ */
64816+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
64817+
64818+ /* if we changed uid/gid, but result in the same role
64819+ and are using inheritance, don't lose the inherited subject
64820+ if current subject is other than what normal lookup
64821+ would result in, we arrived via inheritance, don't
64822+ lose subject
64823+ */
64824+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
64825+ (subj == task->acl)))
64826+ task->acl = subj;
64827+
64828+ task->role = role;
64829+
64830+ task->is_writable = 0;
64831+
64832+ /* ignore additional mmap checks for processes that are writable
64833+ by the default ACL */
64834+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
64835+ if (unlikely(obj->mode & GR_WRITE))
64836+ task->is_writable = 1;
64837+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
64838+ if (unlikely(obj->mode & GR_WRITE))
64839+ task->is_writable = 1;
64840+
64841+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
64842+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
64843+#endif
64844+
64845+ gr_set_proc_res(task);
64846+
64847+ return;
64848+}
64849+
64850+int
64851+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
64852+ const int unsafe_flags)
64853+{
64854+ struct task_struct *task = current;
64855+ struct acl_subject_label *newacl;
64856+ struct acl_object_label *obj;
64857+ __u32 retmode;
64858+
64859+ if (unlikely(!(gr_status & GR_READY)))
64860+ return 0;
64861+
64862+ newacl = chk_subj_label(dentry, mnt, task->role);
64863+
64864+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
64865+ did an exec
64866+ */
64867+ rcu_read_lock();
64868+ read_lock(&tasklist_lock);
64869+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
64870+ (task->parent->acl->mode & GR_POVERRIDE))) {
64871+ read_unlock(&tasklist_lock);
64872+ rcu_read_unlock();
64873+ goto skip_check;
64874+ }
64875+ read_unlock(&tasklist_lock);
64876+ rcu_read_unlock();
64877+
64878+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
64879+ !(task->role->roletype & GR_ROLE_GOD) &&
64880+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
64881+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
64882+ if (unsafe_flags & LSM_UNSAFE_SHARE)
64883+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
64884+ else
64885+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
64886+ return -EACCES;
64887+ }
64888+
64889+skip_check:
64890+
64891+ obj = chk_obj_label(dentry, mnt, task->acl);
64892+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
64893+
64894+ if (!(task->acl->mode & GR_INHERITLEARN) &&
64895+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
64896+ if (obj->nested)
64897+ task->acl = obj->nested;
64898+ else
64899+ task->acl = newacl;
64900+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
64901+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
64902+
64903+ task->is_writable = 0;
64904+
64905+ /* ignore additional mmap checks for processes that are writable
64906+ by the default ACL */
64907+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
64908+ if (unlikely(obj->mode & GR_WRITE))
64909+ task->is_writable = 1;
64910+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
64911+ if (unlikely(obj->mode & GR_WRITE))
64912+ task->is_writable = 1;
64913+
64914+ gr_set_proc_res(task);
64915+
64916+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
64917+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
64918+#endif
64919+ return 0;
64920+}
64921+
64922+/* always called with valid inodev ptr */
64923+static void
64924+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
64925+{
64926+ struct acl_object_label *matchpo;
64927+ struct acl_subject_label *matchps;
64928+ struct acl_subject_label *subj;
64929+ struct acl_role_label *role;
64930+ unsigned int x;
64931+
64932+ FOR_EACH_ROLE_START(role)
64933+ FOR_EACH_SUBJECT_START(role, subj, x)
64934+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
64935+ matchpo->mode |= GR_DELETED;
64936+ FOR_EACH_SUBJECT_END(subj,x)
64937+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
64938+ /* nested subjects aren't in the role's subj_hash table */
64939+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
64940+ matchpo->mode |= GR_DELETED;
64941+ FOR_EACH_NESTED_SUBJECT_END(subj)
64942+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
64943+ matchps->mode |= GR_DELETED;
64944+ FOR_EACH_ROLE_END(role)
64945+
64946+ inodev->nentry->deleted = 1;
64947+
64948+ return;
64949+}
64950+
64951+void
64952+gr_handle_delete(const ino_t ino, const dev_t dev)
64953+{
64954+ struct inodev_entry *inodev;
64955+
64956+ if (unlikely(!(gr_status & GR_READY)))
64957+ return;
64958+
64959+ write_lock(&gr_inode_lock);
64960+ inodev = lookup_inodev_entry(ino, dev);
64961+ if (inodev != NULL)
64962+ do_handle_delete(inodev, ino, dev);
64963+ write_unlock(&gr_inode_lock);
64964+
64965+ return;
64966+}
64967+
64968+static void
64969+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
64970+ const ino_t newinode, const dev_t newdevice,
64971+ struct acl_subject_label *subj)
64972+{
64973+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
64974+ struct acl_object_label *match;
64975+
64976+ match = subj->obj_hash[index];
64977+
64978+ while (match && (match->inode != oldinode ||
64979+ match->device != olddevice ||
64980+ !(match->mode & GR_DELETED)))
64981+ match = match->next;
64982+
64983+ if (match && (match->inode == oldinode)
64984+ && (match->device == olddevice)
64985+ && (match->mode & GR_DELETED)) {
64986+ if (match->prev == NULL) {
64987+ subj->obj_hash[index] = match->next;
64988+ if (match->next != NULL)
64989+ match->next->prev = NULL;
64990+ } else {
64991+ match->prev->next = match->next;
64992+ if (match->next != NULL)
64993+ match->next->prev = match->prev;
64994+ }
64995+ match->prev = NULL;
64996+ match->next = NULL;
64997+ match->inode = newinode;
64998+ match->device = newdevice;
64999+ match->mode &= ~GR_DELETED;
65000+
65001+ insert_acl_obj_label(match, subj);
65002+ }
65003+
65004+ return;
65005+}
65006+
65007+static void
65008+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
65009+ const ino_t newinode, const dev_t newdevice,
65010+ struct acl_role_label *role)
65011+{
65012+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
65013+ struct acl_subject_label *match;
65014+
65015+ match = role->subj_hash[index];
65016+
65017+ while (match && (match->inode != oldinode ||
65018+ match->device != olddevice ||
65019+ !(match->mode & GR_DELETED)))
65020+ match = match->next;
65021+
65022+ if (match && (match->inode == oldinode)
65023+ && (match->device == olddevice)
65024+ && (match->mode & GR_DELETED)) {
65025+ if (match->prev == NULL) {
65026+ role->subj_hash[index] = match->next;
65027+ if (match->next != NULL)
65028+ match->next->prev = NULL;
65029+ } else {
65030+ match->prev->next = match->next;
65031+ if (match->next != NULL)
65032+ match->next->prev = match->prev;
65033+ }
65034+ match->prev = NULL;
65035+ match->next = NULL;
65036+ match->inode = newinode;
65037+ match->device = newdevice;
65038+ match->mode &= ~GR_DELETED;
65039+
65040+ insert_acl_subj_label(match, role);
65041+ }
65042+
65043+ return;
65044+}
65045+
65046+static void
65047+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
65048+ const ino_t newinode, const dev_t newdevice)
65049+{
65050+ unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
65051+ struct inodev_entry *match;
65052+
65053+ match = inodev_set.i_hash[index];
65054+
65055+ while (match && (match->nentry->inode != oldinode ||
65056+ match->nentry->device != olddevice || !match->nentry->deleted))
65057+ match = match->next;
65058+
65059+ if (match && (match->nentry->inode == oldinode)
65060+ && (match->nentry->device == olddevice) &&
65061+ match->nentry->deleted) {
65062+ if (match->prev == NULL) {
65063+ inodev_set.i_hash[index] = match->next;
65064+ if (match->next != NULL)
65065+ match->next->prev = NULL;
65066+ } else {
65067+ match->prev->next = match->next;
65068+ if (match->next != NULL)
65069+ match->next->prev = match->prev;
65070+ }
65071+ match->prev = NULL;
65072+ match->next = NULL;
65073+ match->nentry->inode = newinode;
65074+ match->nentry->device = newdevice;
65075+ match->nentry->deleted = 0;
65076+
65077+ insert_inodev_entry(match);
65078+ }
65079+
65080+ return;
65081+}
65082+
65083+static void
65084+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
65085+{
65086+ struct acl_subject_label *subj;
65087+ struct acl_role_label *role;
65088+ unsigned int x;
65089+
65090+ FOR_EACH_ROLE_START(role)
65091+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
65092+
65093+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
65094+ if ((subj->inode == ino) && (subj->device == dev)) {
65095+ subj->inode = ino;
65096+ subj->device = dev;
65097+ }
65098+ /* nested subjects aren't in the role's subj_hash table */
65099+ update_acl_obj_label(matchn->inode, matchn->device,
65100+ ino, dev, subj);
65101+ FOR_EACH_NESTED_SUBJECT_END(subj)
65102+ FOR_EACH_SUBJECT_START(role, subj, x)
65103+ update_acl_obj_label(matchn->inode, matchn->device,
65104+ ino, dev, subj);
65105+ FOR_EACH_SUBJECT_END(subj,x)
65106+ FOR_EACH_ROLE_END(role)
65107+
65108+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
65109+
65110+ return;
65111+}
65112+
65113+static void
65114+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
65115+ const struct vfsmount *mnt)
65116+{
65117+ ino_t ino = dentry->d_inode->i_ino;
65118+ dev_t dev = __get_dev(dentry);
65119+
65120+ __do_handle_create(matchn, ino, dev);
65121+
65122+ return;
65123+}
65124+
65125+void
65126+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
65127+{
65128+ struct name_entry *matchn;
65129+
65130+ if (unlikely(!(gr_status & GR_READY)))
65131+ return;
65132+
65133+ preempt_disable();
65134+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
65135+
65136+ if (unlikely((unsigned long)matchn)) {
65137+ write_lock(&gr_inode_lock);
65138+ do_handle_create(matchn, dentry, mnt);
65139+ write_unlock(&gr_inode_lock);
65140+ }
65141+ preempt_enable();
65142+
65143+ return;
65144+}
65145+
65146+void
65147+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
65148+{
65149+ struct name_entry *matchn;
65150+
65151+ if (unlikely(!(gr_status & GR_READY)))
65152+ return;
65153+
65154+ preempt_disable();
65155+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
65156+
65157+ if (unlikely((unsigned long)matchn)) {
65158+ write_lock(&gr_inode_lock);
65159+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
65160+ write_unlock(&gr_inode_lock);
65161+ }
65162+ preempt_enable();
65163+
65164+ return;
65165+}
65166+
65167+void
65168+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
65169+ struct dentry *old_dentry,
65170+ struct dentry *new_dentry,
65171+ struct vfsmount *mnt, const __u8 replace)
65172+{
65173+ struct name_entry *matchn;
65174+ struct inodev_entry *inodev;
65175+ struct inode *inode = new_dentry->d_inode;
65176+ ino_t old_ino = old_dentry->d_inode->i_ino;
65177+ dev_t old_dev = __get_dev(old_dentry);
65178+
65179+ /* vfs_rename swaps the name and parent link for old_dentry and
65180+ new_dentry
65181+ at this point, old_dentry has the new name, parent link, and inode
65182+ for the renamed file
65183+ if a file is being replaced by a rename, new_dentry has the inode
65184+ and name for the replaced file
65185+ */
65186+
65187+ if (unlikely(!(gr_status & GR_READY)))
65188+ return;
65189+
65190+ preempt_disable();
65191+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
65192+
65193+ /* we wouldn't have to check d_inode if it weren't for
65194+ NFS silly-renaming
65195+ */
65196+
65197+ write_lock(&gr_inode_lock);
65198+ if (unlikely(replace && inode)) {
65199+ ino_t new_ino = inode->i_ino;
65200+ dev_t new_dev = __get_dev(new_dentry);
65201+
65202+ inodev = lookup_inodev_entry(new_ino, new_dev);
65203+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
65204+ do_handle_delete(inodev, new_ino, new_dev);
65205+ }
65206+
65207+ inodev = lookup_inodev_entry(old_ino, old_dev);
65208+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
65209+ do_handle_delete(inodev, old_ino, old_dev);
65210+
65211+ if (unlikely((unsigned long)matchn))
65212+ do_handle_create(matchn, old_dentry, mnt);
65213+
65214+ write_unlock(&gr_inode_lock);
65215+ preempt_enable();
65216+
65217+ return;
65218+}
65219+
65220+static int
65221+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
65222+ unsigned char **sum)
65223+{
65224+ struct acl_role_label *r;
65225+ struct role_allowed_ip *ipp;
65226+ struct role_transition *trans;
65227+ unsigned int i;
65228+ int found = 0;
65229+ u32 curr_ip = current->signal->curr_ip;
65230+
65231+ current->signal->saved_ip = curr_ip;
65232+
65233+ /* check transition table */
65234+
65235+ for (trans = current->role->transitions; trans; trans = trans->next) {
65236+ if (!strcmp(rolename, trans->rolename)) {
65237+ found = 1;
65238+ break;
65239+ }
65240+ }
65241+
65242+ if (!found)
65243+ return 0;
65244+
65245+ /* handle special roles that do not require authentication
65246+ and check ip */
65247+
65248+ FOR_EACH_ROLE_START(r)
65249+ if (!strcmp(rolename, r->rolename) &&
65250+ (r->roletype & GR_ROLE_SPECIAL)) {
65251+ found = 0;
65252+ if (r->allowed_ips != NULL) {
65253+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
65254+ if ((ntohl(curr_ip) & ipp->netmask) ==
65255+ (ntohl(ipp->addr) & ipp->netmask))
65256+ found = 1;
65257+ }
65258+ } else
65259+ found = 2;
65260+ if (!found)
65261+ return 0;
65262+
65263+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
65264+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
65265+ *salt = NULL;
65266+ *sum = NULL;
65267+ return 1;
65268+ }
65269+ }
65270+ FOR_EACH_ROLE_END(r)
65271+
65272+ for (i = 0; i < num_sprole_pws; i++) {
65273+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
65274+ *salt = acl_special_roles[i]->salt;
65275+ *sum = acl_special_roles[i]->sum;
65276+ return 1;
65277+ }
65278+ }
65279+
65280+ return 0;
65281+}
65282+
65283+static void
65284+assign_special_role(char *rolename)
65285+{
65286+ struct acl_object_label *obj;
65287+ struct acl_role_label *r;
65288+ struct acl_role_label *assigned = NULL;
65289+ struct task_struct *tsk;
65290+ struct file *filp;
65291+
65292+ FOR_EACH_ROLE_START(r)
65293+ if (!strcmp(rolename, r->rolename) &&
65294+ (r->roletype & GR_ROLE_SPECIAL)) {
65295+ assigned = r;
65296+ break;
65297+ }
65298+ FOR_EACH_ROLE_END(r)
65299+
65300+ if (!assigned)
65301+ return;
65302+
65303+ read_lock(&tasklist_lock);
65304+ read_lock(&grsec_exec_file_lock);
65305+
65306+ tsk = current->real_parent;
65307+ if (tsk == NULL)
65308+ goto out_unlock;
65309+
65310+ filp = tsk->exec_file;
65311+ if (filp == NULL)
65312+ goto out_unlock;
65313+
65314+ tsk->is_writable = 0;
65315+
65316+ tsk->acl_sp_role = 1;
65317+ tsk->acl_role_id = ++acl_sp_role_value;
65318+ tsk->role = assigned;
65319+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
65320+
65321+ /* ignore additional mmap checks for processes that are writable
65322+ by the default ACL */
65323+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
65324+ if (unlikely(obj->mode & GR_WRITE))
65325+ tsk->is_writable = 1;
65326+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
65327+ if (unlikely(obj->mode & GR_WRITE))
65328+ tsk->is_writable = 1;
65329+
65330+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
65331+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
65332+#endif
65333+
65334+out_unlock:
65335+ read_unlock(&grsec_exec_file_lock);
65336+ read_unlock(&tasklist_lock);
65337+ return;
65338+}
65339+
65340+int gr_check_secure_terminal(struct task_struct *task)
65341+{
65342+ struct task_struct *p, *p2, *p3;
65343+ struct files_struct *files;
65344+ struct fdtable *fdt;
65345+ struct file *our_file = NULL, *file;
65346+ int i;
65347+
65348+ if (task->signal->tty == NULL)
65349+ return 1;
65350+
65351+ files = get_files_struct(task);
65352+ if (files != NULL) {
65353+ rcu_read_lock();
65354+ fdt = files_fdtable(files);
65355+ for (i=0; i < fdt->max_fds; i++) {
65356+ file = fcheck_files(files, i);
65357+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
65358+ get_file(file);
65359+ our_file = file;
65360+ }
65361+ }
65362+ rcu_read_unlock();
65363+ put_files_struct(files);
65364+ }
65365+
65366+ if (our_file == NULL)
65367+ return 1;
65368+
65369+ read_lock(&tasklist_lock);
65370+ do_each_thread(p2, p) {
65371+ files = get_files_struct(p);
65372+ if (files == NULL ||
65373+ (p->signal && p->signal->tty == task->signal->tty)) {
65374+ if (files != NULL)
65375+ put_files_struct(files);
65376+ continue;
65377+ }
65378+ rcu_read_lock();
65379+ fdt = files_fdtable(files);
65380+ for (i=0; i < fdt->max_fds; i++) {
65381+ file = fcheck_files(files, i);
65382+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
65383+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
65384+ p3 = task;
65385+ while (task_pid_nr(p3) > 0) {
65386+ if (p3 == p)
65387+ break;
65388+ p3 = p3->real_parent;
65389+ }
65390+ if (p3 == p)
65391+ break;
65392+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
65393+ gr_handle_alertkill(p);
65394+ rcu_read_unlock();
65395+ put_files_struct(files);
65396+ read_unlock(&tasklist_lock);
65397+ fput(our_file);
65398+ return 0;
65399+ }
65400+ }
65401+ rcu_read_unlock();
65402+ put_files_struct(files);
65403+ } while_each_thread(p2, p);
65404+ read_unlock(&tasklist_lock);
65405+
65406+ fput(our_file);
65407+ return 1;
65408+}
65409+
65410+static int gr_rbac_disable(void *unused)
65411+{
65412+ pax_open_kernel();
65413+ gr_status &= ~GR_READY;
65414+ pax_close_kernel();
65415+
65416+ return 0;
65417+}
65418+
65419+ssize_t
65420+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
65421+{
65422+ struct gr_arg_wrapper uwrap;
65423+ unsigned char *sprole_salt = NULL;
65424+ unsigned char *sprole_sum = NULL;
65425+ int error = 0;
65426+ int error2 = 0;
65427+ size_t req_count = 0;
65428+
65429+ mutex_lock(&gr_dev_mutex);
65430+
65431+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
65432+ error = -EPERM;
65433+ goto out;
65434+ }
65435+
65436+#ifdef CONFIG_COMPAT
65437+ pax_open_kernel();
65438+ if (is_compat_task()) {
65439+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
65440+ copy_gr_arg = &copy_gr_arg_compat;
65441+ copy_acl_object_label = &copy_acl_object_label_compat;
65442+ copy_acl_subject_label = &copy_acl_subject_label_compat;
65443+ copy_acl_role_label = &copy_acl_role_label_compat;
65444+ copy_acl_ip_label = &copy_acl_ip_label_compat;
65445+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
65446+ copy_role_transition = &copy_role_transition_compat;
65447+ copy_sprole_pw = &copy_sprole_pw_compat;
65448+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
65449+ copy_pointer_from_array = &copy_pointer_from_array_compat;
65450+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
65451+ } else {
65452+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
65453+ copy_gr_arg = &copy_gr_arg_normal;
65454+ copy_acl_object_label = &copy_acl_object_label_normal;
65455+ copy_acl_subject_label = &copy_acl_subject_label_normal;
65456+ copy_acl_role_label = &copy_acl_role_label_normal;
65457+ copy_acl_ip_label = &copy_acl_ip_label_normal;
65458+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
65459+ copy_role_transition = &copy_role_transition_normal;
65460+ copy_sprole_pw = &copy_sprole_pw_normal;
65461+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
65462+ copy_pointer_from_array = &copy_pointer_from_array_normal;
65463+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
65464+ }
65465+ pax_close_kernel();
65466+#endif
65467+
65468+ req_count = get_gr_arg_wrapper_size();
65469+
65470+ if (count != req_count) {
65471+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
65472+ error = -EINVAL;
65473+ goto out;
65474+ }
65475+
65476+
65477+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
65478+ gr_auth_expires = 0;
65479+ gr_auth_attempts = 0;
65480+ }
65481+
65482+ error = copy_gr_arg_wrapper(buf, &uwrap);
65483+ if (error)
65484+ goto out;
65485+
65486+ error = copy_gr_arg(uwrap.arg, gr_usermode);
65487+ if (error)
65488+ goto out;
65489+
65490+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
65491+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
65492+ time_after(gr_auth_expires, get_seconds())) {
65493+ error = -EBUSY;
65494+ goto out;
65495+ }
65496+
65497+ /* if non-root trying to do anything other than use a special role,
65498+ do not attempt authentication, do not count towards authentication
65499+ locking
65500+ */
65501+
65502+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
65503+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
65504+ gr_is_global_nonroot(current_uid())) {
65505+ error = -EPERM;
65506+ goto out;
65507+ }
65508+
65509+ /* ensure pw and special role name are null terminated */
65510+
65511+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
65512+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
65513+
65514+ /* Okay.
65515+ * We have our enough of the argument structure..(we have yet
65516+ * to copy_from_user the tables themselves) . Copy the tables
65517+ * only if we need them, i.e. for loading operations. */
65518+
65519+ switch (gr_usermode->mode) {
65520+ case GR_STATUS:
65521+ if (gr_status & GR_READY) {
65522+ error = 1;
65523+ if (!gr_check_secure_terminal(current))
65524+ error = 3;
65525+ } else
65526+ error = 2;
65527+ goto out;
65528+ case GR_SHUTDOWN:
65529+ if ((gr_status & GR_READY)
65530+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
65531+ stop_machine(gr_rbac_disable, NULL, NULL);
65532+ free_variables();
65533+ memset(gr_usermode, 0, sizeof (struct gr_arg));
65534+ memset(gr_system_salt, 0, GR_SALT_LEN);
65535+ memset(gr_system_sum, 0, GR_SHA_LEN);
65536+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
65537+ } else if (gr_status & GR_READY) {
65538+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
65539+ error = -EPERM;
65540+ } else {
65541+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
65542+ error = -EAGAIN;
65543+ }
65544+ break;
65545+ case GR_ENABLE:
65546+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
65547+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
65548+ else {
65549+ if (gr_status & GR_READY)
65550+ error = -EAGAIN;
65551+ else
65552+ error = error2;
65553+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
65554+ }
65555+ break;
65556+ case GR_RELOAD:
65557+ if (!(gr_status & GR_READY)) {
65558+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
65559+ error = -EAGAIN;
65560+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
65561+ stop_machine(gr_rbac_disable, NULL, NULL);
65562+ free_variables();
65563+ error2 = gracl_init(gr_usermode);
65564+ if (!error2)
65565+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
65566+ else {
65567+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
65568+ error = error2;
65569+ }
65570+ } else {
65571+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
65572+ error = -EPERM;
65573+ }
65574+ break;
65575+ case GR_SEGVMOD:
65576+ if (unlikely(!(gr_status & GR_READY))) {
65577+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
65578+ error = -EAGAIN;
65579+ break;
65580+ }
65581+
65582+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
65583+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
65584+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
65585+ struct acl_subject_label *segvacl;
65586+ segvacl =
65587+ lookup_acl_subj_label(gr_usermode->segv_inode,
65588+ gr_usermode->segv_device,
65589+ current->role);
65590+ if (segvacl) {
65591+ segvacl->crashes = 0;
65592+ segvacl->expires = 0;
65593+ }
65594+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
65595+ gr_remove_uid(gr_usermode->segv_uid);
65596+ }
65597+ } else {
65598+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
65599+ error = -EPERM;
65600+ }
65601+ break;
65602+ case GR_SPROLE:
65603+ case GR_SPROLEPAM:
65604+ if (unlikely(!(gr_status & GR_READY))) {
65605+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
65606+ error = -EAGAIN;
65607+ break;
65608+ }
65609+
65610+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
65611+ current->role->expires = 0;
65612+ current->role->auth_attempts = 0;
65613+ }
65614+
65615+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
65616+ time_after(current->role->expires, get_seconds())) {
65617+ error = -EBUSY;
65618+ goto out;
65619+ }
65620+
65621+ if (lookup_special_role_auth
65622+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
65623+ && ((!sprole_salt && !sprole_sum)
65624+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
65625+ char *p = "";
65626+ assign_special_role(gr_usermode->sp_role);
65627+ read_lock(&tasklist_lock);
65628+ if (current->real_parent)
65629+ p = current->real_parent->role->rolename;
65630+ read_unlock(&tasklist_lock);
65631+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
65632+ p, acl_sp_role_value);
65633+ } else {
65634+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
65635+ error = -EPERM;
65636+ if(!(current->role->auth_attempts++))
65637+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
65638+
65639+ goto out;
65640+ }
65641+ break;
65642+ case GR_UNSPROLE:
65643+ if (unlikely(!(gr_status & GR_READY))) {
65644+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
65645+ error = -EAGAIN;
65646+ break;
65647+ }
65648+
65649+ if (current->role->roletype & GR_ROLE_SPECIAL) {
65650+ char *p = "";
65651+ int i = 0;
65652+
65653+ read_lock(&tasklist_lock);
65654+ if (current->real_parent) {
65655+ p = current->real_parent->role->rolename;
65656+ i = current->real_parent->acl_role_id;
65657+ }
65658+ read_unlock(&tasklist_lock);
65659+
65660+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
65661+ gr_set_acls(1);
65662+ } else {
65663+ error = -EPERM;
65664+ goto out;
65665+ }
65666+ break;
65667+ default:
65668+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
65669+ error = -EINVAL;
65670+ break;
65671+ }
65672+
65673+ if (error != -EPERM)
65674+ goto out;
65675+
65676+ if(!(gr_auth_attempts++))
65677+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
65678+
65679+ out:
65680+ mutex_unlock(&gr_dev_mutex);
65681+
65682+ if (!error)
65683+ error = req_count;
65684+
65685+ return error;
65686+}
65687+
65688+/* must be called with
65689+ rcu_read_lock();
65690+ read_lock(&tasklist_lock);
65691+ read_lock(&grsec_exec_file_lock);
65692+*/
65693+int gr_apply_subject_to_task(struct task_struct *task)
65694+{
65695+ struct acl_object_label *obj;
65696+ char *tmpname;
65697+ struct acl_subject_label *tmpsubj;
65698+ struct file *filp;
65699+ struct name_entry *nmatch;
65700+
65701+ filp = task->exec_file;
65702+ if (filp == NULL)
65703+ return 0;
65704+
65705+ /* the following is to apply the correct subject
65706+ on binaries running when the RBAC system
65707+ is enabled, when the binaries have been
65708+ replaced or deleted since their execution
65709+ -----
65710+ when the RBAC system starts, the inode/dev
65711+ from exec_file will be one the RBAC system
65712+ is unaware of. It only knows the inode/dev
65713+ of the present file on disk, or the absence
65714+ of it.
65715+ */
65716+ preempt_disable();
65717+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
65718+
65719+ nmatch = lookup_name_entry(tmpname);
65720+ preempt_enable();
65721+ tmpsubj = NULL;
65722+ if (nmatch) {
65723+ if (nmatch->deleted)
65724+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
65725+ else
65726+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
65727+ if (tmpsubj != NULL)
65728+ task->acl = tmpsubj;
65729+ }
65730+ if (tmpsubj == NULL)
65731+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
65732+ task->role);
65733+ if (task->acl) {
65734+ task->is_writable = 0;
65735+ /* ignore additional mmap checks for processes that are writable
65736+ by the default ACL */
65737+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
65738+ if (unlikely(obj->mode & GR_WRITE))
65739+ task->is_writable = 1;
65740+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
65741+ if (unlikely(obj->mode & GR_WRITE))
65742+ task->is_writable = 1;
65743+
65744+ gr_set_proc_res(task);
65745+
65746+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
65747+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
65748+#endif
65749+ } else {
65750+ return 1;
65751+ }
65752+
65753+ return 0;
65754+}
65755+
65756+int
65757+gr_set_acls(const int type)
65758+{
65759+ struct task_struct *task, *task2;
65760+ struct acl_role_label *role = current->role;
65761+ __u16 acl_role_id = current->acl_role_id;
65762+ const struct cred *cred;
65763+ int ret;
65764+
65765+ rcu_read_lock();
65766+ read_lock(&tasklist_lock);
65767+ read_lock(&grsec_exec_file_lock);
65768+ do_each_thread(task2, task) {
65769+ /* check to see if we're called from the exit handler,
65770+ if so, only replace ACLs that have inherited the admin
65771+ ACL */
65772+
65773+ if (type && (task->role != role ||
65774+ task->acl_role_id != acl_role_id))
65775+ continue;
65776+
65777+ task->acl_role_id = 0;
65778+ task->acl_sp_role = 0;
65779+
65780+ if (task->exec_file) {
65781+ cred = __task_cred(task);
65782+ task->role = lookup_acl_role_label(task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
65783+ ret = gr_apply_subject_to_task(task);
65784+ if (ret) {
65785+ read_unlock(&grsec_exec_file_lock);
65786+ read_unlock(&tasklist_lock);
65787+ rcu_read_unlock();
65788+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
65789+ return ret;
65790+ }
65791+ } else {
65792+ // it's a kernel process
65793+ task->role = kernel_role;
65794+ task->acl = kernel_role->root_label;
65795+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
65796+ task->acl->mode &= ~GR_PROCFIND;
65797+#endif
65798+ }
65799+ } while_each_thread(task2, task);
65800+ read_unlock(&grsec_exec_file_lock);
65801+ read_unlock(&tasklist_lock);
65802+ rcu_read_unlock();
65803+
65804+ return 0;
65805+}
65806+
65807+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
65808+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
65809+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
65810+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
65811+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
65812+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
65813+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
65814+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
65815+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
65816+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
65817+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
65818+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
65819+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
65820+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
65821+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
65822+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
65823+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
65824+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
65825+};
65826+
65827+void
65828+gr_learn_resource(const struct task_struct *task,
65829+ const int res, const unsigned long wanted, const int gt)
65830+{
65831+ struct acl_subject_label *acl;
65832+ const struct cred *cred;
65833+
65834+ if (unlikely((gr_status & GR_READY) &&
65835+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
65836+ goto skip_reslog;
65837+
65838+ gr_log_resource(task, res, wanted, gt);
65839+skip_reslog:
65840+
65841+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
65842+ return;
65843+
65844+ acl = task->acl;
65845+
65846+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
65847+ !(acl->resmask & (1U << (unsigned short) res))))
65848+ return;
65849+
65850+ if (wanted >= acl->res[res].rlim_cur) {
65851+ unsigned long res_add;
65852+
65853+ res_add = wanted + res_learn_bumps[res];
65854+
65855+ acl->res[res].rlim_cur = res_add;
65856+
65857+ if (wanted > acl->res[res].rlim_max)
65858+ acl->res[res].rlim_max = res_add;
65859+
65860+ /* only log the subject filename, since resource logging is supported for
65861+ single-subject learning only */
65862+ rcu_read_lock();
65863+ cred = __task_cred(task);
65864+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
65865+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
65866+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
65867+ "", (unsigned long) res, &task->signal->saved_ip);
65868+ rcu_read_unlock();
65869+ }
65870+
65871+ return;
65872+}
65873+EXPORT_SYMBOL(gr_learn_resource);
65874+#endif
65875+
65876+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
65877+void
65878+pax_set_initial_flags(struct linux_binprm *bprm)
65879+{
65880+ struct task_struct *task = current;
65881+ struct acl_subject_label *proc;
65882+ unsigned long flags;
65883+
65884+ if (unlikely(!(gr_status & GR_READY)))
65885+ return;
65886+
65887+ flags = pax_get_flags(task);
65888+
65889+ proc = task->acl;
65890+
65891+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
65892+ flags &= ~MF_PAX_PAGEEXEC;
65893+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
65894+ flags &= ~MF_PAX_SEGMEXEC;
65895+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
65896+ flags &= ~MF_PAX_RANDMMAP;
65897+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
65898+ flags &= ~MF_PAX_EMUTRAMP;
65899+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
65900+ flags &= ~MF_PAX_MPROTECT;
65901+
65902+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
65903+ flags |= MF_PAX_PAGEEXEC;
65904+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
65905+ flags |= MF_PAX_SEGMEXEC;
65906+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
65907+ flags |= MF_PAX_RANDMMAP;
65908+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
65909+ flags |= MF_PAX_EMUTRAMP;
65910+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
65911+ flags |= MF_PAX_MPROTECT;
65912+
65913+ pax_set_flags(task, flags);
65914+
65915+ return;
65916+}
65917+#endif
65918+
65919+int
65920+gr_handle_proc_ptrace(struct task_struct *task)
65921+{
65922+ struct file *filp;
65923+ struct task_struct *tmp = task;
65924+ struct task_struct *curtemp = current;
65925+ __u32 retmode;
65926+
65927+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
65928+ if (unlikely(!(gr_status & GR_READY)))
65929+ return 0;
65930+#endif
65931+
65932+ read_lock(&tasklist_lock);
65933+ read_lock(&grsec_exec_file_lock);
65934+ filp = task->exec_file;
65935+
65936+ while (task_pid_nr(tmp) > 0) {
65937+ if (tmp == curtemp)
65938+ break;
65939+ tmp = tmp->real_parent;
65940+ }
65941+
65942+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
65943+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
65944+ read_unlock(&grsec_exec_file_lock);
65945+ read_unlock(&tasklist_lock);
65946+ return 1;
65947+ }
65948+
65949+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65950+ if (!(gr_status & GR_READY)) {
65951+ read_unlock(&grsec_exec_file_lock);
65952+ read_unlock(&tasklist_lock);
65953+ return 0;
65954+ }
65955+#endif
65956+
65957+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
65958+ read_unlock(&grsec_exec_file_lock);
65959+ read_unlock(&tasklist_lock);
65960+
65961+ if (retmode & GR_NOPTRACE)
65962+ return 1;
65963+
65964+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
65965+ && (current->acl != task->acl || (current->acl != current->role->root_label
65966+ && task_pid_nr(current) != task_pid_nr(task))))
65967+ return 1;
65968+
65969+ return 0;
65970+}
65971+
65972+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
65973+{
65974+ if (unlikely(!(gr_status & GR_READY)))
65975+ return;
65976+
65977+ if (!(current->role->roletype & GR_ROLE_GOD))
65978+ return;
65979+
65980+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
65981+ p->role->rolename, gr_task_roletype_to_char(p),
65982+ p->acl->filename);
65983+}
65984+
65985+int
65986+gr_handle_ptrace(struct task_struct *task, const long request)
65987+{
65988+ struct task_struct *tmp = task;
65989+ struct task_struct *curtemp = current;
65990+ __u32 retmode;
65991+
65992+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
65993+ if (unlikely(!(gr_status & GR_READY)))
65994+ return 0;
65995+#endif
65996+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65997+ read_lock(&tasklist_lock);
65998+ while (task_pid_nr(tmp) > 0) {
65999+ if (tmp == curtemp)
66000+ break;
66001+ tmp = tmp->real_parent;
66002+ }
66003+
66004+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
66005+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
66006+ read_unlock(&tasklist_lock);
66007+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
66008+ return 1;
66009+ }
66010+ read_unlock(&tasklist_lock);
66011+ }
66012+
66013+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
66014+ if (!(gr_status & GR_READY))
66015+ return 0;
66016+#endif
66017+
66018+ read_lock(&grsec_exec_file_lock);
66019+ if (unlikely(!task->exec_file)) {
66020+ read_unlock(&grsec_exec_file_lock);
66021+ return 0;
66022+ }
66023+
66024+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
66025+ read_unlock(&grsec_exec_file_lock);
66026+
66027+ if (retmode & GR_NOPTRACE) {
66028+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
66029+ return 1;
66030+ }
66031+
66032+ if (retmode & GR_PTRACERD) {
66033+ switch (request) {
66034+ case PTRACE_SEIZE:
66035+ case PTRACE_POKETEXT:
66036+ case PTRACE_POKEDATA:
66037+ case PTRACE_POKEUSR:
66038+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
66039+ case PTRACE_SETREGS:
66040+ case PTRACE_SETFPREGS:
66041+#endif
66042+#ifdef CONFIG_X86
66043+ case PTRACE_SETFPXREGS:
66044+#endif
66045+#ifdef CONFIG_ALTIVEC
66046+ case PTRACE_SETVRREGS:
66047+#endif
66048+ return 1;
66049+ default:
66050+ return 0;
66051+ }
66052+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
66053+ !(current->role->roletype & GR_ROLE_GOD) &&
66054+ (current->acl != task->acl)) {
66055+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
66056+ return 1;
66057+ }
66058+
66059+ return 0;
66060+}
66061+
66062+static int is_writable_mmap(const struct file *filp)
66063+{
66064+ struct task_struct *task = current;
66065+ struct acl_object_label *obj, *obj2;
66066+
66067+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
66068+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
66069+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
66070+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
66071+ task->role->root_label);
66072+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
66073+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
66074+ return 1;
66075+ }
66076+ }
66077+ return 0;
66078+}
66079+
66080+int
66081+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
66082+{
66083+ __u32 mode;
66084+
66085+ if (unlikely(!file || !(prot & PROT_EXEC)))
66086+ return 1;
66087+
66088+ if (is_writable_mmap(file))
66089+ return 0;
66090+
66091+ mode =
66092+ gr_search_file(file->f_path.dentry,
66093+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
66094+ file->f_path.mnt);
66095+
66096+ if (!gr_tpe_allow(file))
66097+ return 0;
66098+
66099+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
66100+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
66101+ return 0;
66102+ } else if (unlikely(!(mode & GR_EXEC))) {
66103+ return 0;
66104+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
66105+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
66106+ return 1;
66107+ }
66108+
66109+ return 1;
66110+}
66111+
66112+int
66113+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
66114+{
66115+ __u32 mode;
66116+
66117+ if (unlikely(!file || !(prot & PROT_EXEC)))
66118+ return 1;
66119+
66120+ if (is_writable_mmap(file))
66121+ return 0;
66122+
66123+ mode =
66124+ gr_search_file(file->f_path.dentry,
66125+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
66126+ file->f_path.mnt);
66127+
66128+ if (!gr_tpe_allow(file))
66129+ return 0;
66130+
66131+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
66132+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
66133+ return 0;
66134+ } else if (unlikely(!(mode & GR_EXEC))) {
66135+ return 0;
66136+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
66137+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
66138+ return 1;
66139+ }
66140+
66141+ return 1;
66142+}
66143+
66144+void
66145+gr_acl_handle_psacct(struct task_struct *task, const long code)
66146+{
66147+ unsigned long runtime;
66148+ unsigned long cputime;
66149+ unsigned int wday, cday;
66150+ __u8 whr, chr;
66151+ __u8 wmin, cmin;
66152+ __u8 wsec, csec;
66153+ struct timespec timeval;
66154+
66155+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
66156+ !(task->acl->mode & GR_PROCACCT)))
66157+ return;
66158+
66159+ do_posix_clock_monotonic_gettime(&timeval);
66160+ runtime = timeval.tv_sec - task->start_time.tv_sec;
66161+ wday = runtime / (3600 * 24);
66162+ runtime -= wday * (3600 * 24);
66163+ whr = runtime / 3600;
66164+ runtime -= whr * 3600;
66165+ wmin = runtime / 60;
66166+ runtime -= wmin * 60;
66167+ wsec = runtime;
66168+
66169+ cputime = (task->utime + task->stime) / HZ;
66170+ cday = cputime / (3600 * 24);
66171+ cputime -= cday * (3600 * 24);
66172+ chr = cputime / 3600;
66173+ cputime -= chr * 3600;
66174+ cmin = cputime / 60;
66175+ cputime -= cmin * 60;
66176+ csec = cputime;
66177+
66178+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
66179+
66180+ return;
66181+}
66182+
66183+void gr_set_kernel_label(struct task_struct *task)
66184+{
66185+ if (gr_status & GR_READY) {
66186+ task->role = kernel_role;
66187+ task->acl = kernel_role->root_label;
66188+ }
66189+ return;
66190+}
66191+
66192+#ifdef CONFIG_TASKSTATS
66193+int gr_is_taskstats_denied(int pid)
66194+{
66195+ struct task_struct *task;
66196+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66197+ const struct cred *cred;
66198+#endif
66199+ int ret = 0;
66200+
66201+ /* restrict taskstats viewing to un-chrooted root users
66202+ who have the 'view' subject flag if the RBAC system is enabled
66203+ */
66204+
66205+ rcu_read_lock();
66206+ read_lock(&tasklist_lock);
66207+ task = find_task_by_vpid(pid);
66208+ if (task) {
66209+#ifdef CONFIG_GRKERNSEC_CHROOT
66210+ if (proc_is_chrooted(task))
66211+ ret = -EACCES;
66212+#endif
66213+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66214+ cred = __task_cred(task);
66215+#ifdef CONFIG_GRKERNSEC_PROC_USER
66216+ if (gr_is_global_nonroot(cred->uid))
66217+ ret = -EACCES;
66218+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66219+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
66220+ ret = -EACCES;
66221+#endif
66222+#endif
66223+ if (gr_status & GR_READY) {
66224+ if (!(task->acl->mode & GR_VIEW))
66225+ ret = -EACCES;
66226+ }
66227+ } else
66228+ ret = -ENOENT;
66229+
66230+ read_unlock(&tasklist_lock);
66231+ rcu_read_unlock();
66232+
66233+ return ret;
66234+}
66235+#endif
66236+
66237+/* AUXV entries are filled via a descendant of search_binary_handler
66238+ after we've already applied the subject for the target
66239+*/
66240+int gr_acl_enable_at_secure(void)
66241+{
66242+ if (unlikely(!(gr_status & GR_READY)))
66243+ return 0;
66244+
66245+ if (current->acl->mode & GR_ATSECURE)
66246+ return 1;
66247+
66248+ return 0;
66249+}
66250+
66251+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
66252+{
66253+ struct task_struct *task = current;
66254+ struct dentry *dentry = file->f_path.dentry;
66255+ struct vfsmount *mnt = file->f_path.mnt;
66256+ struct acl_object_label *obj, *tmp;
66257+ struct acl_subject_label *subj;
66258+ unsigned int bufsize;
66259+ int is_not_root;
66260+ char *path;
66261+ dev_t dev = __get_dev(dentry);
66262+
66263+ if (unlikely(!(gr_status & GR_READY)))
66264+ return 1;
66265+
66266+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
66267+ return 1;
66268+
66269+ /* ignore Eric Biederman */
66270+ if (IS_PRIVATE(dentry->d_inode))
66271+ return 1;
66272+
66273+ subj = task->acl;
66274+ read_lock(&gr_inode_lock);
66275+ do {
66276+ obj = lookup_acl_obj_label(ino, dev, subj);
66277+ if (obj != NULL) {
66278+ read_unlock(&gr_inode_lock);
66279+ return (obj->mode & GR_FIND) ? 1 : 0;
66280+ }
66281+ } while ((subj = subj->parent_subject));
66282+ read_unlock(&gr_inode_lock);
66283+
66284+ /* this is purely an optimization since we're looking for an object
66285+ for the directory we're doing a readdir on
66286+ if it's possible for any globbed object to match the entry we're
66287+ filling into the directory, then the object we find here will be
66288+ an anchor point with attached globbed objects
66289+ */
66290+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
66291+ if (obj->globbed == NULL)
66292+ return (obj->mode & GR_FIND) ? 1 : 0;
66293+
66294+ is_not_root = ((obj->filename[0] == '/') &&
66295+ (obj->filename[1] == '\0')) ? 0 : 1;
66296+ bufsize = PAGE_SIZE - namelen - is_not_root;
66297+
66298+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
66299+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
66300+ return 1;
66301+
66302+ preempt_disable();
66303+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
66304+ bufsize);
66305+
66306+ bufsize = strlen(path);
66307+
66308+ /* if base is "/", don't append an additional slash */
66309+ if (is_not_root)
66310+ *(path + bufsize) = '/';
66311+ memcpy(path + bufsize + is_not_root, name, namelen);
66312+ *(path + bufsize + namelen + is_not_root) = '\0';
66313+
66314+ tmp = obj->globbed;
66315+ while (tmp) {
66316+ if (!glob_match(tmp->filename, path)) {
66317+ preempt_enable();
66318+ return (tmp->mode & GR_FIND) ? 1 : 0;
66319+ }
66320+ tmp = tmp->next;
66321+ }
66322+ preempt_enable();
66323+ return (obj->mode & GR_FIND) ? 1 : 0;
66324+}
66325+
66326+void gr_put_exec_file(struct task_struct *task)
66327+{
66328+ struct file *filp;
66329+
66330+ write_lock(&grsec_exec_file_lock);
66331+ filp = task->exec_file;
66332+ task->exec_file = NULL;
66333+ write_unlock(&grsec_exec_file_lock);
66334+
66335+ if (filp)
66336+ fput(filp);
66337+
66338+ return;
66339+}
66340+
66341+
66342+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
66343+EXPORT_SYMBOL(gr_acl_is_enabled);
66344+#endif
66345+EXPORT_SYMBOL(gr_set_kernel_label);
66346+#ifdef CONFIG_SECURITY
66347+EXPORT_SYMBOL(gr_check_user_change);
66348+EXPORT_SYMBOL(gr_check_group_change);
66349+#endif
66350+
66351diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
66352new file mode 100644
66353index 0000000..34fefda
66354--- /dev/null
66355+++ b/grsecurity/gracl_alloc.c
66356@@ -0,0 +1,105 @@
66357+#include <linux/kernel.h>
66358+#include <linux/mm.h>
66359+#include <linux/slab.h>
66360+#include <linux/vmalloc.h>
66361+#include <linux/gracl.h>
66362+#include <linux/grsecurity.h>
66363+
66364+static unsigned long alloc_stack_next = 1;
66365+static unsigned long alloc_stack_size = 1;
66366+static void **alloc_stack;
66367+
66368+static __inline__ int
66369+alloc_pop(void)
66370+{
66371+ if (alloc_stack_next == 1)
66372+ return 0;
66373+
66374+ kfree(alloc_stack[alloc_stack_next - 2]);
66375+
66376+ alloc_stack_next--;
66377+
66378+ return 1;
66379+}
66380+
66381+static __inline__ int
66382+alloc_push(void *buf)
66383+{
66384+ if (alloc_stack_next >= alloc_stack_size)
66385+ return 1;
66386+
66387+ alloc_stack[alloc_stack_next - 1] = buf;
66388+
66389+ alloc_stack_next++;
66390+
66391+ return 0;
66392+}
66393+
66394+void *
66395+acl_alloc(unsigned long len)
66396+{
66397+ void *ret = NULL;
66398+
66399+ if (!len || len > PAGE_SIZE)
66400+ goto out;
66401+
66402+ ret = kmalloc(len, GFP_KERNEL);
66403+
66404+ if (ret) {
66405+ if (alloc_push(ret)) {
66406+ kfree(ret);
66407+ ret = NULL;
66408+ }
66409+ }
66410+
66411+out:
66412+ return ret;
66413+}
66414+
66415+void *
66416+acl_alloc_num(unsigned long num, unsigned long len)
66417+{
66418+ if (!len || (num > (PAGE_SIZE / len)))
66419+ return NULL;
66420+
66421+ return acl_alloc(num * len);
66422+}
66423+
66424+void
66425+acl_free_all(void)
66426+{
66427+ if (gr_acl_is_enabled() || !alloc_stack)
66428+ return;
66429+
66430+ while (alloc_pop()) ;
66431+
66432+ if (alloc_stack) {
66433+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
66434+ kfree(alloc_stack);
66435+ else
66436+ vfree(alloc_stack);
66437+ }
66438+
66439+ alloc_stack = NULL;
66440+ alloc_stack_size = 1;
66441+ alloc_stack_next = 1;
66442+
66443+ return;
66444+}
66445+
66446+int
66447+acl_alloc_stack_init(unsigned long size)
66448+{
66449+ if ((size * sizeof (void *)) <= PAGE_SIZE)
66450+ alloc_stack =
66451+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
66452+ else
66453+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
66454+
66455+ alloc_stack_size = size;
66456+
66457+ if (!alloc_stack)
66458+ return 0;
66459+ else
66460+ return 1;
66461+}
66462diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
66463new file mode 100644
66464index 0000000..bdd51ea
66465--- /dev/null
66466+++ b/grsecurity/gracl_cap.c
66467@@ -0,0 +1,110 @@
66468+#include <linux/kernel.h>
66469+#include <linux/module.h>
66470+#include <linux/sched.h>
66471+#include <linux/gracl.h>
66472+#include <linux/grsecurity.h>
66473+#include <linux/grinternal.h>
66474+
66475+extern const char *captab_log[];
66476+extern int captab_log_entries;
66477+
66478+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
66479+{
66480+ struct acl_subject_label *curracl;
66481+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
66482+ kernel_cap_t cap_audit = __cap_empty_set;
66483+
66484+ if (!gr_acl_is_enabled())
66485+ return 1;
66486+
66487+ curracl = task->acl;
66488+
66489+ cap_drop = curracl->cap_lower;
66490+ cap_mask = curracl->cap_mask;
66491+ cap_audit = curracl->cap_invert_audit;
66492+
66493+ while ((curracl = curracl->parent_subject)) {
66494+ /* if the cap isn't specified in the current computed mask but is specified in the
66495+ current level subject, and is lowered in the current level subject, then add
66496+ it to the set of dropped capabilities
66497+ otherwise, add the current level subject's mask to the current computed mask
66498+ */
66499+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
66500+ cap_raise(cap_mask, cap);
66501+ if (cap_raised(curracl->cap_lower, cap))
66502+ cap_raise(cap_drop, cap);
66503+ if (cap_raised(curracl->cap_invert_audit, cap))
66504+ cap_raise(cap_audit, cap);
66505+ }
66506+ }
66507+
66508+ if (!cap_raised(cap_drop, cap)) {
66509+ if (cap_raised(cap_audit, cap))
66510+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
66511+ return 1;
66512+ }
66513+
66514+ curracl = task->acl;
66515+
66516+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
66517+ && cap_raised(cred->cap_effective, cap)) {
66518+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
66519+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
66520+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
66521+ gr_to_filename(task->exec_file->f_path.dentry,
66522+ task->exec_file->f_path.mnt) : curracl->filename,
66523+ curracl->filename, 0UL,
66524+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
66525+ return 1;
66526+ }
66527+
66528+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
66529+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
66530+
66531+ return 0;
66532+}
66533+
66534+int
66535+gr_acl_is_capable(const int cap)
66536+{
66537+ return gr_task_acl_is_capable(current, current_cred(), cap);
66538+}
66539+
66540+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
66541+{
66542+ struct acl_subject_label *curracl;
66543+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
66544+
66545+ if (!gr_acl_is_enabled())
66546+ return 1;
66547+
66548+ curracl = task->acl;
66549+
66550+ cap_drop = curracl->cap_lower;
66551+ cap_mask = curracl->cap_mask;
66552+
66553+ while ((curracl = curracl->parent_subject)) {
66554+ /* if the cap isn't specified in the current computed mask but is specified in the
66555+ current level subject, and is lowered in the current level subject, then add
66556+ it to the set of dropped capabilities
66557+ otherwise, add the current level subject's mask to the current computed mask
66558+ */
66559+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
66560+ cap_raise(cap_mask, cap);
66561+ if (cap_raised(curracl->cap_lower, cap))
66562+ cap_raise(cap_drop, cap);
66563+ }
66564+ }
66565+
66566+ if (!cap_raised(cap_drop, cap))
66567+ return 1;
66568+
66569+ return 0;
66570+}
66571+
66572+int
66573+gr_acl_is_capable_nolog(const int cap)
66574+{
66575+ return gr_task_acl_is_capable_nolog(current, cap);
66576+}
66577+
66578diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
66579new file mode 100644
66580index 0000000..a43dd06
66581--- /dev/null
66582+++ b/grsecurity/gracl_compat.c
66583@@ -0,0 +1,269 @@
66584+#include <linux/kernel.h>
66585+#include <linux/gracl.h>
66586+#include <linux/compat.h>
66587+#include <linux/gracl_compat.h>
66588+
66589+#include <asm/uaccess.h>
66590+
66591+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
66592+{
66593+ struct gr_arg_wrapper_compat uwrapcompat;
66594+
66595+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
66596+ return -EFAULT;
66597+
66598+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
66599+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
66600+ return -EINVAL;
66601+
66602+ uwrap->arg = compat_ptr(uwrapcompat.arg);
66603+ uwrap->version = uwrapcompat.version;
66604+ uwrap->size = sizeof(struct gr_arg);
66605+
66606+ return 0;
66607+}
66608+
66609+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
66610+{
66611+ struct gr_arg_compat argcompat;
66612+
66613+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
66614+ return -EFAULT;
66615+
66616+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
66617+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
66618+ arg->role_db.num_roles = argcompat.role_db.num_roles;
66619+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
66620+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
66621+ arg->role_db.num_objects = argcompat.role_db.num_objects;
66622+
66623+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
66624+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
66625+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
66626+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
66627+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
66628+ arg->segv_device = argcompat.segv_device;
66629+ arg->segv_inode = argcompat.segv_inode;
66630+ arg->segv_uid = argcompat.segv_uid;
66631+ arg->num_sprole_pws = argcompat.num_sprole_pws;
66632+ arg->mode = argcompat.mode;
66633+
66634+ return 0;
66635+}
66636+
66637+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
66638+{
66639+ struct acl_object_label_compat objcompat;
66640+
66641+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
66642+ return -EFAULT;
66643+
66644+ obj->filename = compat_ptr(objcompat.filename);
66645+ obj->inode = objcompat.inode;
66646+ obj->device = objcompat.device;
66647+ obj->mode = objcompat.mode;
66648+
66649+ obj->nested = compat_ptr(objcompat.nested);
66650+ obj->globbed = compat_ptr(objcompat.globbed);
66651+
66652+ obj->prev = compat_ptr(objcompat.prev);
66653+ obj->next = compat_ptr(objcompat.next);
66654+
66655+ return 0;
66656+}
66657+
66658+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
66659+{
66660+ unsigned int i;
66661+ struct acl_subject_label_compat subjcompat;
66662+
66663+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
66664+ return -EFAULT;
66665+
66666+ subj->filename = compat_ptr(subjcompat.filename);
66667+ subj->inode = subjcompat.inode;
66668+ subj->device = subjcompat.device;
66669+ subj->mode = subjcompat.mode;
66670+ subj->cap_mask = subjcompat.cap_mask;
66671+ subj->cap_lower = subjcompat.cap_lower;
66672+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
66673+
66674+ for (i = 0; i < GR_NLIMITS; i++) {
66675+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
66676+ subj->res[i].rlim_cur = RLIM_INFINITY;
66677+ else
66678+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
66679+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
66680+ subj->res[i].rlim_max = RLIM_INFINITY;
66681+ else
66682+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
66683+ }
66684+ subj->resmask = subjcompat.resmask;
66685+
66686+ subj->user_trans_type = subjcompat.user_trans_type;
66687+ subj->group_trans_type = subjcompat.group_trans_type;
66688+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
66689+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
66690+ subj->user_trans_num = subjcompat.user_trans_num;
66691+ subj->group_trans_num = subjcompat.group_trans_num;
66692+
66693+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
66694+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
66695+ subj->ip_type = subjcompat.ip_type;
66696+ subj->ips = compat_ptr(subjcompat.ips);
66697+ subj->ip_num = subjcompat.ip_num;
66698+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
66699+
66700+ subj->crashes = subjcompat.crashes;
66701+ subj->expires = subjcompat.expires;
66702+
66703+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
66704+ subj->hash = compat_ptr(subjcompat.hash);
66705+ subj->prev = compat_ptr(subjcompat.prev);
66706+ subj->next = compat_ptr(subjcompat.next);
66707+
66708+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
66709+ subj->obj_hash_size = subjcompat.obj_hash_size;
66710+ subj->pax_flags = subjcompat.pax_flags;
66711+
66712+ return 0;
66713+}
66714+
66715+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
66716+{
66717+ struct acl_role_label_compat rolecompat;
66718+
66719+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
66720+ return -EFAULT;
66721+
66722+ role->rolename = compat_ptr(rolecompat.rolename);
66723+ role->uidgid = rolecompat.uidgid;
66724+ role->roletype = rolecompat.roletype;
66725+
66726+ role->auth_attempts = rolecompat.auth_attempts;
66727+ role->expires = rolecompat.expires;
66728+
66729+ role->root_label = compat_ptr(rolecompat.root_label);
66730+ role->hash = compat_ptr(rolecompat.hash);
66731+
66732+ role->prev = compat_ptr(rolecompat.prev);
66733+ role->next = compat_ptr(rolecompat.next);
66734+
66735+ role->transitions = compat_ptr(rolecompat.transitions);
66736+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
66737+ role->domain_children = compat_ptr(rolecompat.domain_children);
66738+ role->domain_child_num = rolecompat.domain_child_num;
66739+
66740+ role->umask = rolecompat.umask;
66741+
66742+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
66743+ role->subj_hash_size = rolecompat.subj_hash_size;
66744+
66745+ return 0;
66746+}
66747+
66748+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
66749+{
66750+ struct role_allowed_ip_compat roleip_compat;
66751+
66752+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
66753+ return -EFAULT;
66754+
66755+ roleip->addr = roleip_compat.addr;
66756+ roleip->netmask = roleip_compat.netmask;
66757+
66758+ roleip->prev = compat_ptr(roleip_compat.prev);
66759+ roleip->next = compat_ptr(roleip_compat.next);
66760+
66761+ return 0;
66762+}
66763+
66764+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
66765+{
66766+ struct role_transition_compat trans_compat;
66767+
66768+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
66769+ return -EFAULT;
66770+
66771+ trans->rolename = compat_ptr(trans_compat.rolename);
66772+
66773+ trans->prev = compat_ptr(trans_compat.prev);
66774+ trans->next = compat_ptr(trans_compat.next);
66775+
66776+ return 0;
66777+
66778+}
66779+
66780+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
66781+{
66782+ struct gr_hash_struct_compat hash_compat;
66783+
66784+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
66785+ return -EFAULT;
66786+
66787+ hash->table = compat_ptr(hash_compat.table);
66788+ hash->nametable = compat_ptr(hash_compat.nametable);
66789+ hash->first = compat_ptr(hash_compat.first);
66790+
66791+ hash->table_size = hash_compat.table_size;
66792+ hash->used_size = hash_compat.used_size;
66793+
66794+ hash->type = hash_compat.type;
66795+
66796+ return 0;
66797+}
66798+
66799+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
66800+{
66801+ compat_uptr_t ptrcompat;
66802+
66803+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
66804+ return -EFAULT;
66805+
66806+ *(void **)ptr = compat_ptr(ptrcompat);
66807+
66808+ return 0;
66809+}
66810+
66811+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
66812+{
66813+ struct acl_ip_label_compat ip_compat;
66814+
66815+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
66816+ return -EFAULT;
66817+
66818+ ip->iface = compat_ptr(ip_compat.iface);
66819+ ip->addr = ip_compat.addr;
66820+ ip->netmask = ip_compat.netmask;
66821+ ip->low = ip_compat.low;
66822+ ip->high = ip_compat.high;
66823+ ip->mode = ip_compat.mode;
66824+ ip->type = ip_compat.type;
66825+
66826+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
66827+
66828+ ip->prev = compat_ptr(ip_compat.prev);
66829+ ip->next = compat_ptr(ip_compat.next);
66830+
66831+ return 0;
66832+}
66833+
66834+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
66835+{
66836+ struct sprole_pw_compat pw_compat;
66837+
66838+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
66839+ return -EFAULT;
66840+
66841+ pw->rolename = compat_ptr(pw_compat.rolename);
66842+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
66843+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
66844+
66845+ return 0;
66846+}
66847+
66848+size_t get_gr_arg_wrapper_size_compat(void)
66849+{
66850+ return sizeof(struct gr_arg_wrapper_compat);
66851+}
66852+
66853diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
66854new file mode 100644
66855index 0000000..a340c17
66856--- /dev/null
66857+++ b/grsecurity/gracl_fs.c
66858@@ -0,0 +1,431 @@
66859+#include <linux/kernel.h>
66860+#include <linux/sched.h>
66861+#include <linux/types.h>
66862+#include <linux/fs.h>
66863+#include <linux/file.h>
66864+#include <linux/stat.h>
66865+#include <linux/grsecurity.h>
66866+#include <linux/grinternal.h>
66867+#include <linux/gracl.h>
66868+
66869+umode_t
66870+gr_acl_umask(void)
66871+{
66872+ if (unlikely(!gr_acl_is_enabled()))
66873+ return 0;
66874+
66875+ return current->role->umask;
66876+}
66877+
66878+__u32
66879+gr_acl_handle_hidden_file(const struct dentry * dentry,
66880+ const struct vfsmount * mnt)
66881+{
66882+ __u32 mode;
66883+
66884+ if (unlikely(!dentry->d_inode))
66885+ return GR_FIND;
66886+
66887+ mode =
66888+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
66889+
66890+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
66891+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
66892+ return mode;
66893+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
66894+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
66895+ return 0;
66896+ } else if (unlikely(!(mode & GR_FIND)))
66897+ return 0;
66898+
66899+ return GR_FIND;
66900+}
66901+
66902+__u32
66903+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
66904+ int acc_mode)
66905+{
66906+ __u32 reqmode = GR_FIND;
66907+ __u32 mode;
66908+
66909+ if (unlikely(!dentry->d_inode))
66910+ return reqmode;
66911+
66912+ if (acc_mode & MAY_APPEND)
66913+ reqmode |= GR_APPEND;
66914+ else if (acc_mode & MAY_WRITE)
66915+ reqmode |= GR_WRITE;
66916+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
66917+ reqmode |= GR_READ;
66918+
66919+ mode =
66920+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
66921+ mnt);
66922+
66923+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
66924+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
66925+ reqmode & GR_READ ? " reading" : "",
66926+ reqmode & GR_WRITE ? " writing" : reqmode &
66927+ GR_APPEND ? " appending" : "");
66928+ return reqmode;
66929+ } else
66930+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
66931+ {
66932+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
66933+ reqmode & GR_READ ? " reading" : "",
66934+ reqmode & GR_WRITE ? " writing" : reqmode &
66935+ GR_APPEND ? " appending" : "");
66936+ return 0;
66937+ } else if (unlikely((mode & reqmode) != reqmode))
66938+ return 0;
66939+
66940+ return reqmode;
66941+}
66942+
66943+__u32
66944+gr_acl_handle_creat(const struct dentry * dentry,
66945+ const struct dentry * p_dentry,
66946+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
66947+ const int imode)
66948+{
66949+ __u32 reqmode = GR_WRITE | GR_CREATE;
66950+ __u32 mode;
66951+
66952+ if (acc_mode & MAY_APPEND)
66953+ reqmode |= GR_APPEND;
66954+ // if a directory was required or the directory already exists, then
66955+ // don't count this open as a read
66956+ if ((acc_mode & MAY_READ) &&
66957+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
66958+ reqmode |= GR_READ;
66959+ if ((open_flags & O_CREAT) &&
66960+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
66961+ reqmode |= GR_SETID;
66962+
66963+ mode =
66964+ gr_check_create(dentry, p_dentry, p_mnt,
66965+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
66966+
66967+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
66968+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
66969+ reqmode & GR_READ ? " reading" : "",
66970+ reqmode & GR_WRITE ? " writing" : reqmode &
66971+ GR_APPEND ? " appending" : "");
66972+ return reqmode;
66973+ } else
66974+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
66975+ {
66976+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
66977+ reqmode & GR_READ ? " reading" : "",
66978+ reqmode & GR_WRITE ? " writing" : reqmode &
66979+ GR_APPEND ? " appending" : "");
66980+ return 0;
66981+ } else if (unlikely((mode & reqmode) != reqmode))
66982+ return 0;
66983+
66984+ return reqmode;
66985+}
66986+
66987+__u32
66988+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
66989+ const int fmode)
66990+{
66991+ __u32 mode, reqmode = GR_FIND;
66992+
66993+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
66994+ reqmode |= GR_EXEC;
66995+ if (fmode & S_IWOTH)
66996+ reqmode |= GR_WRITE;
66997+ if (fmode & S_IROTH)
66998+ reqmode |= GR_READ;
66999+
67000+ mode =
67001+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
67002+ mnt);
67003+
67004+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
67005+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
67006+ reqmode & GR_READ ? " reading" : "",
67007+ reqmode & GR_WRITE ? " writing" : "",
67008+ reqmode & GR_EXEC ? " executing" : "");
67009+ return reqmode;
67010+ } else
67011+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
67012+ {
67013+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
67014+ reqmode & GR_READ ? " reading" : "",
67015+ reqmode & GR_WRITE ? " writing" : "",
67016+ reqmode & GR_EXEC ? " executing" : "");
67017+ return 0;
67018+ } else if (unlikely((mode & reqmode) != reqmode))
67019+ return 0;
67020+
67021+ return reqmode;
67022+}
67023+
67024+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
67025+{
67026+ __u32 mode;
67027+
67028+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
67029+
67030+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
67031+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
67032+ return mode;
67033+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
67034+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
67035+ return 0;
67036+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
67037+ return 0;
67038+
67039+ return (reqmode);
67040+}
67041+
67042+__u32
67043+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
67044+{
67045+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
67046+}
67047+
67048+__u32
67049+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
67050+{
67051+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
67052+}
67053+
67054+__u32
67055+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
67056+{
67057+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
67058+}
67059+
67060+__u32
67061+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
67062+{
67063+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
67064+}
67065+
67066+__u32
67067+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
67068+ umode_t *modeptr)
67069+{
67070+ umode_t mode;
67071+
67072+ *modeptr &= ~gr_acl_umask();
67073+ mode = *modeptr;
67074+
67075+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
67076+ return 1;
67077+
67078+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
67079+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
67080+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
67081+ GR_CHMOD_ACL_MSG);
67082+ } else {
67083+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
67084+ }
67085+}
67086+
67087+__u32
67088+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
67089+{
67090+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
67091+}
67092+
67093+__u32
67094+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
67095+{
67096+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
67097+}
67098+
67099+__u32
67100+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
67101+{
67102+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
67103+}
67104+
67105+__u32
67106+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
67107+{
67108+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
67109+ GR_UNIXCONNECT_ACL_MSG);
67110+}
67111+
67112+/* hardlinks require at minimum create and link permission,
67113+ any additional privilege required is based on the
67114+ privilege of the file being linked to
67115+*/
67116+__u32
67117+gr_acl_handle_link(const struct dentry * new_dentry,
67118+ const struct dentry * parent_dentry,
67119+ const struct vfsmount * parent_mnt,
67120+ const struct dentry * old_dentry,
67121+ const struct vfsmount * old_mnt, const struct filename *to)
67122+{
67123+ __u32 mode;
67124+ __u32 needmode = GR_CREATE | GR_LINK;
67125+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
67126+
67127+ mode =
67128+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
67129+ old_mnt);
67130+
67131+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
67132+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
67133+ return mode;
67134+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
67135+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
67136+ return 0;
67137+ } else if (unlikely((mode & needmode) != needmode))
67138+ return 0;
67139+
67140+ return 1;
67141+}
67142+
67143+__u32
67144+gr_acl_handle_symlink(const struct dentry * new_dentry,
67145+ const struct dentry * parent_dentry,
67146+ const struct vfsmount * parent_mnt, const struct filename *from)
67147+{
67148+ __u32 needmode = GR_WRITE | GR_CREATE;
67149+ __u32 mode;
67150+
67151+ mode =
67152+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
67153+ GR_CREATE | GR_AUDIT_CREATE |
67154+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
67155+
67156+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
67157+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
67158+ return mode;
67159+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
67160+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
67161+ return 0;
67162+ } else if (unlikely((mode & needmode) != needmode))
67163+ return 0;
67164+
67165+ return (GR_WRITE | GR_CREATE);
67166+}
67167+
67168+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
67169+{
67170+ __u32 mode;
67171+
67172+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
67173+
67174+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
67175+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
67176+ return mode;
67177+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
67178+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
67179+ return 0;
67180+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
67181+ return 0;
67182+
67183+ return (reqmode);
67184+}
67185+
67186+__u32
67187+gr_acl_handle_mknod(const struct dentry * new_dentry,
67188+ const struct dentry * parent_dentry,
67189+ const struct vfsmount * parent_mnt,
67190+ const int mode)
67191+{
67192+ __u32 reqmode = GR_WRITE | GR_CREATE;
67193+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
67194+ reqmode |= GR_SETID;
67195+
67196+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
67197+ reqmode, GR_MKNOD_ACL_MSG);
67198+}
67199+
67200+__u32
67201+gr_acl_handle_mkdir(const struct dentry *new_dentry,
67202+ const struct dentry *parent_dentry,
67203+ const struct vfsmount *parent_mnt)
67204+{
67205+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
67206+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
67207+}
67208+
67209+#define RENAME_CHECK_SUCCESS(old, new) \
67210+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
67211+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
67212+
67213+int
67214+gr_acl_handle_rename(struct dentry *new_dentry,
67215+ struct dentry *parent_dentry,
67216+ const struct vfsmount *parent_mnt,
67217+ struct dentry *old_dentry,
67218+ struct inode *old_parent_inode,
67219+ struct vfsmount *old_mnt, const struct filename *newname)
67220+{
67221+ __u32 comp1, comp2;
67222+ int error = 0;
67223+
67224+ if (unlikely(!gr_acl_is_enabled()))
67225+ return 0;
67226+
67227+ if (!new_dentry->d_inode) {
67228+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
67229+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
67230+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
67231+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
67232+ GR_DELETE | GR_AUDIT_DELETE |
67233+ GR_AUDIT_READ | GR_AUDIT_WRITE |
67234+ GR_SUPPRESS, old_mnt);
67235+ } else {
67236+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
67237+ GR_CREATE | GR_DELETE |
67238+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
67239+ GR_AUDIT_READ | GR_AUDIT_WRITE |
67240+ GR_SUPPRESS, parent_mnt);
67241+ comp2 =
67242+ gr_search_file(old_dentry,
67243+ GR_READ | GR_WRITE | GR_AUDIT_READ |
67244+ GR_DELETE | GR_AUDIT_DELETE |
67245+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
67246+ }
67247+
67248+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
67249+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
67250+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
67251+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
67252+ && !(comp2 & GR_SUPPRESS)) {
67253+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
67254+ error = -EACCES;
67255+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
67256+ error = -EACCES;
67257+
67258+ return error;
67259+}
67260+
67261+void
67262+gr_acl_handle_exit(void)
67263+{
67264+ u16 id;
67265+ char *rolename;
67266+
67267+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
67268+ !(current->role->roletype & GR_ROLE_PERSIST))) {
67269+ id = current->acl_role_id;
67270+ rolename = current->role->rolename;
67271+ gr_set_acls(1);
67272+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
67273+ }
67274+
67275+ gr_put_exec_file(current);
67276+ return;
67277+}
67278+
67279+int
67280+gr_acl_handle_procpidmem(const struct task_struct *task)
67281+{
67282+ if (unlikely(!gr_acl_is_enabled()))
67283+ return 0;
67284+
67285+ if (task != current && task->acl->mode & GR_PROTPROCFD)
67286+ return -EACCES;
67287+
67288+ return 0;
67289+}
67290diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
67291new file mode 100644
67292index 0000000..8132048
67293--- /dev/null
67294+++ b/grsecurity/gracl_ip.c
67295@@ -0,0 +1,387 @@
67296+#include <linux/kernel.h>
67297+#include <asm/uaccess.h>
67298+#include <asm/errno.h>
67299+#include <net/sock.h>
67300+#include <linux/file.h>
67301+#include <linux/fs.h>
67302+#include <linux/net.h>
67303+#include <linux/in.h>
67304+#include <linux/skbuff.h>
67305+#include <linux/ip.h>
67306+#include <linux/udp.h>
67307+#include <linux/types.h>
67308+#include <linux/sched.h>
67309+#include <linux/netdevice.h>
67310+#include <linux/inetdevice.h>
67311+#include <linux/gracl.h>
67312+#include <linux/grsecurity.h>
67313+#include <linux/grinternal.h>
67314+
67315+#define GR_BIND 0x01
67316+#define GR_CONNECT 0x02
67317+#define GR_INVERT 0x04
67318+#define GR_BINDOVERRIDE 0x08
67319+#define GR_CONNECTOVERRIDE 0x10
67320+#define GR_SOCK_FAMILY 0x20
67321+
67322+static const char * gr_protocols[IPPROTO_MAX] = {
67323+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
67324+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
67325+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
67326+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
67327+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
67328+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
67329+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
67330+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
67331+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
67332+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
67333+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
67334+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
67335+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
67336+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
67337+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
67338+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
67339+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
67340+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
67341+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
67342+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
67343+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
67344+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
67345+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
67346+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
67347+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
67348+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
67349+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
67350+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
67351+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
67352+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
67353+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
67354+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
67355+ };
67356+
67357+static const char * gr_socktypes[SOCK_MAX] = {
67358+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
67359+ "unknown:7", "unknown:8", "unknown:9", "packet"
67360+ };
67361+
67362+static const char * gr_sockfamilies[AF_MAX+1] = {
67363+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
67364+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
67365+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
67366+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
67367+ };
67368+
67369+const char *
67370+gr_proto_to_name(unsigned char proto)
67371+{
67372+ return gr_protocols[proto];
67373+}
67374+
67375+const char *
67376+gr_socktype_to_name(unsigned char type)
67377+{
67378+ return gr_socktypes[type];
67379+}
67380+
67381+const char *
67382+gr_sockfamily_to_name(unsigned char family)
67383+{
67384+ return gr_sockfamilies[family];
67385+}
67386+
67387+int
67388+gr_search_socket(const int domain, const int type, const int protocol)
67389+{
67390+ struct acl_subject_label *curr;
67391+ const struct cred *cred = current_cred();
67392+
67393+ if (unlikely(!gr_acl_is_enabled()))
67394+ goto exit;
67395+
67396+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
67397+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
67398+ goto exit; // let the kernel handle it
67399+
67400+ curr = current->acl;
67401+
67402+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
67403+ /* the family is allowed, if this is PF_INET allow it only if
67404+ the extra sock type/protocol checks pass */
67405+ if (domain == PF_INET)
67406+ goto inet_check;
67407+ goto exit;
67408+ } else {
67409+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
67410+ __u32 fakeip = 0;
67411+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67412+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
67413+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
67414+ gr_to_filename(current->exec_file->f_path.dentry,
67415+ current->exec_file->f_path.mnt) :
67416+ curr->filename, curr->filename,
67417+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
67418+ &current->signal->saved_ip);
67419+ goto exit;
67420+ }
67421+ goto exit_fail;
67422+ }
67423+
67424+inet_check:
67425+ /* the rest of this checking is for IPv4 only */
67426+ if (!curr->ips)
67427+ goto exit;
67428+
67429+ if ((curr->ip_type & (1U << type)) &&
67430+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
67431+ goto exit;
67432+
67433+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
67434+ /* we don't place acls on raw sockets , and sometimes
67435+ dgram/ip sockets are opened for ioctl and not
67436+ bind/connect, so we'll fake a bind learn log */
67437+ if (type == SOCK_RAW || type == SOCK_PACKET) {
67438+ __u32 fakeip = 0;
67439+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67440+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
67441+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
67442+ gr_to_filename(current->exec_file->f_path.dentry,
67443+ current->exec_file->f_path.mnt) :
67444+ curr->filename, curr->filename,
67445+ &fakeip, 0, type,
67446+ protocol, GR_CONNECT, &current->signal->saved_ip);
67447+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
67448+ __u32 fakeip = 0;
67449+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67450+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
67451+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
67452+ gr_to_filename(current->exec_file->f_path.dentry,
67453+ current->exec_file->f_path.mnt) :
67454+ curr->filename, curr->filename,
67455+ &fakeip, 0, type,
67456+ protocol, GR_BIND, &current->signal->saved_ip);
67457+ }
67458+ /* we'll log when they use connect or bind */
67459+ goto exit;
67460+ }
67461+
67462+exit_fail:
67463+ if (domain == PF_INET)
67464+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
67465+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
67466+ else
67467+#ifndef CONFIG_IPV6
67468+ if (domain != PF_INET6)
67469+#endif
67470+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
67471+ gr_socktype_to_name(type), protocol);
67472+
67473+ return 0;
67474+exit:
67475+ return 1;
67476+}
67477+
67478+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
67479+{
67480+ if ((ip->mode & mode) &&
67481+ (ip_port >= ip->low) &&
67482+ (ip_port <= ip->high) &&
67483+ ((ntohl(ip_addr) & our_netmask) ==
67484+ (ntohl(our_addr) & our_netmask))
67485+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
67486+ && (ip->type & (1U << type))) {
67487+ if (ip->mode & GR_INVERT)
67488+ return 2; // specifically denied
67489+ else
67490+ return 1; // allowed
67491+ }
67492+
67493+ return 0; // not specifically allowed, may continue parsing
67494+}
67495+
67496+static int
67497+gr_search_connectbind(const int full_mode, struct sock *sk,
67498+ struct sockaddr_in *addr, const int type)
67499+{
67500+ char iface[IFNAMSIZ] = {0};
67501+ struct acl_subject_label *curr;
67502+ struct acl_ip_label *ip;
67503+ struct inet_sock *isk;
67504+ struct net_device *dev;
67505+ struct in_device *idev;
67506+ unsigned long i;
67507+ int ret;
67508+ int mode = full_mode & (GR_BIND | GR_CONNECT);
67509+ __u32 ip_addr = 0;
67510+ __u32 our_addr;
67511+ __u32 our_netmask;
67512+ char *p;
67513+ __u16 ip_port = 0;
67514+ const struct cred *cred = current_cred();
67515+
67516+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
67517+ return 0;
67518+
67519+ curr = current->acl;
67520+ isk = inet_sk(sk);
67521+
67522+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
67523+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
67524+ addr->sin_addr.s_addr = curr->inaddr_any_override;
67525+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
67526+ struct sockaddr_in saddr;
67527+ int err;
67528+
67529+ saddr.sin_family = AF_INET;
67530+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
67531+ saddr.sin_port = isk->inet_sport;
67532+
67533+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
67534+ if (err)
67535+ return err;
67536+
67537+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
67538+ if (err)
67539+ return err;
67540+ }
67541+
67542+ if (!curr->ips)
67543+ return 0;
67544+
67545+ ip_addr = addr->sin_addr.s_addr;
67546+ ip_port = ntohs(addr->sin_port);
67547+
67548+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
67549+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67550+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
67551+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
67552+ gr_to_filename(current->exec_file->f_path.dentry,
67553+ current->exec_file->f_path.mnt) :
67554+ curr->filename, curr->filename,
67555+ &ip_addr, ip_port, type,
67556+ sk->sk_protocol, mode, &current->signal->saved_ip);
67557+ return 0;
67558+ }
67559+
67560+ for (i = 0; i < curr->ip_num; i++) {
67561+ ip = *(curr->ips + i);
67562+ if (ip->iface != NULL) {
67563+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
67564+ p = strchr(iface, ':');
67565+ if (p != NULL)
67566+ *p = '\0';
67567+ dev = dev_get_by_name(sock_net(sk), iface);
67568+ if (dev == NULL)
67569+ continue;
67570+ idev = in_dev_get(dev);
67571+ if (idev == NULL) {
67572+ dev_put(dev);
67573+ continue;
67574+ }
67575+ rcu_read_lock();
67576+ for_ifa(idev) {
67577+ if (!strcmp(ip->iface, ifa->ifa_label)) {
67578+ our_addr = ifa->ifa_address;
67579+ our_netmask = 0xffffffff;
67580+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
67581+ if (ret == 1) {
67582+ rcu_read_unlock();
67583+ in_dev_put(idev);
67584+ dev_put(dev);
67585+ return 0;
67586+ } else if (ret == 2) {
67587+ rcu_read_unlock();
67588+ in_dev_put(idev);
67589+ dev_put(dev);
67590+ goto denied;
67591+ }
67592+ }
67593+ } endfor_ifa(idev);
67594+ rcu_read_unlock();
67595+ in_dev_put(idev);
67596+ dev_put(dev);
67597+ } else {
67598+ our_addr = ip->addr;
67599+ our_netmask = ip->netmask;
67600+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
67601+ if (ret == 1)
67602+ return 0;
67603+ else if (ret == 2)
67604+ goto denied;
67605+ }
67606+ }
67607+
67608+denied:
67609+ if (mode == GR_BIND)
67610+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
67611+ else if (mode == GR_CONNECT)
67612+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
67613+
67614+ return -EACCES;
67615+}
67616+
67617+int
67618+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
67619+{
67620+ /* always allow disconnection of dgram sockets with connect */
67621+ if (addr->sin_family == AF_UNSPEC)
67622+ return 0;
67623+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
67624+}
67625+
67626+int
67627+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
67628+{
67629+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
67630+}
67631+
67632+int gr_search_listen(struct socket *sock)
67633+{
67634+ struct sock *sk = sock->sk;
67635+ struct sockaddr_in addr;
67636+
67637+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
67638+ addr.sin_port = inet_sk(sk)->inet_sport;
67639+
67640+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
67641+}
67642+
67643+int gr_search_accept(struct socket *sock)
67644+{
67645+ struct sock *sk = sock->sk;
67646+ struct sockaddr_in addr;
67647+
67648+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
67649+ addr.sin_port = inet_sk(sk)->inet_sport;
67650+
67651+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
67652+}
67653+
67654+int
67655+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
67656+{
67657+ if (addr)
67658+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
67659+ else {
67660+ struct sockaddr_in sin;
67661+ const struct inet_sock *inet = inet_sk(sk);
67662+
67663+ sin.sin_addr.s_addr = inet->inet_daddr;
67664+ sin.sin_port = inet->inet_dport;
67665+
67666+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
67667+ }
67668+}
67669+
67670+int
67671+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
67672+{
67673+ struct sockaddr_in sin;
67674+
67675+ if (unlikely(skb->len < sizeof (struct udphdr)))
67676+ return 0; // skip this packet
67677+
67678+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
67679+ sin.sin_port = udp_hdr(skb)->source;
67680+
67681+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
67682+}
67683diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
67684new file mode 100644
67685index 0000000..25f54ef
67686--- /dev/null
67687+++ b/grsecurity/gracl_learn.c
67688@@ -0,0 +1,207 @@
67689+#include <linux/kernel.h>
67690+#include <linux/mm.h>
67691+#include <linux/sched.h>
67692+#include <linux/poll.h>
67693+#include <linux/string.h>
67694+#include <linux/file.h>
67695+#include <linux/types.h>
67696+#include <linux/vmalloc.h>
67697+#include <linux/grinternal.h>
67698+
67699+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
67700+ size_t count, loff_t *ppos);
67701+extern int gr_acl_is_enabled(void);
67702+
67703+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
67704+static int gr_learn_attached;
67705+
67706+/* use a 512k buffer */
67707+#define LEARN_BUFFER_SIZE (512 * 1024)
67708+
67709+static DEFINE_SPINLOCK(gr_learn_lock);
67710+static DEFINE_MUTEX(gr_learn_user_mutex);
67711+
67712+/* we need to maintain two buffers, so that the kernel context of grlearn
67713+ uses a semaphore around the userspace copying, and the other kernel contexts
67714+ use a spinlock when copying into the buffer, since they cannot sleep
67715+*/
67716+static char *learn_buffer;
67717+static char *learn_buffer_user;
67718+static int learn_buffer_len;
67719+static int learn_buffer_user_len;
67720+
67721+static ssize_t
67722+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
67723+{
67724+ DECLARE_WAITQUEUE(wait, current);
67725+ ssize_t retval = 0;
67726+
67727+ add_wait_queue(&learn_wait, &wait);
67728+ set_current_state(TASK_INTERRUPTIBLE);
67729+ do {
67730+ mutex_lock(&gr_learn_user_mutex);
67731+ spin_lock(&gr_learn_lock);
67732+ if (learn_buffer_len)
67733+ break;
67734+ spin_unlock(&gr_learn_lock);
67735+ mutex_unlock(&gr_learn_user_mutex);
67736+ if (file->f_flags & O_NONBLOCK) {
67737+ retval = -EAGAIN;
67738+ goto out;
67739+ }
67740+ if (signal_pending(current)) {
67741+ retval = -ERESTARTSYS;
67742+ goto out;
67743+ }
67744+
67745+ schedule();
67746+ } while (1);
67747+
67748+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
67749+ learn_buffer_user_len = learn_buffer_len;
67750+ retval = learn_buffer_len;
67751+ learn_buffer_len = 0;
67752+
67753+ spin_unlock(&gr_learn_lock);
67754+
67755+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
67756+ retval = -EFAULT;
67757+
67758+ mutex_unlock(&gr_learn_user_mutex);
67759+out:
67760+ set_current_state(TASK_RUNNING);
67761+ remove_wait_queue(&learn_wait, &wait);
67762+ return retval;
67763+}
67764+
67765+static unsigned int
67766+poll_learn(struct file * file, poll_table * wait)
67767+{
67768+ poll_wait(file, &learn_wait, wait);
67769+
67770+ if (learn_buffer_len)
67771+ return (POLLIN | POLLRDNORM);
67772+
67773+ return 0;
67774+}
67775+
67776+void
67777+gr_clear_learn_entries(void)
67778+{
67779+ char *tmp;
67780+
67781+ mutex_lock(&gr_learn_user_mutex);
67782+ spin_lock(&gr_learn_lock);
67783+ tmp = learn_buffer;
67784+ learn_buffer = NULL;
67785+ spin_unlock(&gr_learn_lock);
67786+ if (tmp)
67787+ vfree(tmp);
67788+ if (learn_buffer_user != NULL) {
67789+ vfree(learn_buffer_user);
67790+ learn_buffer_user = NULL;
67791+ }
67792+ learn_buffer_len = 0;
67793+ mutex_unlock(&gr_learn_user_mutex);
67794+
67795+ return;
67796+}
67797+
67798+void
67799+gr_add_learn_entry(const char *fmt, ...)
67800+{
67801+ va_list args;
67802+ unsigned int len;
67803+
67804+ if (!gr_learn_attached)
67805+ return;
67806+
67807+ spin_lock(&gr_learn_lock);
67808+
67809+ /* leave a gap at the end so we know when it's "full" but don't have to
67810+ compute the exact length of the string we're trying to append
67811+ */
67812+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
67813+ spin_unlock(&gr_learn_lock);
67814+ wake_up_interruptible(&learn_wait);
67815+ return;
67816+ }
67817+ if (learn_buffer == NULL) {
67818+ spin_unlock(&gr_learn_lock);
67819+ return;
67820+ }
67821+
67822+ va_start(args, fmt);
67823+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
67824+ va_end(args);
67825+
67826+ learn_buffer_len += len + 1;
67827+
67828+ spin_unlock(&gr_learn_lock);
67829+ wake_up_interruptible(&learn_wait);
67830+
67831+ return;
67832+}
67833+
67834+static int
67835+open_learn(struct inode *inode, struct file *file)
67836+{
67837+ if (file->f_mode & FMODE_READ && gr_learn_attached)
67838+ return -EBUSY;
67839+ if (file->f_mode & FMODE_READ) {
67840+ int retval = 0;
67841+ mutex_lock(&gr_learn_user_mutex);
67842+ if (learn_buffer == NULL)
67843+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
67844+ if (learn_buffer_user == NULL)
67845+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
67846+ if (learn_buffer == NULL) {
67847+ retval = -ENOMEM;
67848+ goto out_error;
67849+ }
67850+ if (learn_buffer_user == NULL) {
67851+ retval = -ENOMEM;
67852+ goto out_error;
67853+ }
67854+ learn_buffer_len = 0;
67855+ learn_buffer_user_len = 0;
67856+ gr_learn_attached = 1;
67857+out_error:
67858+ mutex_unlock(&gr_learn_user_mutex);
67859+ return retval;
67860+ }
67861+ return 0;
67862+}
67863+
67864+static int
67865+close_learn(struct inode *inode, struct file *file)
67866+{
67867+ if (file->f_mode & FMODE_READ) {
67868+ char *tmp = NULL;
67869+ mutex_lock(&gr_learn_user_mutex);
67870+ spin_lock(&gr_learn_lock);
67871+ tmp = learn_buffer;
67872+ learn_buffer = NULL;
67873+ spin_unlock(&gr_learn_lock);
67874+ if (tmp)
67875+ vfree(tmp);
67876+ if (learn_buffer_user != NULL) {
67877+ vfree(learn_buffer_user);
67878+ learn_buffer_user = NULL;
67879+ }
67880+ learn_buffer_len = 0;
67881+ learn_buffer_user_len = 0;
67882+ gr_learn_attached = 0;
67883+ mutex_unlock(&gr_learn_user_mutex);
67884+ }
67885+
67886+ return 0;
67887+}
67888+
67889+const struct file_operations grsec_fops = {
67890+ .read = read_learn,
67891+ .write = write_grsec_handler,
67892+ .open = open_learn,
67893+ .release = close_learn,
67894+ .poll = poll_learn,
67895+};
67896diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
67897new file mode 100644
67898index 0000000..39645c9
67899--- /dev/null
67900+++ b/grsecurity/gracl_res.c
67901@@ -0,0 +1,68 @@
67902+#include <linux/kernel.h>
67903+#include <linux/sched.h>
67904+#include <linux/gracl.h>
67905+#include <linux/grinternal.h>
67906+
67907+static const char *restab_log[] = {
67908+ [RLIMIT_CPU] = "RLIMIT_CPU",
67909+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
67910+ [RLIMIT_DATA] = "RLIMIT_DATA",
67911+ [RLIMIT_STACK] = "RLIMIT_STACK",
67912+ [RLIMIT_CORE] = "RLIMIT_CORE",
67913+ [RLIMIT_RSS] = "RLIMIT_RSS",
67914+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
67915+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
67916+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
67917+ [RLIMIT_AS] = "RLIMIT_AS",
67918+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
67919+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
67920+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
67921+ [RLIMIT_NICE] = "RLIMIT_NICE",
67922+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
67923+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
67924+ [GR_CRASH_RES] = "RLIMIT_CRASH"
67925+};
67926+
67927+void
67928+gr_log_resource(const struct task_struct *task,
67929+ const int res, const unsigned long wanted, const int gt)
67930+{
67931+ const struct cred *cred;
67932+ unsigned long rlim;
67933+
67934+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
67935+ return;
67936+
67937+ // not yet supported resource
67938+ if (unlikely(!restab_log[res]))
67939+ return;
67940+
67941+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
67942+ rlim = task_rlimit_max(task, res);
67943+ else
67944+ rlim = task_rlimit(task, res);
67945+
67946+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
67947+ return;
67948+
67949+ rcu_read_lock();
67950+ cred = __task_cred(task);
67951+
67952+ if (res == RLIMIT_NPROC &&
67953+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
67954+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
67955+ goto out_rcu_unlock;
67956+ else if (res == RLIMIT_MEMLOCK &&
67957+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
67958+ goto out_rcu_unlock;
67959+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
67960+ goto out_rcu_unlock;
67961+ rcu_read_unlock();
67962+
67963+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
67964+
67965+ return;
67966+out_rcu_unlock:
67967+ rcu_read_unlock();
67968+ return;
67969+}
67970diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
67971new file mode 100644
67972index 0000000..3c38bfe
67973--- /dev/null
67974+++ b/grsecurity/gracl_segv.c
67975@@ -0,0 +1,305 @@
67976+#include <linux/kernel.h>
67977+#include <linux/mm.h>
67978+#include <asm/uaccess.h>
67979+#include <asm/errno.h>
67980+#include <asm/mman.h>
67981+#include <net/sock.h>
67982+#include <linux/file.h>
67983+#include <linux/fs.h>
67984+#include <linux/net.h>
67985+#include <linux/in.h>
67986+#include <linux/slab.h>
67987+#include <linux/types.h>
67988+#include <linux/sched.h>
67989+#include <linux/timer.h>
67990+#include <linux/gracl.h>
67991+#include <linux/grsecurity.h>
67992+#include <linux/grinternal.h>
67993+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
67994+#include <linux/magic.h>
67995+#include <linux/pagemap.h>
67996+#include "../fs/btrfs/async-thread.h"
67997+#include "../fs/btrfs/ctree.h"
67998+#include "../fs/btrfs/btrfs_inode.h"
67999+#endif
68000+
68001+static struct crash_uid *uid_set;
68002+static unsigned short uid_used;
68003+static DEFINE_SPINLOCK(gr_uid_lock);
68004+extern rwlock_t gr_inode_lock;
68005+extern struct acl_subject_label *
68006+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
68007+ struct acl_role_label *role);
68008+
68009+static inline dev_t __get_dev(const struct dentry *dentry)
68010+{
68011+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
68012+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
68013+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
68014+ else
68015+#endif
68016+ return dentry->d_sb->s_dev;
68017+}
68018+
68019+int
68020+gr_init_uidset(void)
68021+{
68022+ uid_set =
68023+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
68024+ uid_used = 0;
68025+
68026+ return uid_set ? 1 : 0;
68027+}
68028+
68029+void
68030+gr_free_uidset(void)
68031+{
68032+ if (uid_set)
68033+ kfree(uid_set);
68034+
68035+ return;
68036+}
68037+
68038+int
68039+gr_find_uid(const uid_t uid)
68040+{
68041+ struct crash_uid *tmp = uid_set;
68042+ uid_t buid;
68043+ int low = 0, high = uid_used - 1, mid;
68044+
68045+ while (high >= low) {
68046+ mid = (low + high) >> 1;
68047+ buid = tmp[mid].uid;
68048+ if (buid == uid)
68049+ return mid;
68050+ if (buid > uid)
68051+ high = mid - 1;
68052+ if (buid < uid)
68053+ low = mid + 1;
68054+ }
68055+
68056+ return -1;
68057+}
68058+
68059+static __inline__ void
68060+gr_insertsort(void)
68061+{
68062+ unsigned short i, j;
68063+ struct crash_uid index;
68064+
68065+ for (i = 1; i < uid_used; i++) {
68066+ index = uid_set[i];
68067+ j = i;
68068+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
68069+ uid_set[j] = uid_set[j - 1];
68070+ j--;
68071+ }
68072+ uid_set[j] = index;
68073+ }
68074+
68075+ return;
68076+}
68077+
68078+static __inline__ void
68079+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
68080+{
68081+ int loc;
68082+ uid_t uid = GR_GLOBAL_UID(kuid);
68083+
68084+ if (uid_used == GR_UIDTABLE_MAX)
68085+ return;
68086+
68087+ loc = gr_find_uid(uid);
68088+
68089+ if (loc >= 0) {
68090+ uid_set[loc].expires = expires;
68091+ return;
68092+ }
68093+
68094+ uid_set[uid_used].uid = uid;
68095+ uid_set[uid_used].expires = expires;
68096+ uid_used++;
68097+
68098+ gr_insertsort();
68099+
68100+ return;
68101+}
68102+
68103+void
68104+gr_remove_uid(const unsigned short loc)
68105+{
68106+ unsigned short i;
68107+
68108+ for (i = loc + 1; i < uid_used; i++)
68109+ uid_set[i - 1] = uid_set[i];
68110+
68111+ uid_used--;
68112+
68113+ return;
68114+}
68115+
68116+int
68117+gr_check_crash_uid(const kuid_t kuid)
68118+{
68119+ int loc;
68120+ int ret = 0;
68121+ uid_t uid;
68122+
68123+ if (unlikely(!gr_acl_is_enabled()))
68124+ return 0;
68125+
68126+ uid = GR_GLOBAL_UID(kuid);
68127+
68128+ spin_lock(&gr_uid_lock);
68129+ loc = gr_find_uid(uid);
68130+
68131+ if (loc < 0)
68132+ goto out_unlock;
68133+
68134+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
68135+ gr_remove_uid(loc);
68136+ else
68137+ ret = 1;
68138+
68139+out_unlock:
68140+ spin_unlock(&gr_uid_lock);
68141+ return ret;
68142+}
68143+
68144+static __inline__ int
68145+proc_is_setxid(const struct cred *cred)
68146+{
68147+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
68148+ !uid_eq(cred->uid, cred->fsuid))
68149+ return 1;
68150+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
68151+ !gid_eq(cred->gid, cred->fsgid))
68152+ return 1;
68153+
68154+ return 0;
68155+}
68156+
68157+extern int gr_fake_force_sig(int sig, struct task_struct *t);
68158+
68159+void
68160+gr_handle_crash(struct task_struct *task, const int sig)
68161+{
68162+ struct acl_subject_label *curr;
68163+ struct task_struct *tsk, *tsk2;
68164+ const struct cred *cred;
68165+ const struct cred *cred2;
68166+
68167+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
68168+ return;
68169+
68170+ if (unlikely(!gr_acl_is_enabled()))
68171+ return;
68172+
68173+ curr = task->acl;
68174+
68175+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
68176+ return;
68177+
68178+ if (time_before_eq(curr->expires, get_seconds())) {
68179+ curr->expires = 0;
68180+ curr->crashes = 0;
68181+ }
68182+
68183+ curr->crashes++;
68184+
68185+ if (!curr->expires)
68186+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
68187+
68188+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
68189+ time_after(curr->expires, get_seconds())) {
68190+ rcu_read_lock();
68191+ cred = __task_cred(task);
68192+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
68193+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
68194+ spin_lock(&gr_uid_lock);
68195+ gr_insert_uid(cred->uid, curr->expires);
68196+ spin_unlock(&gr_uid_lock);
68197+ curr->expires = 0;
68198+ curr->crashes = 0;
68199+ read_lock(&tasklist_lock);
68200+ do_each_thread(tsk2, tsk) {
68201+ cred2 = __task_cred(tsk);
68202+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
68203+ gr_fake_force_sig(SIGKILL, tsk);
68204+ } while_each_thread(tsk2, tsk);
68205+ read_unlock(&tasklist_lock);
68206+ } else {
68207+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
68208+ read_lock(&tasklist_lock);
68209+ read_lock(&grsec_exec_file_lock);
68210+ do_each_thread(tsk2, tsk) {
68211+ if (likely(tsk != task)) {
68212+ // if this thread has the same subject as the one that triggered
68213+ // RES_CRASH and it's the same binary, kill it
68214+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
68215+ gr_fake_force_sig(SIGKILL, tsk);
68216+ }
68217+ } while_each_thread(tsk2, tsk);
68218+ read_unlock(&grsec_exec_file_lock);
68219+ read_unlock(&tasklist_lock);
68220+ }
68221+ rcu_read_unlock();
68222+ }
68223+
68224+ return;
68225+}
68226+
68227+int
68228+gr_check_crash_exec(const struct file *filp)
68229+{
68230+ struct acl_subject_label *curr;
68231+
68232+ if (unlikely(!gr_acl_is_enabled()))
68233+ return 0;
68234+
68235+ read_lock(&gr_inode_lock);
68236+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
68237+ __get_dev(filp->f_path.dentry),
68238+ current->role);
68239+ read_unlock(&gr_inode_lock);
68240+
68241+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
68242+ (!curr->crashes && !curr->expires))
68243+ return 0;
68244+
68245+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
68246+ time_after(curr->expires, get_seconds()))
68247+ return 1;
68248+ else if (time_before_eq(curr->expires, get_seconds())) {
68249+ curr->crashes = 0;
68250+ curr->expires = 0;
68251+ }
68252+
68253+ return 0;
68254+}
68255+
68256+void
68257+gr_handle_alertkill(struct task_struct *task)
68258+{
68259+ struct acl_subject_label *curracl;
68260+ __u32 curr_ip;
68261+ struct task_struct *p, *p2;
68262+
68263+ if (unlikely(!gr_acl_is_enabled()))
68264+ return;
68265+
68266+ curracl = task->acl;
68267+ curr_ip = task->signal->curr_ip;
68268+
68269+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
68270+ read_lock(&tasklist_lock);
68271+ do_each_thread(p2, p) {
68272+ if (p->signal->curr_ip == curr_ip)
68273+ gr_fake_force_sig(SIGKILL, p);
68274+ } while_each_thread(p2, p);
68275+ read_unlock(&tasklist_lock);
68276+ } else if (curracl->mode & GR_KILLPROC)
68277+ gr_fake_force_sig(SIGKILL, task);
68278+
68279+ return;
68280+}
68281diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
68282new file mode 100644
68283index 0000000..98011b0
68284--- /dev/null
68285+++ b/grsecurity/gracl_shm.c
68286@@ -0,0 +1,40 @@
68287+#include <linux/kernel.h>
68288+#include <linux/mm.h>
68289+#include <linux/sched.h>
68290+#include <linux/file.h>
68291+#include <linux/ipc.h>
68292+#include <linux/gracl.h>
68293+#include <linux/grsecurity.h>
68294+#include <linux/grinternal.h>
68295+
68296+int
68297+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
68298+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
68299+{
68300+ struct task_struct *task;
68301+
68302+ if (!gr_acl_is_enabled())
68303+ return 1;
68304+
68305+ rcu_read_lock();
68306+ read_lock(&tasklist_lock);
68307+
68308+ task = find_task_by_vpid(shm_cprid);
68309+
68310+ if (unlikely(!task))
68311+ task = find_task_by_vpid(shm_lapid);
68312+
68313+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
68314+ (task_pid_nr(task) == shm_lapid)) &&
68315+ (task->acl->mode & GR_PROTSHM) &&
68316+ (task->acl != current->acl))) {
68317+ read_unlock(&tasklist_lock);
68318+ rcu_read_unlock();
68319+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
68320+ return 0;
68321+ }
68322+ read_unlock(&tasklist_lock);
68323+ rcu_read_unlock();
68324+
68325+ return 1;
68326+}
68327diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
68328new file mode 100644
68329index 0000000..bc0be01
68330--- /dev/null
68331+++ b/grsecurity/grsec_chdir.c
68332@@ -0,0 +1,19 @@
68333+#include <linux/kernel.h>
68334+#include <linux/sched.h>
68335+#include <linux/fs.h>
68336+#include <linux/file.h>
68337+#include <linux/grsecurity.h>
68338+#include <linux/grinternal.h>
68339+
68340+void
68341+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
68342+{
68343+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
68344+ if ((grsec_enable_chdir && grsec_enable_group &&
68345+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
68346+ !grsec_enable_group)) {
68347+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
68348+ }
68349+#endif
68350+ return;
68351+}
68352diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
68353new file mode 100644
68354index 0000000..bd6e105
68355--- /dev/null
68356+++ b/grsecurity/grsec_chroot.c
68357@@ -0,0 +1,370 @@
68358+#include <linux/kernel.h>
68359+#include <linux/module.h>
68360+#include <linux/sched.h>
68361+#include <linux/file.h>
68362+#include <linux/fs.h>
68363+#include <linux/mount.h>
68364+#include <linux/types.h>
68365+#include "../fs/mount.h"
68366+#include <linux/grsecurity.h>
68367+#include <linux/grinternal.h>
68368+
68369+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
68370+static int gr_init_ran;
68371+#endif
68372+
68373+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
68374+{
68375+#ifdef CONFIG_GRKERNSEC
68376+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
68377+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
68378+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
68379+ && gr_init_ran
68380+#endif
68381+ )
68382+ task->gr_is_chrooted = 1;
68383+ else {
68384+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
68385+ if (task_pid_nr(task) == 1 && !gr_init_ran)
68386+ gr_init_ran = 1;
68387+#endif
68388+ task->gr_is_chrooted = 0;
68389+ }
68390+
68391+ task->gr_chroot_dentry = path->dentry;
68392+#endif
68393+ return;
68394+}
68395+
68396+void gr_clear_chroot_entries(struct task_struct *task)
68397+{
68398+#ifdef CONFIG_GRKERNSEC
68399+ task->gr_is_chrooted = 0;
68400+ task->gr_chroot_dentry = NULL;
68401+#endif
68402+ return;
68403+}
68404+
68405+int
68406+gr_handle_chroot_unix(const pid_t pid)
68407+{
68408+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
68409+ struct task_struct *p;
68410+
68411+ if (unlikely(!grsec_enable_chroot_unix))
68412+ return 1;
68413+
68414+ if (likely(!proc_is_chrooted(current)))
68415+ return 1;
68416+
68417+ rcu_read_lock();
68418+ read_lock(&tasklist_lock);
68419+ p = find_task_by_vpid_unrestricted(pid);
68420+ if (unlikely(p && !have_same_root(current, p))) {
68421+ read_unlock(&tasklist_lock);
68422+ rcu_read_unlock();
68423+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
68424+ return 0;
68425+ }
68426+ read_unlock(&tasklist_lock);
68427+ rcu_read_unlock();
68428+#endif
68429+ return 1;
68430+}
68431+
68432+int
68433+gr_handle_chroot_nice(void)
68434+{
68435+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
68436+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
68437+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
68438+ return -EPERM;
68439+ }
68440+#endif
68441+ return 0;
68442+}
68443+
68444+int
68445+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
68446+{
68447+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
68448+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
68449+ && proc_is_chrooted(current)) {
68450+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
68451+ return -EACCES;
68452+ }
68453+#endif
68454+ return 0;
68455+}
68456+
68457+int
68458+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
68459+{
68460+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68461+ struct task_struct *p;
68462+ int ret = 0;
68463+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
68464+ return ret;
68465+
68466+ read_lock(&tasklist_lock);
68467+ do_each_pid_task(pid, type, p) {
68468+ if (!have_same_root(current, p)) {
68469+ ret = 1;
68470+ goto out;
68471+ }
68472+ } while_each_pid_task(pid, type, p);
68473+out:
68474+ read_unlock(&tasklist_lock);
68475+ return ret;
68476+#endif
68477+ return 0;
68478+}
68479+
68480+int
68481+gr_pid_is_chrooted(struct task_struct *p)
68482+{
68483+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68484+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
68485+ return 0;
68486+
68487+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
68488+ !have_same_root(current, p)) {
68489+ return 1;
68490+ }
68491+#endif
68492+ return 0;
68493+}
68494+
68495+EXPORT_SYMBOL(gr_pid_is_chrooted);
68496+
68497+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
68498+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
68499+{
68500+ struct path path, currentroot;
68501+ int ret = 0;
68502+
68503+ path.dentry = (struct dentry *)u_dentry;
68504+ path.mnt = (struct vfsmount *)u_mnt;
68505+ get_fs_root(current->fs, &currentroot);
68506+ if (path_is_under(&path, &currentroot))
68507+ ret = 1;
68508+ path_put(&currentroot);
68509+
68510+ return ret;
68511+}
68512+#endif
68513+
68514+int
68515+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
68516+{
68517+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
68518+ if (!grsec_enable_chroot_fchdir)
68519+ return 1;
68520+
68521+ if (!proc_is_chrooted(current))
68522+ return 1;
68523+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
68524+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
68525+ return 0;
68526+ }
68527+#endif
68528+ return 1;
68529+}
68530+
68531+int
68532+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
68533+ const time_t shm_createtime)
68534+{
68535+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
68536+ struct task_struct *p;
68537+ time_t starttime;
68538+
68539+ if (unlikely(!grsec_enable_chroot_shmat))
68540+ return 1;
68541+
68542+ if (likely(!proc_is_chrooted(current)))
68543+ return 1;
68544+
68545+ rcu_read_lock();
68546+ read_lock(&tasklist_lock);
68547+
68548+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
68549+ starttime = p->start_time.tv_sec;
68550+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
68551+ if (have_same_root(current, p)) {
68552+ goto allow;
68553+ } else {
68554+ read_unlock(&tasklist_lock);
68555+ rcu_read_unlock();
68556+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
68557+ return 0;
68558+ }
68559+ }
68560+ /* creator exited, pid reuse, fall through to next check */
68561+ }
68562+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
68563+ if (unlikely(!have_same_root(current, p))) {
68564+ read_unlock(&tasklist_lock);
68565+ rcu_read_unlock();
68566+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
68567+ return 0;
68568+ }
68569+ }
68570+
68571+allow:
68572+ read_unlock(&tasklist_lock);
68573+ rcu_read_unlock();
68574+#endif
68575+ return 1;
68576+}
68577+
68578+void
68579+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
68580+{
68581+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
68582+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
68583+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
68584+#endif
68585+ return;
68586+}
68587+
68588+int
68589+gr_handle_chroot_mknod(const struct dentry *dentry,
68590+ const struct vfsmount *mnt, const int mode)
68591+{
68592+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
68593+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
68594+ proc_is_chrooted(current)) {
68595+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
68596+ return -EPERM;
68597+ }
68598+#endif
68599+ return 0;
68600+}
68601+
68602+int
68603+gr_handle_chroot_mount(const struct dentry *dentry,
68604+ const struct vfsmount *mnt, const char *dev_name)
68605+{
68606+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
68607+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
68608+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
68609+ return -EPERM;
68610+ }
68611+#endif
68612+ return 0;
68613+}
68614+
68615+int
68616+gr_handle_chroot_pivot(void)
68617+{
68618+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
68619+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
68620+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
68621+ return -EPERM;
68622+ }
68623+#endif
68624+ return 0;
68625+}
68626+
68627+int
68628+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
68629+{
68630+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
68631+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
68632+ !gr_is_outside_chroot(dentry, mnt)) {
68633+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
68634+ return -EPERM;
68635+ }
68636+#endif
68637+ return 0;
68638+}
68639+
68640+extern const char *captab_log[];
68641+extern int captab_log_entries;
68642+
68643+int
68644+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
68645+{
68646+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
68647+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
68648+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
68649+ if (cap_raised(chroot_caps, cap)) {
68650+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
68651+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
68652+ }
68653+ return 0;
68654+ }
68655+ }
68656+#endif
68657+ return 1;
68658+}
68659+
68660+int
68661+gr_chroot_is_capable(const int cap)
68662+{
68663+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
68664+ return gr_task_chroot_is_capable(current, current_cred(), cap);
68665+#endif
68666+ return 1;
68667+}
68668+
68669+int
68670+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
68671+{
68672+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
68673+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
68674+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
68675+ if (cap_raised(chroot_caps, cap)) {
68676+ return 0;
68677+ }
68678+ }
68679+#endif
68680+ return 1;
68681+}
68682+
68683+int
68684+gr_chroot_is_capable_nolog(const int cap)
68685+{
68686+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
68687+ return gr_task_chroot_is_capable_nolog(current, cap);
68688+#endif
68689+ return 1;
68690+}
68691+
68692+int
68693+gr_handle_chroot_sysctl(const int op)
68694+{
68695+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
68696+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
68697+ proc_is_chrooted(current))
68698+ return -EACCES;
68699+#endif
68700+ return 0;
68701+}
68702+
68703+void
68704+gr_handle_chroot_chdir(const struct path *path)
68705+{
68706+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
68707+ if (grsec_enable_chroot_chdir)
68708+ set_fs_pwd(current->fs, path);
68709+#endif
68710+ return;
68711+}
68712+
68713+int
68714+gr_handle_chroot_chmod(const struct dentry *dentry,
68715+ const struct vfsmount *mnt, const int mode)
68716+{
68717+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
68718+ /* allow chmod +s on directories, but not files */
68719+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
68720+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
68721+ proc_is_chrooted(current)) {
68722+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
68723+ return -EPERM;
68724+ }
68725+#endif
68726+ return 0;
68727+}
68728diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
68729new file mode 100644
68730index 0000000..ce65ceb
68731--- /dev/null
68732+++ b/grsecurity/grsec_disabled.c
68733@@ -0,0 +1,434 @@
68734+#include <linux/kernel.h>
68735+#include <linux/module.h>
68736+#include <linux/sched.h>
68737+#include <linux/file.h>
68738+#include <linux/fs.h>
68739+#include <linux/kdev_t.h>
68740+#include <linux/net.h>
68741+#include <linux/in.h>
68742+#include <linux/ip.h>
68743+#include <linux/skbuff.h>
68744+#include <linux/sysctl.h>
68745+
68746+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
68747+void
68748+pax_set_initial_flags(struct linux_binprm *bprm)
68749+{
68750+ return;
68751+}
68752+#endif
68753+
68754+#ifdef CONFIG_SYSCTL
68755+__u32
68756+gr_handle_sysctl(const struct ctl_table * table, const int op)
68757+{
68758+ return 0;
68759+}
68760+#endif
68761+
68762+#ifdef CONFIG_TASKSTATS
68763+int gr_is_taskstats_denied(int pid)
68764+{
68765+ return 0;
68766+}
68767+#endif
68768+
68769+int
68770+gr_acl_is_enabled(void)
68771+{
68772+ return 0;
68773+}
68774+
68775+void
68776+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
68777+{
68778+ return;
68779+}
68780+
68781+int
68782+gr_handle_rawio(const struct inode *inode)
68783+{
68784+ return 0;
68785+}
68786+
68787+void
68788+gr_acl_handle_psacct(struct task_struct *task, const long code)
68789+{
68790+ return;
68791+}
68792+
68793+int
68794+gr_handle_ptrace(struct task_struct *task, const long request)
68795+{
68796+ return 0;
68797+}
68798+
68799+int
68800+gr_handle_proc_ptrace(struct task_struct *task)
68801+{
68802+ return 0;
68803+}
68804+
68805+int
68806+gr_set_acls(const int type)
68807+{
68808+ return 0;
68809+}
68810+
68811+int
68812+gr_check_hidden_task(const struct task_struct *tsk)
68813+{
68814+ return 0;
68815+}
68816+
68817+int
68818+gr_check_protected_task(const struct task_struct *task)
68819+{
68820+ return 0;
68821+}
68822+
68823+int
68824+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
68825+{
68826+ return 0;
68827+}
68828+
68829+void
68830+gr_copy_label(struct task_struct *tsk)
68831+{
68832+ return;
68833+}
68834+
68835+void
68836+gr_set_pax_flags(struct task_struct *task)
68837+{
68838+ return;
68839+}
68840+
68841+int
68842+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
68843+ const int unsafe_share)
68844+{
68845+ return 0;
68846+}
68847+
68848+void
68849+gr_handle_delete(const ino_t ino, const dev_t dev)
68850+{
68851+ return;
68852+}
68853+
68854+void
68855+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
68856+{
68857+ return;
68858+}
68859+
68860+void
68861+gr_handle_crash(struct task_struct *task, const int sig)
68862+{
68863+ return;
68864+}
68865+
68866+int
68867+gr_check_crash_exec(const struct file *filp)
68868+{
68869+ return 0;
68870+}
68871+
68872+int
68873+gr_check_crash_uid(const kuid_t uid)
68874+{
68875+ return 0;
68876+}
68877+
68878+void
68879+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
68880+ struct dentry *old_dentry,
68881+ struct dentry *new_dentry,
68882+ struct vfsmount *mnt, const __u8 replace)
68883+{
68884+ return;
68885+}
68886+
68887+int
68888+gr_search_socket(const int family, const int type, const int protocol)
68889+{
68890+ return 1;
68891+}
68892+
68893+int
68894+gr_search_connectbind(const int mode, const struct socket *sock,
68895+ const struct sockaddr_in *addr)
68896+{
68897+ return 0;
68898+}
68899+
68900+void
68901+gr_handle_alertkill(struct task_struct *task)
68902+{
68903+ return;
68904+}
68905+
68906+__u32
68907+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
68908+{
68909+ return 1;
68910+}
68911+
68912+__u32
68913+gr_acl_handle_hidden_file(const struct dentry * dentry,
68914+ const struct vfsmount * mnt)
68915+{
68916+ return 1;
68917+}
68918+
68919+__u32
68920+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
68921+ int acc_mode)
68922+{
68923+ return 1;
68924+}
68925+
68926+__u32
68927+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
68928+{
68929+ return 1;
68930+}
68931+
68932+__u32
68933+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
68934+{
68935+ return 1;
68936+}
68937+
68938+int
68939+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
68940+ unsigned int *vm_flags)
68941+{
68942+ return 1;
68943+}
68944+
68945+__u32
68946+gr_acl_handle_truncate(const struct dentry * dentry,
68947+ const struct vfsmount * mnt)
68948+{
68949+ return 1;
68950+}
68951+
68952+__u32
68953+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
68954+{
68955+ return 1;
68956+}
68957+
68958+__u32
68959+gr_acl_handle_access(const struct dentry * dentry,
68960+ const struct vfsmount * mnt, const int fmode)
68961+{
68962+ return 1;
68963+}
68964+
68965+__u32
68966+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
68967+ umode_t *mode)
68968+{
68969+ return 1;
68970+}
68971+
68972+__u32
68973+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
68974+{
68975+ return 1;
68976+}
68977+
68978+__u32
68979+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
68980+{
68981+ return 1;
68982+}
68983+
68984+void
68985+grsecurity_init(void)
68986+{
68987+ return;
68988+}
68989+
68990+umode_t gr_acl_umask(void)
68991+{
68992+ return 0;
68993+}
68994+
68995+__u32
68996+gr_acl_handle_mknod(const struct dentry * new_dentry,
68997+ const struct dentry * parent_dentry,
68998+ const struct vfsmount * parent_mnt,
68999+ const int mode)
69000+{
69001+ return 1;
69002+}
69003+
69004+__u32
69005+gr_acl_handle_mkdir(const struct dentry * new_dentry,
69006+ const struct dentry * parent_dentry,
69007+ const struct vfsmount * parent_mnt)
69008+{
69009+ return 1;
69010+}
69011+
69012+__u32
69013+gr_acl_handle_symlink(const struct dentry * new_dentry,
69014+ const struct dentry * parent_dentry,
69015+ const struct vfsmount * parent_mnt, const struct filename *from)
69016+{
69017+ return 1;
69018+}
69019+
69020+__u32
69021+gr_acl_handle_link(const struct dentry * new_dentry,
69022+ const struct dentry * parent_dentry,
69023+ const struct vfsmount * parent_mnt,
69024+ const struct dentry * old_dentry,
69025+ const struct vfsmount * old_mnt, const struct filename *to)
69026+{
69027+ return 1;
69028+}
69029+
69030+int
69031+gr_acl_handle_rename(const struct dentry *new_dentry,
69032+ const struct dentry *parent_dentry,
69033+ const struct vfsmount *parent_mnt,
69034+ const struct dentry *old_dentry,
69035+ const struct inode *old_parent_inode,
69036+ const struct vfsmount *old_mnt, const struct filename *newname)
69037+{
69038+ return 0;
69039+}
69040+
69041+int
69042+gr_acl_handle_filldir(const struct file *file, const char *name,
69043+ const int namelen, const ino_t ino)
69044+{
69045+ return 1;
69046+}
69047+
69048+int
69049+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
69050+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
69051+{
69052+ return 1;
69053+}
69054+
69055+int
69056+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
69057+{
69058+ return 0;
69059+}
69060+
69061+int
69062+gr_search_accept(const struct socket *sock)
69063+{
69064+ return 0;
69065+}
69066+
69067+int
69068+gr_search_listen(const struct socket *sock)
69069+{
69070+ return 0;
69071+}
69072+
69073+int
69074+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
69075+{
69076+ return 0;
69077+}
69078+
69079+__u32
69080+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
69081+{
69082+ return 1;
69083+}
69084+
69085+__u32
69086+gr_acl_handle_creat(const struct dentry * dentry,
69087+ const struct dentry * p_dentry,
69088+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
69089+ const int imode)
69090+{
69091+ return 1;
69092+}
69093+
69094+void
69095+gr_acl_handle_exit(void)
69096+{
69097+ return;
69098+}
69099+
69100+int
69101+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
69102+{
69103+ return 1;
69104+}
69105+
69106+void
69107+gr_set_role_label(const kuid_t uid, const kgid_t gid)
69108+{
69109+ return;
69110+}
69111+
69112+int
69113+gr_acl_handle_procpidmem(const struct task_struct *task)
69114+{
69115+ return 0;
69116+}
69117+
69118+int
69119+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
69120+{
69121+ return 0;
69122+}
69123+
69124+int
69125+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
69126+{
69127+ return 0;
69128+}
69129+
69130+void
69131+gr_set_kernel_label(struct task_struct *task)
69132+{
69133+ return;
69134+}
69135+
69136+int
69137+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
69138+{
69139+ return 0;
69140+}
69141+
69142+int
69143+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
69144+{
69145+ return 0;
69146+}
69147+
69148+int gr_acl_enable_at_secure(void)
69149+{
69150+ return 0;
69151+}
69152+
69153+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
69154+{
69155+ return dentry->d_sb->s_dev;
69156+}
69157+
69158+void gr_put_exec_file(struct task_struct *task)
69159+{
69160+ return;
69161+}
69162+
69163+EXPORT_SYMBOL(gr_set_kernel_label);
69164+#ifdef CONFIG_SECURITY
69165+EXPORT_SYMBOL(gr_check_user_change);
69166+EXPORT_SYMBOL(gr_check_group_change);
69167+#endif
69168diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
69169new file mode 100644
69170index 0000000..387032b
69171--- /dev/null
69172+++ b/grsecurity/grsec_exec.c
69173@@ -0,0 +1,187 @@
69174+#include <linux/kernel.h>
69175+#include <linux/sched.h>
69176+#include <linux/file.h>
69177+#include <linux/binfmts.h>
69178+#include <linux/fs.h>
69179+#include <linux/types.h>
69180+#include <linux/grdefs.h>
69181+#include <linux/grsecurity.h>
69182+#include <linux/grinternal.h>
69183+#include <linux/capability.h>
69184+#include <linux/module.h>
69185+#include <linux/compat.h>
69186+
69187+#include <asm/uaccess.h>
69188+
69189+#ifdef CONFIG_GRKERNSEC_EXECLOG
69190+static char gr_exec_arg_buf[132];
69191+static DEFINE_MUTEX(gr_exec_arg_mutex);
69192+#endif
69193+
69194+struct user_arg_ptr {
69195+#ifdef CONFIG_COMPAT
69196+ bool is_compat;
69197+#endif
69198+ union {
69199+ const char __user *const __user *native;
69200+#ifdef CONFIG_COMPAT
69201+ const compat_uptr_t __user *compat;
69202+#endif
69203+ } ptr;
69204+};
69205+
69206+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
69207+
69208+void
69209+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
69210+{
69211+#ifdef CONFIG_GRKERNSEC_EXECLOG
69212+ char *grarg = gr_exec_arg_buf;
69213+ unsigned int i, x, execlen = 0;
69214+ char c;
69215+
69216+ if (!((grsec_enable_execlog && grsec_enable_group &&
69217+ in_group_p(grsec_audit_gid))
69218+ || (grsec_enable_execlog && !grsec_enable_group)))
69219+ return;
69220+
69221+ mutex_lock(&gr_exec_arg_mutex);
69222+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
69223+
69224+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
69225+ const char __user *p;
69226+ unsigned int len;
69227+
69228+ p = get_user_arg_ptr(argv, i);
69229+ if (IS_ERR(p))
69230+ goto log;
69231+
69232+ len = strnlen_user(p, 128 - execlen);
69233+ if (len > 128 - execlen)
69234+ len = 128 - execlen;
69235+ else if (len > 0)
69236+ len--;
69237+ if (copy_from_user(grarg + execlen, p, len))
69238+ goto log;
69239+
69240+ /* rewrite unprintable characters */
69241+ for (x = 0; x < len; x++) {
69242+ c = *(grarg + execlen + x);
69243+ if (c < 32 || c > 126)
69244+ *(grarg + execlen + x) = ' ';
69245+ }
69246+
69247+ execlen += len;
69248+ *(grarg + execlen) = ' ';
69249+ *(grarg + execlen + 1) = '\0';
69250+ execlen++;
69251+ }
69252+
69253+ log:
69254+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
69255+ bprm->file->f_path.mnt, grarg);
69256+ mutex_unlock(&gr_exec_arg_mutex);
69257+#endif
69258+ return;
69259+}
69260+
69261+#ifdef CONFIG_GRKERNSEC
69262+extern int gr_acl_is_capable(const int cap);
69263+extern int gr_acl_is_capable_nolog(const int cap);
69264+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
69265+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
69266+extern int gr_chroot_is_capable(const int cap);
69267+extern int gr_chroot_is_capable_nolog(const int cap);
69268+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
69269+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
69270+#endif
69271+
69272+const char *captab_log[] = {
69273+ "CAP_CHOWN",
69274+ "CAP_DAC_OVERRIDE",
69275+ "CAP_DAC_READ_SEARCH",
69276+ "CAP_FOWNER",
69277+ "CAP_FSETID",
69278+ "CAP_KILL",
69279+ "CAP_SETGID",
69280+ "CAP_SETUID",
69281+ "CAP_SETPCAP",
69282+ "CAP_LINUX_IMMUTABLE",
69283+ "CAP_NET_BIND_SERVICE",
69284+ "CAP_NET_BROADCAST",
69285+ "CAP_NET_ADMIN",
69286+ "CAP_NET_RAW",
69287+ "CAP_IPC_LOCK",
69288+ "CAP_IPC_OWNER",
69289+ "CAP_SYS_MODULE",
69290+ "CAP_SYS_RAWIO",
69291+ "CAP_SYS_CHROOT",
69292+ "CAP_SYS_PTRACE",
69293+ "CAP_SYS_PACCT",
69294+ "CAP_SYS_ADMIN",
69295+ "CAP_SYS_BOOT",
69296+ "CAP_SYS_NICE",
69297+ "CAP_SYS_RESOURCE",
69298+ "CAP_SYS_TIME",
69299+ "CAP_SYS_TTY_CONFIG",
69300+ "CAP_MKNOD",
69301+ "CAP_LEASE",
69302+ "CAP_AUDIT_WRITE",
69303+ "CAP_AUDIT_CONTROL",
69304+ "CAP_SETFCAP",
69305+ "CAP_MAC_OVERRIDE",
69306+ "CAP_MAC_ADMIN",
69307+ "CAP_SYSLOG",
69308+ "CAP_WAKE_ALARM"
69309+};
69310+
69311+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
69312+
69313+int gr_is_capable(const int cap)
69314+{
69315+#ifdef CONFIG_GRKERNSEC
69316+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
69317+ return 1;
69318+ return 0;
69319+#else
69320+ return 1;
69321+#endif
69322+}
69323+
69324+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
69325+{
69326+#ifdef CONFIG_GRKERNSEC
69327+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
69328+ return 1;
69329+ return 0;
69330+#else
69331+ return 1;
69332+#endif
69333+}
69334+
69335+int gr_is_capable_nolog(const int cap)
69336+{
69337+#ifdef CONFIG_GRKERNSEC
69338+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
69339+ return 1;
69340+ return 0;
69341+#else
69342+ return 1;
69343+#endif
69344+}
69345+
69346+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
69347+{
69348+#ifdef CONFIG_GRKERNSEC
69349+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
69350+ return 1;
69351+ return 0;
69352+#else
69353+ return 1;
69354+#endif
69355+}
69356+
69357+EXPORT_SYMBOL(gr_is_capable);
69358+EXPORT_SYMBOL(gr_is_capable_nolog);
69359+EXPORT_SYMBOL(gr_task_is_capable);
69360+EXPORT_SYMBOL(gr_task_is_capable_nolog);
69361diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
69362new file mode 100644
69363index 0000000..06cc6ea
69364--- /dev/null
69365+++ b/grsecurity/grsec_fifo.c
69366@@ -0,0 +1,24 @@
69367+#include <linux/kernel.h>
69368+#include <linux/sched.h>
69369+#include <linux/fs.h>
69370+#include <linux/file.h>
69371+#include <linux/grinternal.h>
69372+
69373+int
69374+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
69375+ const struct dentry *dir, const int flag, const int acc_mode)
69376+{
69377+#ifdef CONFIG_GRKERNSEC_FIFO
69378+ const struct cred *cred = current_cred();
69379+
69380+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
69381+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
69382+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
69383+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
69384+ if (!inode_permission(dentry->d_inode, acc_mode))
69385+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
69386+ return -EACCES;
69387+ }
69388+#endif
69389+ return 0;
69390+}
69391diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
69392new file mode 100644
69393index 0000000..8ca18bf
69394--- /dev/null
69395+++ b/grsecurity/grsec_fork.c
69396@@ -0,0 +1,23 @@
69397+#include <linux/kernel.h>
69398+#include <linux/sched.h>
69399+#include <linux/grsecurity.h>
69400+#include <linux/grinternal.h>
69401+#include <linux/errno.h>
69402+
69403+void
69404+gr_log_forkfail(const int retval)
69405+{
69406+#ifdef CONFIG_GRKERNSEC_FORKFAIL
69407+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
69408+ switch (retval) {
69409+ case -EAGAIN:
69410+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
69411+ break;
69412+ case -ENOMEM:
69413+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
69414+ break;
69415+ }
69416+ }
69417+#endif
69418+ return;
69419+}
69420diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
69421new file mode 100644
69422index 0000000..836f38f
69423--- /dev/null
69424+++ b/grsecurity/grsec_init.c
69425@@ -0,0 +1,280 @@
69426+#include <linux/kernel.h>
69427+#include <linux/sched.h>
69428+#include <linux/mm.h>
69429+#include <linux/gracl.h>
69430+#include <linux/slab.h>
69431+#include <linux/vmalloc.h>
69432+#include <linux/percpu.h>
69433+#include <linux/module.h>
69434+
69435+int grsec_enable_ptrace_readexec;
69436+int grsec_enable_setxid;
69437+int grsec_enable_symlinkown;
69438+kgid_t grsec_symlinkown_gid;
69439+int grsec_enable_brute;
69440+int grsec_enable_link;
69441+int grsec_enable_dmesg;
69442+int grsec_enable_harden_ptrace;
69443+int grsec_enable_fifo;
69444+int grsec_enable_execlog;
69445+int grsec_enable_signal;
69446+int grsec_enable_forkfail;
69447+int grsec_enable_audit_ptrace;
69448+int grsec_enable_time;
69449+int grsec_enable_group;
69450+kgid_t grsec_audit_gid;
69451+int grsec_enable_chdir;
69452+int grsec_enable_mount;
69453+int grsec_enable_rofs;
69454+int grsec_deny_new_usb;
69455+int grsec_enable_chroot_findtask;
69456+int grsec_enable_chroot_mount;
69457+int grsec_enable_chroot_shmat;
69458+int grsec_enable_chroot_fchdir;
69459+int grsec_enable_chroot_double;
69460+int grsec_enable_chroot_pivot;
69461+int grsec_enable_chroot_chdir;
69462+int grsec_enable_chroot_chmod;
69463+int grsec_enable_chroot_mknod;
69464+int grsec_enable_chroot_nice;
69465+int grsec_enable_chroot_execlog;
69466+int grsec_enable_chroot_caps;
69467+int grsec_enable_chroot_sysctl;
69468+int grsec_enable_chroot_unix;
69469+int grsec_enable_tpe;
69470+kgid_t grsec_tpe_gid;
69471+int grsec_enable_blackhole;
69472+#ifdef CONFIG_IPV6_MODULE
69473+EXPORT_SYMBOL(grsec_enable_blackhole);
69474+#endif
69475+int grsec_lastack_retries;
69476+int grsec_enable_tpe_all;
69477+int grsec_enable_tpe_invert;
69478+int grsec_enable_socket_all;
69479+kgid_t grsec_socket_all_gid;
69480+int grsec_enable_socket_client;
69481+kgid_t grsec_socket_client_gid;
69482+int grsec_enable_socket_server;
69483+kgid_t grsec_socket_server_gid;
69484+int grsec_resource_logging;
69485+int grsec_disable_privio;
69486+int grsec_enable_log_rwxmaps;
69487+int grsec_lock;
69488+
69489+DEFINE_SPINLOCK(grsec_alert_lock);
69490+unsigned long grsec_alert_wtime = 0;
69491+unsigned long grsec_alert_fyet = 0;
69492+
69493+DEFINE_SPINLOCK(grsec_audit_lock);
69494+
69495+DEFINE_RWLOCK(grsec_exec_file_lock);
69496+
69497+char *gr_shared_page[4];
69498+
69499+char *gr_alert_log_fmt;
69500+char *gr_audit_log_fmt;
69501+char *gr_alert_log_buf;
69502+char *gr_audit_log_buf;
69503+
69504+extern struct gr_arg *gr_usermode;
69505+extern unsigned char *gr_system_salt;
69506+extern unsigned char *gr_system_sum;
69507+
69508+void __init
69509+grsecurity_init(void)
69510+{
69511+ int j;
69512+ /* create the per-cpu shared pages */
69513+
69514+#ifdef CONFIG_X86
69515+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
69516+#endif
69517+
69518+ for (j = 0; j < 4; j++) {
69519+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
69520+ if (gr_shared_page[j] == NULL) {
69521+ panic("Unable to allocate grsecurity shared page");
69522+ return;
69523+ }
69524+ }
69525+
69526+ /* allocate log buffers */
69527+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
69528+ if (!gr_alert_log_fmt) {
69529+ panic("Unable to allocate grsecurity alert log format buffer");
69530+ return;
69531+ }
69532+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
69533+ if (!gr_audit_log_fmt) {
69534+ panic("Unable to allocate grsecurity audit log format buffer");
69535+ return;
69536+ }
69537+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
69538+ if (!gr_alert_log_buf) {
69539+ panic("Unable to allocate grsecurity alert log buffer");
69540+ return;
69541+ }
69542+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
69543+ if (!gr_audit_log_buf) {
69544+ panic("Unable to allocate grsecurity audit log buffer");
69545+ return;
69546+ }
69547+
69548+ /* allocate memory for authentication structure */
69549+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
69550+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
69551+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
69552+
69553+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
69554+ panic("Unable to allocate grsecurity authentication structure");
69555+ return;
69556+ }
69557+
69558+
69559+#ifdef CONFIG_GRKERNSEC_IO
69560+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
69561+ grsec_disable_privio = 1;
69562+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
69563+ grsec_disable_privio = 1;
69564+#else
69565+ grsec_disable_privio = 0;
69566+#endif
69567+#endif
69568+
69569+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
69570+ /* for backward compatibility, tpe_invert always defaults to on if
69571+ enabled in the kernel
69572+ */
69573+ grsec_enable_tpe_invert = 1;
69574+#endif
69575+
69576+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
69577+#ifndef CONFIG_GRKERNSEC_SYSCTL
69578+ grsec_lock = 1;
69579+#endif
69580+
69581+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
69582+ grsec_enable_log_rwxmaps = 1;
69583+#endif
69584+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
69585+ grsec_enable_group = 1;
69586+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
69587+#endif
69588+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
69589+ grsec_enable_ptrace_readexec = 1;
69590+#endif
69591+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
69592+ grsec_enable_chdir = 1;
69593+#endif
69594+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
69595+ grsec_enable_harden_ptrace = 1;
69596+#endif
69597+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
69598+ grsec_enable_mount = 1;
69599+#endif
69600+#ifdef CONFIG_GRKERNSEC_LINK
69601+ grsec_enable_link = 1;
69602+#endif
69603+#ifdef CONFIG_GRKERNSEC_BRUTE
69604+ grsec_enable_brute = 1;
69605+#endif
69606+#ifdef CONFIG_GRKERNSEC_DMESG
69607+ grsec_enable_dmesg = 1;
69608+#endif
69609+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69610+ grsec_enable_blackhole = 1;
69611+ grsec_lastack_retries = 4;
69612+#endif
69613+#ifdef CONFIG_GRKERNSEC_FIFO
69614+ grsec_enable_fifo = 1;
69615+#endif
69616+#ifdef CONFIG_GRKERNSEC_EXECLOG
69617+ grsec_enable_execlog = 1;
69618+#endif
69619+#ifdef CONFIG_GRKERNSEC_SETXID
69620+ grsec_enable_setxid = 1;
69621+#endif
69622+#ifdef CONFIG_GRKERNSEC_SIGNAL
69623+ grsec_enable_signal = 1;
69624+#endif
69625+#ifdef CONFIG_GRKERNSEC_FORKFAIL
69626+ grsec_enable_forkfail = 1;
69627+#endif
69628+#ifdef CONFIG_GRKERNSEC_TIME
69629+ grsec_enable_time = 1;
69630+#endif
69631+#ifdef CONFIG_GRKERNSEC_RESLOG
69632+ grsec_resource_logging = 1;
69633+#endif
69634+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
69635+ grsec_enable_chroot_findtask = 1;
69636+#endif
69637+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
69638+ grsec_enable_chroot_unix = 1;
69639+#endif
69640+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
69641+ grsec_enable_chroot_mount = 1;
69642+#endif
69643+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
69644+ grsec_enable_chroot_fchdir = 1;
69645+#endif
69646+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
69647+ grsec_enable_chroot_shmat = 1;
69648+#endif
69649+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
69650+ grsec_enable_audit_ptrace = 1;
69651+#endif
69652+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
69653+ grsec_enable_chroot_double = 1;
69654+#endif
69655+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
69656+ grsec_enable_chroot_pivot = 1;
69657+#endif
69658+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
69659+ grsec_enable_chroot_chdir = 1;
69660+#endif
69661+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
69662+ grsec_enable_chroot_chmod = 1;
69663+#endif
69664+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
69665+ grsec_enable_chroot_mknod = 1;
69666+#endif
69667+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
69668+ grsec_enable_chroot_nice = 1;
69669+#endif
69670+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
69671+ grsec_enable_chroot_execlog = 1;
69672+#endif
69673+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
69674+ grsec_enable_chroot_caps = 1;
69675+#endif
69676+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
69677+ grsec_enable_chroot_sysctl = 1;
69678+#endif
69679+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
69680+ grsec_enable_symlinkown = 1;
69681+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
69682+#endif
69683+#ifdef CONFIG_GRKERNSEC_TPE
69684+ grsec_enable_tpe = 1;
69685+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
69686+#ifdef CONFIG_GRKERNSEC_TPE_ALL
69687+ grsec_enable_tpe_all = 1;
69688+#endif
69689+#endif
69690+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
69691+ grsec_enable_socket_all = 1;
69692+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
69693+#endif
69694+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
69695+ grsec_enable_socket_client = 1;
69696+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
69697+#endif
69698+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
69699+ grsec_enable_socket_server = 1;
69700+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
69701+#endif
69702+#endif
69703+
69704+ return;
69705+}
69706diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
69707new file mode 100644
69708index 0000000..5e05e20
69709--- /dev/null
69710+++ b/grsecurity/grsec_link.c
69711@@ -0,0 +1,58 @@
69712+#include <linux/kernel.h>
69713+#include <linux/sched.h>
69714+#include <linux/fs.h>
69715+#include <linux/file.h>
69716+#include <linux/grinternal.h>
69717+
69718+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
69719+{
69720+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
69721+ const struct inode *link_inode = link->dentry->d_inode;
69722+
69723+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
69724+ /* ignore root-owned links, e.g. /proc/self */
69725+ gr_is_global_nonroot(link_inode->i_uid) && target &&
69726+ !uid_eq(link_inode->i_uid, target->i_uid)) {
69727+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
69728+ return 1;
69729+ }
69730+#endif
69731+ return 0;
69732+}
69733+
69734+int
69735+gr_handle_follow_link(const struct inode *parent,
69736+ const struct inode *inode,
69737+ const struct dentry *dentry, const struct vfsmount *mnt)
69738+{
69739+#ifdef CONFIG_GRKERNSEC_LINK
69740+ const struct cred *cred = current_cred();
69741+
69742+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
69743+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
69744+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
69745+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
69746+ return -EACCES;
69747+ }
69748+#endif
69749+ return 0;
69750+}
69751+
69752+int
69753+gr_handle_hardlink(const struct dentry *dentry,
69754+ const struct vfsmount *mnt,
69755+ struct inode *inode, const int mode, const struct filename *to)
69756+{
69757+#ifdef CONFIG_GRKERNSEC_LINK
69758+ const struct cred *cred = current_cred();
69759+
69760+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
69761+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
69762+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
69763+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
69764+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
69765+ return -EPERM;
69766+ }
69767+#endif
69768+ return 0;
69769+}
69770diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
69771new file mode 100644
69772index 0000000..dbe0a6b
69773--- /dev/null
69774+++ b/grsecurity/grsec_log.c
69775@@ -0,0 +1,341 @@
69776+#include <linux/kernel.h>
69777+#include <linux/sched.h>
69778+#include <linux/file.h>
69779+#include <linux/tty.h>
69780+#include <linux/fs.h>
69781+#include <linux/mm.h>
69782+#include <linux/grinternal.h>
69783+
69784+#ifdef CONFIG_TREE_PREEMPT_RCU
69785+#define DISABLE_PREEMPT() preempt_disable()
69786+#define ENABLE_PREEMPT() preempt_enable()
69787+#else
69788+#define DISABLE_PREEMPT()
69789+#define ENABLE_PREEMPT()
69790+#endif
69791+
69792+#define BEGIN_LOCKS(x) \
69793+ DISABLE_PREEMPT(); \
69794+ rcu_read_lock(); \
69795+ read_lock(&tasklist_lock); \
69796+ read_lock(&grsec_exec_file_lock); \
69797+ if (x != GR_DO_AUDIT) \
69798+ spin_lock(&grsec_alert_lock); \
69799+ else \
69800+ spin_lock(&grsec_audit_lock)
69801+
69802+#define END_LOCKS(x) \
69803+ if (x != GR_DO_AUDIT) \
69804+ spin_unlock(&grsec_alert_lock); \
69805+ else \
69806+ spin_unlock(&grsec_audit_lock); \
69807+ read_unlock(&grsec_exec_file_lock); \
69808+ read_unlock(&tasklist_lock); \
69809+ rcu_read_unlock(); \
69810+ ENABLE_PREEMPT(); \
69811+ if (x == GR_DONT_AUDIT) \
69812+ gr_handle_alertkill(current)
69813+
69814+enum {
69815+ FLOODING,
69816+ NO_FLOODING
69817+};
69818+
69819+extern char *gr_alert_log_fmt;
69820+extern char *gr_audit_log_fmt;
69821+extern char *gr_alert_log_buf;
69822+extern char *gr_audit_log_buf;
69823+
69824+static int gr_log_start(int audit)
69825+{
69826+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
69827+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
69828+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
69829+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
69830+ unsigned long curr_secs = get_seconds();
69831+
69832+ if (audit == GR_DO_AUDIT)
69833+ goto set_fmt;
69834+
69835+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
69836+ grsec_alert_wtime = curr_secs;
69837+ grsec_alert_fyet = 0;
69838+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
69839+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
69840+ grsec_alert_fyet++;
69841+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
69842+ grsec_alert_wtime = curr_secs;
69843+ grsec_alert_fyet++;
69844+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
69845+ return FLOODING;
69846+ }
69847+ else return FLOODING;
69848+
69849+set_fmt:
69850+#endif
69851+ memset(buf, 0, PAGE_SIZE);
69852+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
69853+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
69854+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
69855+ } else if (current->signal->curr_ip) {
69856+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
69857+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
69858+ } else if (gr_acl_is_enabled()) {
69859+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
69860+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
69861+ } else {
69862+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
69863+ strcpy(buf, fmt);
69864+ }
69865+
69866+ return NO_FLOODING;
69867+}
69868+
69869+static void gr_log_middle(int audit, const char *msg, va_list ap)
69870+ __attribute__ ((format (printf, 2, 0)));
69871+
69872+static void gr_log_middle(int audit, const char *msg, va_list ap)
69873+{
69874+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
69875+ unsigned int len = strlen(buf);
69876+
69877+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
69878+
69879+ return;
69880+}
69881+
69882+static void gr_log_middle_varargs(int audit, const char *msg, ...)
69883+ __attribute__ ((format (printf, 2, 3)));
69884+
69885+static void gr_log_middle_varargs(int audit, const char *msg, ...)
69886+{
69887+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
69888+ unsigned int len = strlen(buf);
69889+ va_list ap;
69890+
69891+ va_start(ap, msg);
69892+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
69893+ va_end(ap);
69894+
69895+ return;
69896+}
69897+
69898+static void gr_log_end(int audit, int append_default)
69899+{
69900+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
69901+ if (append_default) {
69902+ struct task_struct *task = current;
69903+ struct task_struct *parent = task->real_parent;
69904+ const struct cred *cred = __task_cred(task);
69905+ const struct cred *pcred = __task_cred(parent);
69906+ unsigned int len = strlen(buf);
69907+
69908+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
69909+ }
69910+
69911+ printk("%s\n", buf);
69912+
69913+ return;
69914+}
69915+
69916+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
69917+{
69918+ int logtype;
69919+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
69920+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
69921+ void *voidptr = NULL;
69922+ int num1 = 0, num2 = 0;
69923+ unsigned long ulong1 = 0, ulong2 = 0;
69924+ struct dentry *dentry = NULL;
69925+ struct vfsmount *mnt = NULL;
69926+ struct file *file = NULL;
69927+ struct task_struct *task = NULL;
69928+ struct vm_area_struct *vma = NULL;
69929+ const struct cred *cred, *pcred;
69930+ va_list ap;
69931+
69932+ BEGIN_LOCKS(audit);
69933+ logtype = gr_log_start(audit);
69934+ if (logtype == FLOODING) {
69935+ END_LOCKS(audit);
69936+ return;
69937+ }
69938+ va_start(ap, argtypes);
69939+ switch (argtypes) {
69940+ case GR_TTYSNIFF:
69941+ task = va_arg(ap, struct task_struct *);
69942+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
69943+ break;
69944+ case GR_SYSCTL_HIDDEN:
69945+ str1 = va_arg(ap, char *);
69946+ gr_log_middle_varargs(audit, msg, result, str1);
69947+ break;
69948+ case GR_RBAC:
69949+ dentry = va_arg(ap, struct dentry *);
69950+ mnt = va_arg(ap, struct vfsmount *);
69951+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
69952+ break;
69953+ case GR_RBAC_STR:
69954+ dentry = va_arg(ap, struct dentry *);
69955+ mnt = va_arg(ap, struct vfsmount *);
69956+ str1 = va_arg(ap, char *);
69957+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
69958+ break;
69959+ case GR_STR_RBAC:
69960+ str1 = va_arg(ap, char *);
69961+ dentry = va_arg(ap, struct dentry *);
69962+ mnt = va_arg(ap, struct vfsmount *);
69963+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
69964+ break;
69965+ case GR_RBAC_MODE2:
69966+ dentry = va_arg(ap, struct dentry *);
69967+ mnt = va_arg(ap, struct vfsmount *);
69968+ str1 = va_arg(ap, char *);
69969+ str2 = va_arg(ap, char *);
69970+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
69971+ break;
69972+ case GR_RBAC_MODE3:
69973+ dentry = va_arg(ap, struct dentry *);
69974+ mnt = va_arg(ap, struct vfsmount *);
69975+ str1 = va_arg(ap, char *);
69976+ str2 = va_arg(ap, char *);
69977+ str3 = va_arg(ap, char *);
69978+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
69979+ break;
69980+ case GR_FILENAME:
69981+ dentry = va_arg(ap, struct dentry *);
69982+ mnt = va_arg(ap, struct vfsmount *);
69983+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
69984+ break;
69985+ case GR_STR_FILENAME:
69986+ str1 = va_arg(ap, char *);
69987+ dentry = va_arg(ap, struct dentry *);
69988+ mnt = va_arg(ap, struct vfsmount *);
69989+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
69990+ break;
69991+ case GR_FILENAME_STR:
69992+ dentry = va_arg(ap, struct dentry *);
69993+ mnt = va_arg(ap, struct vfsmount *);
69994+ str1 = va_arg(ap, char *);
69995+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
69996+ break;
69997+ case GR_FILENAME_TWO_INT:
69998+ dentry = va_arg(ap, struct dentry *);
69999+ mnt = va_arg(ap, struct vfsmount *);
70000+ num1 = va_arg(ap, int);
70001+ num2 = va_arg(ap, int);
70002+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
70003+ break;
70004+ case GR_FILENAME_TWO_INT_STR:
70005+ dentry = va_arg(ap, struct dentry *);
70006+ mnt = va_arg(ap, struct vfsmount *);
70007+ num1 = va_arg(ap, int);
70008+ num2 = va_arg(ap, int);
70009+ str1 = va_arg(ap, char *);
70010+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
70011+ break;
70012+ case GR_TEXTREL:
70013+ file = va_arg(ap, struct file *);
70014+ ulong1 = va_arg(ap, unsigned long);
70015+ ulong2 = va_arg(ap, unsigned long);
70016+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
70017+ break;
70018+ case GR_PTRACE:
70019+ task = va_arg(ap, struct task_struct *);
70020+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
70021+ break;
70022+ case GR_RESOURCE:
70023+ task = va_arg(ap, struct task_struct *);
70024+ cred = __task_cred(task);
70025+ pcred = __task_cred(task->real_parent);
70026+ ulong1 = va_arg(ap, unsigned long);
70027+ str1 = va_arg(ap, char *);
70028+ ulong2 = va_arg(ap, unsigned long);
70029+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
70030+ break;
70031+ case GR_CAP:
70032+ task = va_arg(ap, struct task_struct *);
70033+ cred = __task_cred(task);
70034+ pcred = __task_cred(task->real_parent);
70035+ str1 = va_arg(ap, char *);
70036+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
70037+ break;
70038+ case GR_SIG:
70039+ str1 = va_arg(ap, char *);
70040+ voidptr = va_arg(ap, void *);
70041+ gr_log_middle_varargs(audit, msg, str1, voidptr);
70042+ break;
70043+ case GR_SIG2:
70044+ task = va_arg(ap, struct task_struct *);
70045+ cred = __task_cred(task);
70046+ pcred = __task_cred(task->real_parent);
70047+ num1 = va_arg(ap, int);
70048+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
70049+ break;
70050+ case GR_CRASH1:
70051+ task = va_arg(ap, struct task_struct *);
70052+ cred = __task_cred(task);
70053+ pcred = __task_cred(task->real_parent);
70054+ ulong1 = va_arg(ap, unsigned long);
70055+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
70056+ break;
70057+ case GR_CRASH2:
70058+ task = va_arg(ap, struct task_struct *);
70059+ cred = __task_cred(task);
70060+ pcred = __task_cred(task->real_parent);
70061+ ulong1 = va_arg(ap, unsigned long);
70062+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
70063+ break;
70064+ case GR_RWXMAP:
70065+ file = va_arg(ap, struct file *);
70066+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
70067+ break;
70068+ case GR_RWXMAPVMA:
70069+ vma = va_arg(ap, struct vm_area_struct *);
70070+ if (vma->vm_file)
70071+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
70072+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
70073+ str1 = "<stack>";
70074+ else if (vma->vm_start <= current->mm->brk &&
70075+ vma->vm_end >= current->mm->start_brk)
70076+ str1 = "<heap>";
70077+ else
70078+ str1 = "<anonymous mapping>";
70079+ gr_log_middle_varargs(audit, msg, str1);
70080+ break;
70081+ case GR_PSACCT:
70082+ {
70083+ unsigned int wday, cday;
70084+ __u8 whr, chr;
70085+ __u8 wmin, cmin;
70086+ __u8 wsec, csec;
70087+ char cur_tty[64] = { 0 };
70088+ char parent_tty[64] = { 0 };
70089+
70090+ task = va_arg(ap, struct task_struct *);
70091+ wday = va_arg(ap, unsigned int);
70092+ cday = va_arg(ap, unsigned int);
70093+ whr = va_arg(ap, int);
70094+ chr = va_arg(ap, int);
70095+ wmin = va_arg(ap, int);
70096+ cmin = va_arg(ap, int);
70097+ wsec = va_arg(ap, int);
70098+ csec = va_arg(ap, int);
70099+ ulong1 = va_arg(ap, unsigned long);
70100+ cred = __task_cred(task);
70101+ pcred = __task_cred(task->real_parent);
70102+
70103+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
70104+ }
70105+ break;
70106+ default:
70107+ gr_log_middle(audit, msg, ap);
70108+ }
70109+ va_end(ap);
70110+ // these don't need DEFAULTSECARGS printed on the end
70111+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
70112+ gr_log_end(audit, 0);
70113+ else
70114+ gr_log_end(audit, 1);
70115+ END_LOCKS(audit);
70116+}
70117diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
70118new file mode 100644
70119index 0000000..f536303
70120--- /dev/null
70121+++ b/grsecurity/grsec_mem.c
70122@@ -0,0 +1,40 @@
70123+#include <linux/kernel.h>
70124+#include <linux/sched.h>
70125+#include <linux/mm.h>
70126+#include <linux/mman.h>
70127+#include <linux/grinternal.h>
70128+
70129+void
70130+gr_handle_ioperm(void)
70131+{
70132+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
70133+ return;
70134+}
70135+
70136+void
70137+gr_handle_iopl(void)
70138+{
70139+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
70140+ return;
70141+}
70142+
70143+void
70144+gr_handle_mem_readwrite(u64 from, u64 to)
70145+{
70146+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
70147+ return;
70148+}
70149+
70150+void
70151+gr_handle_vm86(void)
70152+{
70153+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
70154+ return;
70155+}
70156+
70157+void
70158+gr_log_badprocpid(const char *entry)
70159+{
70160+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
70161+ return;
70162+}
70163diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
70164new file mode 100644
70165index 0000000..2131422
70166--- /dev/null
70167+++ b/grsecurity/grsec_mount.c
70168@@ -0,0 +1,62 @@
70169+#include <linux/kernel.h>
70170+#include <linux/sched.h>
70171+#include <linux/mount.h>
70172+#include <linux/grsecurity.h>
70173+#include <linux/grinternal.h>
70174+
70175+void
70176+gr_log_remount(const char *devname, const int retval)
70177+{
70178+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
70179+ if (grsec_enable_mount && (retval >= 0))
70180+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
70181+#endif
70182+ return;
70183+}
70184+
70185+void
70186+gr_log_unmount(const char *devname, const int retval)
70187+{
70188+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
70189+ if (grsec_enable_mount && (retval >= 0))
70190+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
70191+#endif
70192+ return;
70193+}
70194+
70195+void
70196+gr_log_mount(const char *from, const char *to, const int retval)
70197+{
70198+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
70199+ if (grsec_enable_mount && (retval >= 0))
70200+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
70201+#endif
70202+ return;
70203+}
70204+
70205+int
70206+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
70207+{
70208+#ifdef CONFIG_GRKERNSEC_ROFS
70209+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
70210+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
70211+ return -EPERM;
70212+ } else
70213+ return 0;
70214+#endif
70215+ return 0;
70216+}
70217+
70218+int
70219+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
70220+{
70221+#ifdef CONFIG_GRKERNSEC_ROFS
70222+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
70223+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
70224+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
70225+ return -EPERM;
70226+ } else
70227+ return 0;
70228+#endif
70229+ return 0;
70230+}
70231diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
70232new file mode 100644
70233index 0000000..6ee9d50
70234--- /dev/null
70235+++ b/grsecurity/grsec_pax.c
70236@@ -0,0 +1,45 @@
70237+#include <linux/kernel.h>
70238+#include <linux/sched.h>
70239+#include <linux/mm.h>
70240+#include <linux/file.h>
70241+#include <linux/grinternal.h>
70242+#include <linux/grsecurity.h>
70243+
70244+void
70245+gr_log_textrel(struct vm_area_struct * vma)
70246+{
70247+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
70248+ if (grsec_enable_log_rwxmaps)
70249+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
70250+#endif
70251+ return;
70252+}
70253+
70254+void gr_log_ptgnustack(struct file *file)
70255+{
70256+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
70257+ if (grsec_enable_log_rwxmaps)
70258+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
70259+#endif
70260+ return;
70261+}
70262+
70263+void
70264+gr_log_rwxmmap(struct file *file)
70265+{
70266+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
70267+ if (grsec_enable_log_rwxmaps)
70268+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
70269+#endif
70270+ return;
70271+}
70272+
70273+void
70274+gr_log_rwxmprotect(struct vm_area_struct *vma)
70275+{
70276+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
70277+ if (grsec_enable_log_rwxmaps)
70278+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
70279+#endif
70280+ return;
70281+}
70282diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
70283new file mode 100644
70284index 0000000..f7f29aa
70285--- /dev/null
70286+++ b/grsecurity/grsec_ptrace.c
70287@@ -0,0 +1,30 @@
70288+#include <linux/kernel.h>
70289+#include <linux/sched.h>
70290+#include <linux/grinternal.h>
70291+#include <linux/security.h>
70292+
70293+void
70294+gr_audit_ptrace(struct task_struct *task)
70295+{
70296+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
70297+ if (grsec_enable_audit_ptrace)
70298+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
70299+#endif
70300+ return;
70301+}
70302+
70303+int
70304+gr_ptrace_readexec(struct file *file, int unsafe_flags)
70305+{
70306+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
70307+ const struct dentry *dentry = file->f_path.dentry;
70308+ const struct vfsmount *mnt = file->f_path.mnt;
70309+
70310+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
70311+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
70312+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
70313+ return -EACCES;
70314+ }
70315+#endif
70316+ return 0;
70317+}
70318diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
70319new file mode 100644
70320index 0000000..4e29cc7
70321--- /dev/null
70322+++ b/grsecurity/grsec_sig.c
70323@@ -0,0 +1,246 @@
70324+#include <linux/kernel.h>
70325+#include <linux/sched.h>
70326+#include <linux/fs.h>
70327+#include <linux/delay.h>
70328+#include <linux/grsecurity.h>
70329+#include <linux/grinternal.h>
70330+#include <linux/hardirq.h>
70331+
70332+char *signames[] = {
70333+ [SIGSEGV] = "Segmentation fault",
70334+ [SIGILL] = "Illegal instruction",
70335+ [SIGABRT] = "Abort",
70336+ [SIGBUS] = "Invalid alignment/Bus error"
70337+};
70338+
70339+void
70340+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
70341+{
70342+#ifdef CONFIG_GRKERNSEC_SIGNAL
70343+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
70344+ (sig == SIGABRT) || (sig == SIGBUS))) {
70345+ if (task_pid_nr(t) == task_pid_nr(current)) {
70346+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
70347+ } else {
70348+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
70349+ }
70350+ }
70351+#endif
70352+ return;
70353+}
70354+
70355+int
70356+gr_handle_signal(const struct task_struct *p, const int sig)
70357+{
70358+#ifdef CONFIG_GRKERNSEC
70359+ /* ignore the 0 signal for protected task checks */
70360+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
70361+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
70362+ return -EPERM;
70363+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
70364+ return -EPERM;
70365+ }
70366+#endif
70367+ return 0;
70368+}
70369+
70370+#ifdef CONFIG_GRKERNSEC
70371+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
70372+
70373+int gr_fake_force_sig(int sig, struct task_struct *t)
70374+{
70375+ unsigned long int flags;
70376+ int ret, blocked, ignored;
70377+ struct k_sigaction *action;
70378+
70379+ spin_lock_irqsave(&t->sighand->siglock, flags);
70380+ action = &t->sighand->action[sig-1];
70381+ ignored = action->sa.sa_handler == SIG_IGN;
70382+ blocked = sigismember(&t->blocked, sig);
70383+ if (blocked || ignored) {
70384+ action->sa.sa_handler = SIG_DFL;
70385+ if (blocked) {
70386+ sigdelset(&t->blocked, sig);
70387+ recalc_sigpending_and_wake(t);
70388+ }
70389+ }
70390+ if (action->sa.sa_handler == SIG_DFL)
70391+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
70392+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
70393+
70394+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
70395+
70396+ return ret;
70397+}
70398+#endif
70399+
70400+#ifdef CONFIG_GRKERNSEC_BRUTE
70401+#define GR_USER_BAN_TIME (15 * 60)
70402+#define GR_DAEMON_BRUTE_TIME (30 * 60)
70403+
70404+static int __get_dumpable(unsigned long mm_flags)
70405+{
70406+ int ret;
70407+
70408+ ret = mm_flags & MMF_DUMPABLE_MASK;
70409+ return (ret >= 2) ? 2 : ret;
70410+}
70411+#endif
70412+
70413+void gr_handle_brute_attach(unsigned long mm_flags)
70414+{
70415+#ifdef CONFIG_GRKERNSEC_BRUTE
70416+ struct task_struct *p = current;
70417+ kuid_t uid = GLOBAL_ROOT_UID;
70418+ int daemon = 0;
70419+
70420+ if (!grsec_enable_brute)
70421+ return;
70422+
70423+ rcu_read_lock();
70424+ read_lock(&tasklist_lock);
70425+ read_lock(&grsec_exec_file_lock);
70426+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
70427+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
70428+ p->real_parent->brute = 1;
70429+ daemon = 1;
70430+ } else {
70431+ const struct cred *cred = __task_cred(p), *cred2;
70432+ struct task_struct *tsk, *tsk2;
70433+
70434+ if (!__get_dumpable(mm_flags) && gr_is_global_nonroot(cred->uid)) {
70435+ struct user_struct *user;
70436+
70437+ uid = cred->uid;
70438+
70439+ /* this is put upon execution past expiration */
70440+ user = find_user(uid);
70441+ if (user == NULL)
70442+ goto unlock;
70443+ user->suid_banned = 1;
70444+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
70445+ if (user->suid_ban_expires == ~0UL)
70446+ user->suid_ban_expires--;
70447+
70448+ /* only kill other threads of the same binary, from the same user */
70449+ do_each_thread(tsk2, tsk) {
70450+ cred2 = __task_cred(tsk);
70451+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
70452+ gr_fake_force_sig(SIGKILL, tsk);
70453+ } while_each_thread(tsk2, tsk);
70454+ }
70455+ }
70456+unlock:
70457+ read_unlock(&grsec_exec_file_lock);
70458+ read_unlock(&tasklist_lock);
70459+ rcu_read_unlock();
70460+
70461+ if (gr_is_global_nonroot(uid))
70462+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
70463+ else if (daemon)
70464+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
70465+
70466+#endif
70467+ return;
70468+}
70469+
70470+void gr_handle_brute_check(void)
70471+{
70472+#ifdef CONFIG_GRKERNSEC_BRUTE
70473+ struct task_struct *p = current;
70474+
70475+ if (unlikely(p->brute)) {
70476+ if (!grsec_enable_brute)
70477+ p->brute = 0;
70478+ else if (time_before(get_seconds(), p->brute_expires))
70479+ msleep(30 * 1000);
70480+ }
70481+#endif
70482+ return;
70483+}
70484+
70485+void gr_handle_kernel_exploit(void)
70486+{
70487+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
70488+ const struct cred *cred;
70489+ struct task_struct *tsk, *tsk2;
70490+ struct user_struct *user;
70491+ kuid_t uid;
70492+
70493+ if (in_irq() || in_serving_softirq() || in_nmi())
70494+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
70495+
70496+ uid = current_uid();
70497+
70498+ if (gr_is_global_root(uid))
70499+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
70500+ else {
70501+ /* kill all the processes of this user, hold a reference
70502+ to their creds struct, and prevent them from creating
70503+ another process until system reset
70504+ */
70505+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
70506+ GR_GLOBAL_UID(uid));
70507+ /* we intentionally leak this ref */
70508+ user = get_uid(current->cred->user);
70509+ if (user)
70510+ user->kernel_banned = 1;
70511+
70512+ /* kill all processes of this user */
70513+ read_lock(&tasklist_lock);
70514+ do_each_thread(tsk2, tsk) {
70515+ cred = __task_cred(tsk);
70516+ if (uid_eq(cred->uid, uid))
70517+ gr_fake_force_sig(SIGKILL, tsk);
70518+ } while_each_thread(tsk2, tsk);
70519+ read_unlock(&tasklist_lock);
70520+ }
70521+#endif
70522+}
70523+
70524+#ifdef CONFIG_GRKERNSEC_BRUTE
70525+static bool suid_ban_expired(struct user_struct *user)
70526+{
70527+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
70528+ user->suid_banned = 0;
70529+ user->suid_ban_expires = 0;
70530+ free_uid(user);
70531+ return true;
70532+ }
70533+
70534+ return false;
70535+}
70536+#endif
70537+
70538+int gr_process_kernel_exec_ban(void)
70539+{
70540+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
70541+ if (unlikely(current->cred->user->kernel_banned))
70542+ return -EPERM;
70543+#endif
70544+ return 0;
70545+}
70546+
70547+int gr_process_kernel_setuid_ban(struct user_struct *user)
70548+{
70549+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
70550+ if (unlikely(user->kernel_banned))
70551+ gr_fake_force_sig(SIGKILL, current);
70552+#endif
70553+ return 0;
70554+}
70555+
70556+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
70557+{
70558+#ifdef CONFIG_GRKERNSEC_BRUTE
70559+ struct user_struct *user = current->cred->user;
70560+ if (unlikely(user->suid_banned)) {
70561+ if (suid_ban_expired(user))
70562+ return 0;
70563+ /* disallow execution of suid binaries only */
70564+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
70565+ return -EPERM;
70566+ }
70567+#endif
70568+ return 0;
70569+}
70570diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
70571new file mode 100644
70572index 0000000..4030d57
70573--- /dev/null
70574+++ b/grsecurity/grsec_sock.c
70575@@ -0,0 +1,244 @@
70576+#include <linux/kernel.h>
70577+#include <linux/module.h>
70578+#include <linux/sched.h>
70579+#include <linux/file.h>
70580+#include <linux/net.h>
70581+#include <linux/in.h>
70582+#include <linux/ip.h>
70583+#include <net/sock.h>
70584+#include <net/inet_sock.h>
70585+#include <linux/grsecurity.h>
70586+#include <linux/grinternal.h>
70587+#include <linux/gracl.h>
70588+
70589+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
70590+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
70591+
70592+EXPORT_SYMBOL(gr_search_udp_recvmsg);
70593+EXPORT_SYMBOL(gr_search_udp_sendmsg);
70594+
70595+#ifdef CONFIG_UNIX_MODULE
70596+EXPORT_SYMBOL(gr_acl_handle_unix);
70597+EXPORT_SYMBOL(gr_acl_handle_mknod);
70598+EXPORT_SYMBOL(gr_handle_chroot_unix);
70599+EXPORT_SYMBOL(gr_handle_create);
70600+#endif
70601+
70602+#ifdef CONFIG_GRKERNSEC
70603+#define gr_conn_table_size 32749
70604+struct conn_table_entry {
70605+ struct conn_table_entry *next;
70606+ struct signal_struct *sig;
70607+};
70608+
70609+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
70610+DEFINE_SPINLOCK(gr_conn_table_lock);
70611+
70612+extern const char * gr_socktype_to_name(unsigned char type);
70613+extern const char * gr_proto_to_name(unsigned char proto);
70614+extern const char * gr_sockfamily_to_name(unsigned char family);
70615+
70616+static __inline__ int
70617+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
70618+{
70619+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
70620+}
70621+
70622+static __inline__ int
70623+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
70624+ __u16 sport, __u16 dport)
70625+{
70626+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
70627+ sig->gr_sport == sport && sig->gr_dport == dport))
70628+ return 1;
70629+ else
70630+ return 0;
70631+}
70632+
70633+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
70634+{
70635+ struct conn_table_entry **match;
70636+ unsigned int index;
70637+
70638+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
70639+ sig->gr_sport, sig->gr_dport,
70640+ gr_conn_table_size);
70641+
70642+ newent->sig = sig;
70643+
70644+ match = &gr_conn_table[index];
70645+ newent->next = *match;
70646+ *match = newent;
70647+
70648+ return;
70649+}
70650+
70651+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
70652+{
70653+ struct conn_table_entry *match, *last = NULL;
70654+ unsigned int index;
70655+
70656+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
70657+ sig->gr_sport, sig->gr_dport,
70658+ gr_conn_table_size);
70659+
70660+ match = gr_conn_table[index];
70661+ while (match && !conn_match(match->sig,
70662+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
70663+ sig->gr_dport)) {
70664+ last = match;
70665+ match = match->next;
70666+ }
70667+
70668+ if (match) {
70669+ if (last)
70670+ last->next = match->next;
70671+ else
70672+ gr_conn_table[index] = NULL;
70673+ kfree(match);
70674+ }
70675+
70676+ return;
70677+}
70678+
70679+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
70680+ __u16 sport, __u16 dport)
70681+{
70682+ struct conn_table_entry *match;
70683+ unsigned int index;
70684+
70685+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
70686+
70687+ match = gr_conn_table[index];
70688+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
70689+ match = match->next;
70690+
70691+ if (match)
70692+ return match->sig;
70693+ else
70694+ return NULL;
70695+}
70696+
70697+#endif
70698+
70699+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
70700+{
70701+#ifdef CONFIG_GRKERNSEC
70702+ struct signal_struct *sig = task->signal;
70703+ struct conn_table_entry *newent;
70704+
70705+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
70706+ if (newent == NULL)
70707+ return;
70708+ /* no bh lock needed since we are called with bh disabled */
70709+ spin_lock(&gr_conn_table_lock);
70710+ gr_del_task_from_ip_table_nolock(sig);
70711+ sig->gr_saddr = inet->inet_rcv_saddr;
70712+ sig->gr_daddr = inet->inet_daddr;
70713+ sig->gr_sport = inet->inet_sport;
70714+ sig->gr_dport = inet->inet_dport;
70715+ gr_add_to_task_ip_table_nolock(sig, newent);
70716+ spin_unlock(&gr_conn_table_lock);
70717+#endif
70718+ return;
70719+}
70720+
70721+void gr_del_task_from_ip_table(struct task_struct *task)
70722+{
70723+#ifdef CONFIG_GRKERNSEC
70724+ spin_lock_bh(&gr_conn_table_lock);
70725+ gr_del_task_from_ip_table_nolock(task->signal);
70726+ spin_unlock_bh(&gr_conn_table_lock);
70727+#endif
70728+ return;
70729+}
70730+
70731+void
70732+gr_attach_curr_ip(const struct sock *sk)
70733+{
70734+#ifdef CONFIG_GRKERNSEC
70735+ struct signal_struct *p, *set;
70736+ const struct inet_sock *inet = inet_sk(sk);
70737+
70738+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
70739+ return;
70740+
70741+ set = current->signal;
70742+
70743+ spin_lock_bh(&gr_conn_table_lock);
70744+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
70745+ inet->inet_dport, inet->inet_sport);
70746+ if (unlikely(p != NULL)) {
70747+ set->curr_ip = p->curr_ip;
70748+ set->used_accept = 1;
70749+ gr_del_task_from_ip_table_nolock(p);
70750+ spin_unlock_bh(&gr_conn_table_lock);
70751+ return;
70752+ }
70753+ spin_unlock_bh(&gr_conn_table_lock);
70754+
70755+ set->curr_ip = inet->inet_daddr;
70756+ set->used_accept = 1;
70757+#endif
70758+ return;
70759+}
70760+
70761+int
70762+gr_handle_sock_all(const int family, const int type, const int protocol)
70763+{
70764+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
70765+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
70766+ (family != AF_UNIX)) {
70767+ if (family == AF_INET)
70768+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
70769+ else
70770+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
70771+ return -EACCES;
70772+ }
70773+#endif
70774+ return 0;
70775+}
70776+
70777+int
70778+gr_handle_sock_server(const struct sockaddr *sck)
70779+{
70780+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
70781+ if (grsec_enable_socket_server &&
70782+ in_group_p(grsec_socket_server_gid) &&
70783+ sck && (sck->sa_family != AF_UNIX) &&
70784+ (sck->sa_family != AF_LOCAL)) {
70785+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
70786+ return -EACCES;
70787+ }
70788+#endif
70789+ return 0;
70790+}
70791+
70792+int
70793+gr_handle_sock_server_other(const struct sock *sck)
70794+{
70795+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
70796+ if (grsec_enable_socket_server &&
70797+ in_group_p(grsec_socket_server_gid) &&
70798+ sck && (sck->sk_family != AF_UNIX) &&
70799+ (sck->sk_family != AF_LOCAL)) {
70800+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
70801+ return -EACCES;
70802+ }
70803+#endif
70804+ return 0;
70805+}
70806+
70807+int
70808+gr_handle_sock_client(const struct sockaddr *sck)
70809+{
70810+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
70811+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
70812+ sck && (sck->sa_family != AF_UNIX) &&
70813+ (sck->sa_family != AF_LOCAL)) {
70814+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
70815+ return -EACCES;
70816+ }
70817+#endif
70818+ return 0;
70819+}
70820diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
70821new file mode 100644
70822index 0000000..a9e378f
70823--- /dev/null
70824+++ b/grsecurity/grsec_sysctl.c
70825@@ -0,0 +1,472 @@
70826+#include <linux/kernel.h>
70827+#include <linux/sched.h>
70828+#include <linux/sysctl.h>
70829+#include <linux/grsecurity.h>
70830+#include <linux/grinternal.h>
70831+
70832+int
70833+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
70834+{
70835+#ifdef CONFIG_GRKERNSEC_SYSCTL
70836+ if (dirname == NULL || name == NULL)
70837+ return 0;
70838+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
70839+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
70840+ return -EACCES;
70841+ }
70842+#endif
70843+ return 0;
70844+}
70845+
70846+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
70847+static int __maybe_unused __read_only one = 1;
70848+#endif
70849+
70850+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
70851+ defined(CONFIG_GRKERNSEC_DENYUSB)
70852+struct ctl_table grsecurity_table[] = {
70853+#ifdef CONFIG_GRKERNSEC_SYSCTL
70854+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
70855+#ifdef CONFIG_GRKERNSEC_IO
70856+ {
70857+ .procname = "disable_priv_io",
70858+ .data = &grsec_disable_privio,
70859+ .maxlen = sizeof(int),
70860+ .mode = 0600,
70861+ .proc_handler = &proc_dointvec,
70862+ },
70863+#endif
70864+#endif
70865+#ifdef CONFIG_GRKERNSEC_LINK
70866+ {
70867+ .procname = "linking_restrictions",
70868+ .data = &grsec_enable_link,
70869+ .maxlen = sizeof(int),
70870+ .mode = 0600,
70871+ .proc_handler = &proc_dointvec,
70872+ },
70873+#endif
70874+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
70875+ {
70876+ .procname = "enforce_symlinksifowner",
70877+ .data = &grsec_enable_symlinkown,
70878+ .maxlen = sizeof(int),
70879+ .mode = 0600,
70880+ .proc_handler = &proc_dointvec,
70881+ },
70882+ {
70883+ .procname = "symlinkown_gid",
70884+ .data = &grsec_symlinkown_gid,
70885+ .maxlen = sizeof(int),
70886+ .mode = 0600,
70887+ .proc_handler = &proc_dointvec,
70888+ },
70889+#endif
70890+#ifdef CONFIG_GRKERNSEC_BRUTE
70891+ {
70892+ .procname = "deter_bruteforce",
70893+ .data = &grsec_enable_brute,
70894+ .maxlen = sizeof(int),
70895+ .mode = 0600,
70896+ .proc_handler = &proc_dointvec,
70897+ },
70898+#endif
70899+#ifdef CONFIG_GRKERNSEC_FIFO
70900+ {
70901+ .procname = "fifo_restrictions",
70902+ .data = &grsec_enable_fifo,
70903+ .maxlen = sizeof(int),
70904+ .mode = 0600,
70905+ .proc_handler = &proc_dointvec,
70906+ },
70907+#endif
70908+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
70909+ {
70910+ .procname = "ptrace_readexec",
70911+ .data = &grsec_enable_ptrace_readexec,
70912+ .maxlen = sizeof(int),
70913+ .mode = 0600,
70914+ .proc_handler = &proc_dointvec,
70915+ },
70916+#endif
70917+#ifdef CONFIG_GRKERNSEC_SETXID
70918+ {
70919+ .procname = "consistent_setxid",
70920+ .data = &grsec_enable_setxid,
70921+ .maxlen = sizeof(int),
70922+ .mode = 0600,
70923+ .proc_handler = &proc_dointvec,
70924+ },
70925+#endif
70926+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
70927+ {
70928+ .procname = "ip_blackhole",
70929+ .data = &grsec_enable_blackhole,
70930+ .maxlen = sizeof(int),
70931+ .mode = 0600,
70932+ .proc_handler = &proc_dointvec,
70933+ },
70934+ {
70935+ .procname = "lastack_retries",
70936+ .data = &grsec_lastack_retries,
70937+ .maxlen = sizeof(int),
70938+ .mode = 0600,
70939+ .proc_handler = &proc_dointvec,
70940+ },
70941+#endif
70942+#ifdef CONFIG_GRKERNSEC_EXECLOG
70943+ {
70944+ .procname = "exec_logging",
70945+ .data = &grsec_enable_execlog,
70946+ .maxlen = sizeof(int),
70947+ .mode = 0600,
70948+ .proc_handler = &proc_dointvec,
70949+ },
70950+#endif
70951+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
70952+ {
70953+ .procname = "rwxmap_logging",
70954+ .data = &grsec_enable_log_rwxmaps,
70955+ .maxlen = sizeof(int),
70956+ .mode = 0600,
70957+ .proc_handler = &proc_dointvec,
70958+ },
70959+#endif
70960+#ifdef CONFIG_GRKERNSEC_SIGNAL
70961+ {
70962+ .procname = "signal_logging",
70963+ .data = &grsec_enable_signal,
70964+ .maxlen = sizeof(int),
70965+ .mode = 0600,
70966+ .proc_handler = &proc_dointvec,
70967+ },
70968+#endif
70969+#ifdef CONFIG_GRKERNSEC_FORKFAIL
70970+ {
70971+ .procname = "forkfail_logging",
70972+ .data = &grsec_enable_forkfail,
70973+ .maxlen = sizeof(int),
70974+ .mode = 0600,
70975+ .proc_handler = &proc_dointvec,
70976+ },
70977+#endif
70978+#ifdef CONFIG_GRKERNSEC_TIME
70979+ {
70980+ .procname = "timechange_logging",
70981+ .data = &grsec_enable_time,
70982+ .maxlen = sizeof(int),
70983+ .mode = 0600,
70984+ .proc_handler = &proc_dointvec,
70985+ },
70986+#endif
70987+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
70988+ {
70989+ .procname = "chroot_deny_shmat",
70990+ .data = &grsec_enable_chroot_shmat,
70991+ .maxlen = sizeof(int),
70992+ .mode = 0600,
70993+ .proc_handler = &proc_dointvec,
70994+ },
70995+#endif
70996+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
70997+ {
70998+ .procname = "chroot_deny_unix",
70999+ .data = &grsec_enable_chroot_unix,
71000+ .maxlen = sizeof(int),
71001+ .mode = 0600,
71002+ .proc_handler = &proc_dointvec,
71003+ },
71004+#endif
71005+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
71006+ {
71007+ .procname = "chroot_deny_mount",
71008+ .data = &grsec_enable_chroot_mount,
71009+ .maxlen = sizeof(int),
71010+ .mode = 0600,
71011+ .proc_handler = &proc_dointvec,
71012+ },
71013+#endif
71014+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
71015+ {
71016+ .procname = "chroot_deny_fchdir",
71017+ .data = &grsec_enable_chroot_fchdir,
71018+ .maxlen = sizeof(int),
71019+ .mode = 0600,
71020+ .proc_handler = &proc_dointvec,
71021+ },
71022+#endif
71023+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
71024+ {
71025+ .procname = "chroot_deny_chroot",
71026+ .data = &grsec_enable_chroot_double,
71027+ .maxlen = sizeof(int),
71028+ .mode = 0600,
71029+ .proc_handler = &proc_dointvec,
71030+ },
71031+#endif
71032+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
71033+ {
71034+ .procname = "chroot_deny_pivot",
71035+ .data = &grsec_enable_chroot_pivot,
71036+ .maxlen = sizeof(int),
71037+ .mode = 0600,
71038+ .proc_handler = &proc_dointvec,
71039+ },
71040+#endif
71041+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
71042+ {
71043+ .procname = "chroot_enforce_chdir",
71044+ .data = &grsec_enable_chroot_chdir,
71045+ .maxlen = sizeof(int),
71046+ .mode = 0600,
71047+ .proc_handler = &proc_dointvec,
71048+ },
71049+#endif
71050+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
71051+ {
71052+ .procname = "chroot_deny_chmod",
71053+ .data = &grsec_enable_chroot_chmod,
71054+ .maxlen = sizeof(int),
71055+ .mode = 0600,
71056+ .proc_handler = &proc_dointvec,
71057+ },
71058+#endif
71059+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
71060+ {
71061+ .procname = "chroot_deny_mknod",
71062+ .data = &grsec_enable_chroot_mknod,
71063+ .maxlen = sizeof(int),
71064+ .mode = 0600,
71065+ .proc_handler = &proc_dointvec,
71066+ },
71067+#endif
71068+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
71069+ {
71070+ .procname = "chroot_restrict_nice",
71071+ .data = &grsec_enable_chroot_nice,
71072+ .maxlen = sizeof(int),
71073+ .mode = 0600,
71074+ .proc_handler = &proc_dointvec,
71075+ },
71076+#endif
71077+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
71078+ {
71079+ .procname = "chroot_execlog",
71080+ .data = &grsec_enable_chroot_execlog,
71081+ .maxlen = sizeof(int),
71082+ .mode = 0600,
71083+ .proc_handler = &proc_dointvec,
71084+ },
71085+#endif
71086+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
71087+ {
71088+ .procname = "chroot_caps",
71089+ .data = &grsec_enable_chroot_caps,
71090+ .maxlen = sizeof(int),
71091+ .mode = 0600,
71092+ .proc_handler = &proc_dointvec,
71093+ },
71094+#endif
71095+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
71096+ {
71097+ .procname = "chroot_deny_sysctl",
71098+ .data = &grsec_enable_chroot_sysctl,
71099+ .maxlen = sizeof(int),
71100+ .mode = 0600,
71101+ .proc_handler = &proc_dointvec,
71102+ },
71103+#endif
71104+#ifdef CONFIG_GRKERNSEC_TPE
71105+ {
71106+ .procname = "tpe",
71107+ .data = &grsec_enable_tpe,
71108+ .maxlen = sizeof(int),
71109+ .mode = 0600,
71110+ .proc_handler = &proc_dointvec,
71111+ },
71112+ {
71113+ .procname = "tpe_gid",
71114+ .data = &grsec_tpe_gid,
71115+ .maxlen = sizeof(int),
71116+ .mode = 0600,
71117+ .proc_handler = &proc_dointvec,
71118+ },
71119+#endif
71120+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
71121+ {
71122+ .procname = "tpe_invert",
71123+ .data = &grsec_enable_tpe_invert,
71124+ .maxlen = sizeof(int),
71125+ .mode = 0600,
71126+ .proc_handler = &proc_dointvec,
71127+ },
71128+#endif
71129+#ifdef CONFIG_GRKERNSEC_TPE_ALL
71130+ {
71131+ .procname = "tpe_restrict_all",
71132+ .data = &grsec_enable_tpe_all,
71133+ .maxlen = sizeof(int),
71134+ .mode = 0600,
71135+ .proc_handler = &proc_dointvec,
71136+ },
71137+#endif
71138+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
71139+ {
71140+ .procname = "socket_all",
71141+ .data = &grsec_enable_socket_all,
71142+ .maxlen = sizeof(int),
71143+ .mode = 0600,
71144+ .proc_handler = &proc_dointvec,
71145+ },
71146+ {
71147+ .procname = "socket_all_gid",
71148+ .data = &grsec_socket_all_gid,
71149+ .maxlen = sizeof(int),
71150+ .mode = 0600,
71151+ .proc_handler = &proc_dointvec,
71152+ },
71153+#endif
71154+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
71155+ {
71156+ .procname = "socket_client",
71157+ .data = &grsec_enable_socket_client,
71158+ .maxlen = sizeof(int),
71159+ .mode = 0600,
71160+ .proc_handler = &proc_dointvec,
71161+ },
71162+ {
71163+ .procname = "socket_client_gid",
71164+ .data = &grsec_socket_client_gid,
71165+ .maxlen = sizeof(int),
71166+ .mode = 0600,
71167+ .proc_handler = &proc_dointvec,
71168+ },
71169+#endif
71170+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
71171+ {
71172+ .procname = "socket_server",
71173+ .data = &grsec_enable_socket_server,
71174+ .maxlen = sizeof(int),
71175+ .mode = 0600,
71176+ .proc_handler = &proc_dointvec,
71177+ },
71178+ {
71179+ .procname = "socket_server_gid",
71180+ .data = &grsec_socket_server_gid,
71181+ .maxlen = sizeof(int),
71182+ .mode = 0600,
71183+ .proc_handler = &proc_dointvec,
71184+ },
71185+#endif
71186+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
71187+ {
71188+ .procname = "audit_group",
71189+ .data = &grsec_enable_group,
71190+ .maxlen = sizeof(int),
71191+ .mode = 0600,
71192+ .proc_handler = &proc_dointvec,
71193+ },
71194+ {
71195+ .procname = "audit_gid",
71196+ .data = &grsec_audit_gid,
71197+ .maxlen = sizeof(int),
71198+ .mode = 0600,
71199+ .proc_handler = &proc_dointvec,
71200+ },
71201+#endif
71202+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
71203+ {
71204+ .procname = "audit_chdir",
71205+ .data = &grsec_enable_chdir,
71206+ .maxlen = sizeof(int),
71207+ .mode = 0600,
71208+ .proc_handler = &proc_dointvec,
71209+ },
71210+#endif
71211+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
71212+ {
71213+ .procname = "audit_mount",
71214+ .data = &grsec_enable_mount,
71215+ .maxlen = sizeof(int),
71216+ .mode = 0600,
71217+ .proc_handler = &proc_dointvec,
71218+ },
71219+#endif
71220+#ifdef CONFIG_GRKERNSEC_DMESG
71221+ {
71222+ .procname = "dmesg",
71223+ .data = &grsec_enable_dmesg,
71224+ .maxlen = sizeof(int),
71225+ .mode = 0600,
71226+ .proc_handler = &proc_dointvec,
71227+ },
71228+#endif
71229+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
71230+ {
71231+ .procname = "chroot_findtask",
71232+ .data = &grsec_enable_chroot_findtask,
71233+ .maxlen = sizeof(int),
71234+ .mode = 0600,
71235+ .proc_handler = &proc_dointvec,
71236+ },
71237+#endif
71238+#ifdef CONFIG_GRKERNSEC_RESLOG
71239+ {
71240+ .procname = "resource_logging",
71241+ .data = &grsec_resource_logging,
71242+ .maxlen = sizeof(int),
71243+ .mode = 0600,
71244+ .proc_handler = &proc_dointvec,
71245+ },
71246+#endif
71247+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
71248+ {
71249+ .procname = "audit_ptrace",
71250+ .data = &grsec_enable_audit_ptrace,
71251+ .maxlen = sizeof(int),
71252+ .mode = 0600,
71253+ .proc_handler = &proc_dointvec,
71254+ },
71255+#endif
71256+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71257+ {
71258+ .procname = "harden_ptrace",
71259+ .data = &grsec_enable_harden_ptrace,
71260+ .maxlen = sizeof(int),
71261+ .mode = 0600,
71262+ .proc_handler = &proc_dointvec,
71263+ },
71264+#endif
71265+ {
71266+ .procname = "grsec_lock",
71267+ .data = &grsec_lock,
71268+ .maxlen = sizeof(int),
71269+ .mode = 0600,
71270+ .proc_handler = &proc_dointvec,
71271+ },
71272+#endif
71273+#ifdef CONFIG_GRKERNSEC_ROFS
71274+ {
71275+ .procname = "romount_protect",
71276+ .data = &grsec_enable_rofs,
71277+ .maxlen = sizeof(int),
71278+ .mode = 0600,
71279+ .proc_handler = &proc_dointvec_minmax,
71280+ .extra1 = &one,
71281+ .extra2 = &one,
71282+ },
71283+#endif
71284+#ifdef CONFIG_GRKERNSEC_DENYUSB
71285+ {
71286+ .procname = "deny_new_usb",
71287+ .data = &grsec_deny_new_usb,
71288+ .maxlen = sizeof(int),
71289+ .mode = 0600,
71290+ .proc_handler = &proc_dointvec_minmax,
71291+ .extra1 = &one,
71292+ .extra2 = &one,
71293+ },
71294+#endif
71295+ { }
71296+};
71297+#endif
71298diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
71299new file mode 100644
71300index 0000000..0dc13c3
71301--- /dev/null
71302+++ b/grsecurity/grsec_time.c
71303@@ -0,0 +1,16 @@
71304+#include <linux/kernel.h>
71305+#include <linux/sched.h>
71306+#include <linux/grinternal.h>
71307+#include <linux/module.h>
71308+
71309+void
71310+gr_log_timechange(void)
71311+{
71312+#ifdef CONFIG_GRKERNSEC_TIME
71313+ if (grsec_enable_time)
71314+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
71315+#endif
71316+ return;
71317+}
71318+
71319+EXPORT_SYMBOL(gr_log_timechange);
71320diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
71321new file mode 100644
71322index 0000000..ee57dcf
71323--- /dev/null
71324+++ b/grsecurity/grsec_tpe.c
71325@@ -0,0 +1,73 @@
71326+#include <linux/kernel.h>
71327+#include <linux/sched.h>
71328+#include <linux/file.h>
71329+#include <linux/fs.h>
71330+#include <linux/grinternal.h>
71331+
71332+extern int gr_acl_tpe_check(void);
71333+
71334+int
71335+gr_tpe_allow(const struct file *file)
71336+{
71337+#ifdef CONFIG_GRKERNSEC
71338+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
71339+ const struct cred *cred = current_cred();
71340+ char *msg = NULL;
71341+ char *msg2 = NULL;
71342+
71343+ // never restrict root
71344+ if (gr_is_global_root(cred->uid))
71345+ return 1;
71346+
71347+ if (grsec_enable_tpe) {
71348+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
71349+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
71350+ msg = "not being in trusted group";
71351+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
71352+ msg = "being in untrusted group";
71353+#else
71354+ if (in_group_p(grsec_tpe_gid))
71355+ msg = "being in untrusted group";
71356+#endif
71357+ }
71358+ if (!msg && gr_acl_tpe_check())
71359+ msg = "being in untrusted role";
71360+
71361+ // not in any affected group/role
71362+ if (!msg)
71363+ goto next_check;
71364+
71365+ if (gr_is_global_nonroot(inode->i_uid))
71366+ msg2 = "file in non-root-owned directory";
71367+ else if (inode->i_mode & S_IWOTH)
71368+ msg2 = "file in world-writable directory";
71369+ else if (inode->i_mode & S_IWGRP)
71370+ msg2 = "file in group-writable directory";
71371+
71372+ if (msg && msg2) {
71373+ char fullmsg[70] = {0};
71374+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
71375+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
71376+ return 0;
71377+ }
71378+ msg = NULL;
71379+next_check:
71380+#ifdef CONFIG_GRKERNSEC_TPE_ALL
71381+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
71382+ return 1;
71383+
71384+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
71385+ msg = "directory not owned by user";
71386+ else if (inode->i_mode & S_IWOTH)
71387+ msg = "file in world-writable directory";
71388+ else if (inode->i_mode & S_IWGRP)
71389+ msg = "file in group-writable directory";
71390+
71391+ if (msg) {
71392+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
71393+ return 0;
71394+ }
71395+#endif
71396+#endif
71397+ return 1;
71398+}
71399diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
71400new file mode 100644
71401index 0000000..ae02d8e
71402--- /dev/null
71403+++ b/grsecurity/grsec_usb.c
71404@@ -0,0 +1,15 @@
71405+#include <linux/kernel.h>
71406+#include <linux/grinternal.h>
71407+#include <linux/module.h>
71408+
71409+int gr_handle_new_usb(void)
71410+{
71411+#ifdef CONFIG_GRKERNSEC_DENYUSB
71412+ if (grsec_deny_new_usb) {
71413+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
71414+ return 1;
71415+ }
71416+#endif
71417+ return 0;
71418+}
71419+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
71420diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
71421new file mode 100644
71422index 0000000..9f7b1ac
71423--- /dev/null
71424+++ b/grsecurity/grsum.c
71425@@ -0,0 +1,61 @@
71426+#include <linux/err.h>
71427+#include <linux/kernel.h>
71428+#include <linux/sched.h>
71429+#include <linux/mm.h>
71430+#include <linux/scatterlist.h>
71431+#include <linux/crypto.h>
71432+#include <linux/gracl.h>
71433+
71434+
71435+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
71436+#error "crypto and sha256 must be built into the kernel"
71437+#endif
71438+
71439+int
71440+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
71441+{
71442+ char *p;
71443+ struct crypto_hash *tfm;
71444+ struct hash_desc desc;
71445+ struct scatterlist sg;
71446+ unsigned char temp_sum[GR_SHA_LEN];
71447+ volatile int retval = 0;
71448+ volatile int dummy = 0;
71449+ unsigned int i;
71450+
71451+ sg_init_table(&sg, 1);
71452+
71453+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
71454+ if (IS_ERR(tfm)) {
71455+ /* should never happen, since sha256 should be built in */
71456+ return 1;
71457+ }
71458+
71459+ desc.tfm = tfm;
71460+ desc.flags = 0;
71461+
71462+ crypto_hash_init(&desc);
71463+
71464+ p = salt;
71465+ sg_set_buf(&sg, p, GR_SALT_LEN);
71466+ crypto_hash_update(&desc, &sg, sg.length);
71467+
71468+ p = entry->pw;
71469+ sg_set_buf(&sg, p, strlen(p));
71470+
71471+ crypto_hash_update(&desc, &sg, sg.length);
71472+
71473+ crypto_hash_final(&desc, temp_sum);
71474+
71475+ memset(entry->pw, 0, GR_PW_LEN);
71476+
71477+ for (i = 0; i < GR_SHA_LEN; i++)
71478+ if (sum[i] != temp_sum[i])
71479+ retval = 1;
71480+ else
71481+ dummy = 1; // waste a cycle
71482+
71483+ crypto_free_hash(tfm);
71484+
71485+ return retval;
71486+}
71487diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
71488index 77ff547..181834f 100644
71489--- a/include/asm-generic/4level-fixup.h
71490+++ b/include/asm-generic/4level-fixup.h
71491@@ -13,8 +13,10 @@
71492 #define pmd_alloc(mm, pud, address) \
71493 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
71494 NULL: pmd_offset(pud, address))
71495+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
71496
71497 #define pud_alloc(mm, pgd, address) (pgd)
71498+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
71499 #define pud_offset(pgd, start) (pgd)
71500 #define pud_none(pud) 0
71501 #define pud_bad(pud) 0
71502diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
71503index b7babf0..04ad282 100644
71504--- a/include/asm-generic/atomic-long.h
71505+++ b/include/asm-generic/atomic-long.h
71506@@ -22,6 +22,12 @@
71507
71508 typedef atomic64_t atomic_long_t;
71509
71510+#ifdef CONFIG_PAX_REFCOUNT
71511+typedef atomic64_unchecked_t atomic_long_unchecked_t;
71512+#else
71513+typedef atomic64_t atomic_long_unchecked_t;
71514+#endif
71515+
71516 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
71517
71518 static inline long atomic_long_read(atomic_long_t *l)
71519@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
71520 return (long)atomic64_read(v);
71521 }
71522
71523+#ifdef CONFIG_PAX_REFCOUNT
71524+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
71525+{
71526+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71527+
71528+ return (long)atomic64_read_unchecked(v);
71529+}
71530+#endif
71531+
71532 static inline void atomic_long_set(atomic_long_t *l, long i)
71533 {
71534 atomic64_t *v = (atomic64_t *)l;
71535@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
71536 atomic64_set(v, i);
71537 }
71538
71539+#ifdef CONFIG_PAX_REFCOUNT
71540+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
71541+{
71542+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71543+
71544+ atomic64_set_unchecked(v, i);
71545+}
71546+#endif
71547+
71548 static inline void atomic_long_inc(atomic_long_t *l)
71549 {
71550 atomic64_t *v = (atomic64_t *)l;
71551@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
71552 atomic64_inc(v);
71553 }
71554
71555+#ifdef CONFIG_PAX_REFCOUNT
71556+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
71557+{
71558+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71559+
71560+ atomic64_inc_unchecked(v);
71561+}
71562+#endif
71563+
71564 static inline void atomic_long_dec(atomic_long_t *l)
71565 {
71566 atomic64_t *v = (atomic64_t *)l;
71567@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
71568 atomic64_dec(v);
71569 }
71570
71571+#ifdef CONFIG_PAX_REFCOUNT
71572+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
71573+{
71574+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71575+
71576+ atomic64_dec_unchecked(v);
71577+}
71578+#endif
71579+
71580 static inline void atomic_long_add(long i, atomic_long_t *l)
71581 {
71582 atomic64_t *v = (atomic64_t *)l;
71583@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
71584 atomic64_add(i, v);
71585 }
71586
71587+#ifdef CONFIG_PAX_REFCOUNT
71588+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
71589+{
71590+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71591+
71592+ atomic64_add_unchecked(i, v);
71593+}
71594+#endif
71595+
71596 static inline void atomic_long_sub(long i, atomic_long_t *l)
71597 {
71598 atomic64_t *v = (atomic64_t *)l;
71599@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
71600 atomic64_sub(i, v);
71601 }
71602
71603+#ifdef CONFIG_PAX_REFCOUNT
71604+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
71605+{
71606+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71607+
71608+ atomic64_sub_unchecked(i, v);
71609+}
71610+#endif
71611+
71612 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
71613 {
71614 atomic64_t *v = (atomic64_t *)l;
71615@@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
71616 return (long)atomic64_add_return(i, v);
71617 }
71618
71619+#ifdef CONFIG_PAX_REFCOUNT
71620+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
71621+{
71622+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71623+
71624+ return (long)atomic64_add_return_unchecked(i, v);
71625+}
71626+#endif
71627+
71628 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
71629 {
71630 atomic64_t *v = (atomic64_t *)l;
71631@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
71632 return (long)atomic64_inc_return(v);
71633 }
71634
71635+#ifdef CONFIG_PAX_REFCOUNT
71636+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
71637+{
71638+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71639+
71640+ return (long)atomic64_inc_return_unchecked(v);
71641+}
71642+#endif
71643+
71644 static inline long atomic_long_dec_return(atomic_long_t *l)
71645 {
71646 atomic64_t *v = (atomic64_t *)l;
71647@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
71648
71649 typedef atomic_t atomic_long_t;
71650
71651+#ifdef CONFIG_PAX_REFCOUNT
71652+typedef atomic_unchecked_t atomic_long_unchecked_t;
71653+#else
71654+typedef atomic_t atomic_long_unchecked_t;
71655+#endif
71656+
71657 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
71658 static inline long atomic_long_read(atomic_long_t *l)
71659 {
71660@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
71661 return (long)atomic_read(v);
71662 }
71663
71664+#ifdef CONFIG_PAX_REFCOUNT
71665+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
71666+{
71667+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71668+
71669+ return (long)atomic_read_unchecked(v);
71670+}
71671+#endif
71672+
71673 static inline void atomic_long_set(atomic_long_t *l, long i)
71674 {
71675 atomic_t *v = (atomic_t *)l;
71676@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
71677 atomic_set(v, i);
71678 }
71679
71680+#ifdef CONFIG_PAX_REFCOUNT
71681+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
71682+{
71683+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71684+
71685+ atomic_set_unchecked(v, i);
71686+}
71687+#endif
71688+
71689 static inline void atomic_long_inc(atomic_long_t *l)
71690 {
71691 atomic_t *v = (atomic_t *)l;
71692@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
71693 atomic_inc(v);
71694 }
71695
71696+#ifdef CONFIG_PAX_REFCOUNT
71697+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
71698+{
71699+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71700+
71701+ atomic_inc_unchecked(v);
71702+}
71703+#endif
71704+
71705 static inline void atomic_long_dec(atomic_long_t *l)
71706 {
71707 atomic_t *v = (atomic_t *)l;
71708@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
71709 atomic_dec(v);
71710 }
71711
71712+#ifdef CONFIG_PAX_REFCOUNT
71713+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
71714+{
71715+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71716+
71717+ atomic_dec_unchecked(v);
71718+}
71719+#endif
71720+
71721 static inline void atomic_long_add(long i, atomic_long_t *l)
71722 {
71723 atomic_t *v = (atomic_t *)l;
71724@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
71725 atomic_add(i, v);
71726 }
71727
71728+#ifdef CONFIG_PAX_REFCOUNT
71729+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
71730+{
71731+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71732+
71733+ atomic_add_unchecked(i, v);
71734+}
71735+#endif
71736+
71737 static inline void atomic_long_sub(long i, atomic_long_t *l)
71738 {
71739 atomic_t *v = (atomic_t *)l;
71740@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
71741 atomic_sub(i, v);
71742 }
71743
71744+#ifdef CONFIG_PAX_REFCOUNT
71745+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
71746+{
71747+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71748+
71749+ atomic_sub_unchecked(i, v);
71750+}
71751+#endif
71752+
71753 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
71754 {
71755 atomic_t *v = (atomic_t *)l;
71756@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
71757 return (long)atomic_add_return(i, v);
71758 }
71759
71760+#ifdef CONFIG_PAX_REFCOUNT
71761+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
71762+{
71763+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71764+
71765+ return (long)atomic_add_return_unchecked(i, v);
71766+}
71767+
71768+#endif
71769+
71770 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
71771 {
71772 atomic_t *v = (atomic_t *)l;
71773@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
71774 return (long)atomic_inc_return(v);
71775 }
71776
71777+#ifdef CONFIG_PAX_REFCOUNT
71778+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
71779+{
71780+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71781+
71782+ return (long)atomic_inc_return_unchecked(v);
71783+}
71784+#endif
71785+
71786 static inline long atomic_long_dec_return(atomic_long_t *l)
71787 {
71788 atomic_t *v = (atomic_t *)l;
71789@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
71790
71791 #endif /* BITS_PER_LONG == 64 */
71792
71793+#ifdef CONFIG_PAX_REFCOUNT
71794+static inline void pax_refcount_needs_these_functions(void)
71795+{
71796+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
71797+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
71798+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
71799+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
71800+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
71801+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
71802+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
71803+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
71804+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
71805+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
71806+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
71807+#ifdef CONFIG_X86
71808+ atomic_clear_mask_unchecked(0, NULL);
71809+ atomic_set_mask_unchecked(0, NULL);
71810+#endif
71811+
71812+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
71813+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
71814+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
71815+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
71816+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
71817+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
71818+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
71819+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
71820+}
71821+#else
71822+#define atomic_read_unchecked(v) atomic_read(v)
71823+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
71824+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
71825+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
71826+#define atomic_inc_unchecked(v) atomic_inc(v)
71827+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
71828+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
71829+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
71830+#define atomic_dec_unchecked(v) atomic_dec(v)
71831+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
71832+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
71833+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
71834+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
71835+
71836+#define atomic_long_read_unchecked(v) atomic_long_read(v)
71837+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
71838+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
71839+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
71840+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
71841+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
71842+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
71843+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
71844+#endif
71845+
71846 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
71847diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
71848index 33bd2de..f31bff97 100644
71849--- a/include/asm-generic/atomic.h
71850+++ b/include/asm-generic/atomic.h
71851@@ -153,7 +153,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
71852 * Atomically clears the bits set in @mask from @v
71853 */
71854 #ifndef atomic_clear_mask
71855-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
71856+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
71857 {
71858 unsigned long flags;
71859
71860diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
71861index b18ce4f..2ee2843 100644
71862--- a/include/asm-generic/atomic64.h
71863+++ b/include/asm-generic/atomic64.h
71864@@ -16,6 +16,8 @@ typedef struct {
71865 long long counter;
71866 } atomic64_t;
71867
71868+typedef atomic64_t atomic64_unchecked_t;
71869+
71870 #define ATOMIC64_INIT(i) { (i) }
71871
71872 extern long long atomic64_read(const atomic64_t *v);
71873@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
71874 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
71875 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
71876
71877+#define atomic64_read_unchecked(v) atomic64_read(v)
71878+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
71879+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
71880+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
71881+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
71882+#define atomic64_inc_unchecked(v) atomic64_inc(v)
71883+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
71884+#define atomic64_dec_unchecked(v) atomic64_dec(v)
71885+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
71886+
71887 #endif /* _ASM_GENERIC_ATOMIC64_H */
71888diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
71889index 1bfcfe5..e04c5c9 100644
71890--- a/include/asm-generic/cache.h
71891+++ b/include/asm-generic/cache.h
71892@@ -6,7 +6,7 @@
71893 * cache lines need to provide their own cache.h.
71894 */
71895
71896-#define L1_CACHE_SHIFT 5
71897-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
71898+#define L1_CACHE_SHIFT 5UL
71899+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
71900
71901 #endif /* __ASM_GENERIC_CACHE_H */
71902diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
71903index 0d68a1e..b74a761 100644
71904--- a/include/asm-generic/emergency-restart.h
71905+++ b/include/asm-generic/emergency-restart.h
71906@@ -1,7 +1,7 @@
71907 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
71908 #define _ASM_GENERIC_EMERGENCY_RESTART_H
71909
71910-static inline void machine_emergency_restart(void)
71911+static inline __noreturn void machine_emergency_restart(void)
71912 {
71913 machine_restart(NULL);
71914 }
71915diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
71916index 90f99c7..00ce236 100644
71917--- a/include/asm-generic/kmap_types.h
71918+++ b/include/asm-generic/kmap_types.h
71919@@ -2,9 +2,9 @@
71920 #define _ASM_GENERIC_KMAP_TYPES_H
71921
71922 #ifdef __WITH_KM_FENCE
71923-# define KM_TYPE_NR 41
71924+# define KM_TYPE_NR 42
71925 #else
71926-# define KM_TYPE_NR 20
71927+# define KM_TYPE_NR 21
71928 #endif
71929
71930 #endif
71931diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
71932index 9ceb03b..62b0b8f 100644
71933--- a/include/asm-generic/local.h
71934+++ b/include/asm-generic/local.h
71935@@ -23,24 +23,37 @@ typedef struct
71936 atomic_long_t a;
71937 } local_t;
71938
71939+typedef struct {
71940+ atomic_long_unchecked_t a;
71941+} local_unchecked_t;
71942+
71943 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
71944
71945 #define local_read(l) atomic_long_read(&(l)->a)
71946+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
71947 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
71948+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
71949 #define local_inc(l) atomic_long_inc(&(l)->a)
71950+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
71951 #define local_dec(l) atomic_long_dec(&(l)->a)
71952+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
71953 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
71954+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
71955 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
71956+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
71957
71958 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
71959 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
71960 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
71961 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
71962 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
71963+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
71964 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
71965 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
71966+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
71967
71968 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
71969+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
71970 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
71971 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
71972 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
71973diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
71974index 725612b..9cc513a 100644
71975--- a/include/asm-generic/pgtable-nopmd.h
71976+++ b/include/asm-generic/pgtable-nopmd.h
71977@@ -1,14 +1,19 @@
71978 #ifndef _PGTABLE_NOPMD_H
71979 #define _PGTABLE_NOPMD_H
71980
71981-#ifndef __ASSEMBLY__
71982-
71983 #include <asm-generic/pgtable-nopud.h>
71984
71985-struct mm_struct;
71986-
71987 #define __PAGETABLE_PMD_FOLDED
71988
71989+#define PMD_SHIFT PUD_SHIFT
71990+#define PTRS_PER_PMD 1
71991+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
71992+#define PMD_MASK (~(PMD_SIZE-1))
71993+
71994+#ifndef __ASSEMBLY__
71995+
71996+struct mm_struct;
71997+
71998 /*
71999 * Having the pmd type consist of a pud gets the size right, and allows
72000 * us to conceptually access the pud entry that this pmd is folded into
72001@@ -16,11 +21,6 @@ struct mm_struct;
72002 */
72003 typedef struct { pud_t pud; } pmd_t;
72004
72005-#define PMD_SHIFT PUD_SHIFT
72006-#define PTRS_PER_PMD 1
72007-#define PMD_SIZE (1UL << PMD_SHIFT)
72008-#define PMD_MASK (~(PMD_SIZE-1))
72009-
72010 /*
72011 * The "pud_xxx()" functions here are trivial for a folded two-level
72012 * setup: the pmd is never bad, and a pmd always exists (as it's folded
72013diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
72014index 810431d..0ec4804f 100644
72015--- a/include/asm-generic/pgtable-nopud.h
72016+++ b/include/asm-generic/pgtable-nopud.h
72017@@ -1,10 +1,15 @@
72018 #ifndef _PGTABLE_NOPUD_H
72019 #define _PGTABLE_NOPUD_H
72020
72021-#ifndef __ASSEMBLY__
72022-
72023 #define __PAGETABLE_PUD_FOLDED
72024
72025+#define PUD_SHIFT PGDIR_SHIFT
72026+#define PTRS_PER_PUD 1
72027+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
72028+#define PUD_MASK (~(PUD_SIZE-1))
72029+
72030+#ifndef __ASSEMBLY__
72031+
72032 /*
72033 * Having the pud type consist of a pgd gets the size right, and allows
72034 * us to conceptually access the pgd entry that this pud is folded into
72035@@ -12,11 +17,6 @@
72036 */
72037 typedef struct { pgd_t pgd; } pud_t;
72038
72039-#define PUD_SHIFT PGDIR_SHIFT
72040-#define PTRS_PER_PUD 1
72041-#define PUD_SIZE (1UL << PUD_SHIFT)
72042-#define PUD_MASK (~(PUD_SIZE-1))
72043-
72044 /*
72045 * The "pgd_xxx()" functions here are trivial for a folded two-level
72046 * setup: the pud is never bad, and a pud always exists (as it's folded
72047@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
72048 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
72049
72050 #define pgd_populate(mm, pgd, pud) do { } while (0)
72051+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
72052 /*
72053 * (puds are folded into pgds so this doesn't get actually called,
72054 * but the define is needed for a generic inline function.)
72055diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
72056index a59ff51..2594a70 100644
72057--- a/include/asm-generic/pgtable.h
72058+++ b/include/asm-generic/pgtable.h
72059@@ -688,6 +688,14 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
72060 }
72061 #endif /* CONFIG_NUMA_BALANCING */
72062
72063+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
72064+static inline unsigned long pax_open_kernel(void) { return 0; }
72065+#endif
72066+
72067+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
72068+static inline unsigned long pax_close_kernel(void) { return 0; }
72069+#endif
72070+
72071 #endif /* CONFIG_MMU */
72072
72073 #endif /* !__ASSEMBLY__ */
72074diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
72075index c184aa8..d049942 100644
72076--- a/include/asm-generic/uaccess.h
72077+++ b/include/asm-generic/uaccess.h
72078@@ -343,4 +343,12 @@ clear_user(void __user *to, unsigned long n)
72079 return __clear_user(to, n);
72080 }
72081
72082+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
72083+//static inline unsigned long pax_open_userland(void) { return 0; }
72084+#endif
72085+
72086+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
72087+//static inline unsigned long pax_close_userland(void) { return 0; }
72088+#endif
72089+
72090 #endif /* __ASM_GENERIC_UACCESS_H */
72091diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
72092index eb58d2d..df131bf 100644
72093--- a/include/asm-generic/vmlinux.lds.h
72094+++ b/include/asm-generic/vmlinux.lds.h
72095@@ -239,6 +239,7 @@
72096 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
72097 VMLINUX_SYMBOL(__start_rodata) = .; \
72098 *(.rodata) *(.rodata.*) \
72099+ *(.data..read_only) \
72100 *(__vermagic) /* Kernel version magic */ \
72101 . = ALIGN(8); \
72102 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
72103@@ -749,17 +750,18 @@
72104 * section in the linker script will go there too. @phdr should have
72105 * a leading colon.
72106 *
72107- * Note that this macros defines __per_cpu_load as an absolute symbol.
72108+ * Note that this macros defines per_cpu_load as an absolute symbol.
72109 * If there is no need to put the percpu section at a predetermined
72110 * address, use PERCPU_SECTION.
72111 */
72112 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
72113- VMLINUX_SYMBOL(__per_cpu_load) = .; \
72114- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
72115+ per_cpu_load = .; \
72116+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
72117 - LOAD_OFFSET) { \
72118+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
72119 PERCPU_INPUT(cacheline) \
72120 } phdr \
72121- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
72122+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
72123
72124 /**
72125 * PERCPU_SECTION - define output section for percpu area, simple version
72126diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
72127index 418d270..bfd2794 100644
72128--- a/include/crypto/algapi.h
72129+++ b/include/crypto/algapi.h
72130@@ -34,7 +34,7 @@ struct crypto_type {
72131 unsigned int maskclear;
72132 unsigned int maskset;
72133 unsigned int tfmsize;
72134-};
72135+} __do_const;
72136
72137 struct crypto_instance {
72138 struct crypto_alg alg;
72139diff --git a/include/drm/drmP.h b/include/drm/drmP.h
72140index 63d17ee..716de2b 100644
72141--- a/include/drm/drmP.h
72142+++ b/include/drm/drmP.h
72143@@ -72,6 +72,7 @@
72144 #include <linux/workqueue.h>
72145 #include <linux/poll.h>
72146 #include <asm/pgalloc.h>
72147+#include <asm/local.h>
72148 #include <drm/drm.h>
72149 #include <drm/drm_sarea.h>
72150
72151@@ -296,10 +297,12 @@ do { \
72152 * \param cmd command.
72153 * \param arg argument.
72154 */
72155-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
72156+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
72157+ struct drm_file *file_priv);
72158+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
72159 struct drm_file *file_priv);
72160
72161-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
72162+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
72163 unsigned long arg);
72164
72165 #define DRM_IOCTL_NR(n) _IOC_NR(n)
72166@@ -314,10 +317,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
72167 struct drm_ioctl_desc {
72168 unsigned int cmd;
72169 int flags;
72170- drm_ioctl_t *func;
72171+ drm_ioctl_t func;
72172 unsigned int cmd_drv;
72173 const char *name;
72174-};
72175+} __do_const;
72176
72177 /**
72178 * Creates a driver or general drm_ioctl_desc array entry for the given
72179@@ -1015,7 +1018,7 @@ struct drm_info_list {
72180 int (*show)(struct seq_file*, void*); /** show callback */
72181 u32 driver_features; /**< Required driver features for this entry */
72182 void *data;
72183-};
72184+} __do_const;
72185
72186 /**
72187 * debugfs node structure. This structure represents a debugfs file.
72188@@ -1088,7 +1091,7 @@ struct drm_device {
72189
72190 /** \name Usage Counters */
72191 /*@{ */
72192- int open_count; /**< Outstanding files open */
72193+ local_t open_count; /**< Outstanding files open */
72194 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
72195 atomic_t vma_count; /**< Outstanding vma areas open */
72196 int buf_use; /**< Buffers in use -- cannot alloc */
72197@@ -1099,7 +1102,7 @@ struct drm_device {
72198 /*@{ */
72199 unsigned long counters;
72200 enum drm_stat_type types[15];
72201- atomic_t counts[15];
72202+ atomic_unchecked_t counts[15];
72203 /*@} */
72204
72205 struct list_head filelist;
72206diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
72207index f43d556..94d9343 100644
72208--- a/include/drm/drm_crtc_helper.h
72209+++ b/include/drm/drm_crtc_helper.h
72210@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
72211 struct drm_connector *connector);
72212 /* disable encoder when not in use - more explicit than dpms off */
72213 void (*disable)(struct drm_encoder *encoder);
72214-};
72215+} __no_const;
72216
72217 /**
72218 * drm_connector_helper_funcs - helper operations for connectors
72219diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
72220index 72dcbe8..8db58d7 100644
72221--- a/include/drm/ttm/ttm_memory.h
72222+++ b/include/drm/ttm/ttm_memory.h
72223@@ -48,7 +48,7 @@
72224
72225 struct ttm_mem_shrink {
72226 int (*do_shrink) (struct ttm_mem_shrink *);
72227-};
72228+} __no_const;
72229
72230 /**
72231 * struct ttm_mem_global - Global memory accounting structure.
72232diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
72233index 4b840e8..155d235 100644
72234--- a/include/keys/asymmetric-subtype.h
72235+++ b/include/keys/asymmetric-subtype.h
72236@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
72237 /* Verify the signature on a key of this subtype (optional) */
72238 int (*verify_signature)(const struct key *key,
72239 const struct public_key_signature *sig);
72240-};
72241+} __do_const;
72242
72243 /**
72244 * asymmetric_key_subtype - Get the subtype from an asymmetric key
72245diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
72246index c1da539..1dcec55 100644
72247--- a/include/linux/atmdev.h
72248+++ b/include/linux/atmdev.h
72249@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
72250 #endif
72251
72252 struct k_atm_aal_stats {
72253-#define __HANDLE_ITEM(i) atomic_t i
72254+#define __HANDLE_ITEM(i) atomic_unchecked_t i
72255 __AAL_STAT_ITEMS
72256 #undef __HANDLE_ITEM
72257 };
72258@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
72259 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
72260 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
72261 struct module *owner;
72262-};
72263+} __do_const ;
72264
72265 struct atmphy_ops {
72266 int (*start)(struct atm_dev *dev);
72267diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
72268index 70cf138..0418ee2 100644
72269--- a/include/linux/binfmts.h
72270+++ b/include/linux/binfmts.h
72271@@ -73,8 +73,10 @@ struct linux_binfmt {
72272 int (*load_binary)(struct linux_binprm *);
72273 int (*load_shlib)(struct file *);
72274 int (*core_dump)(struct coredump_params *cprm);
72275+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
72276+ void (*handle_mmap)(struct file *);
72277 unsigned long min_coredump; /* minimal dump size */
72278-};
72279+} __do_const;
72280
72281 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
72282
72283diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
72284index 2fdb4a4..54aad7e 100644
72285--- a/include/linux/blkdev.h
72286+++ b/include/linux/blkdev.h
72287@@ -1526,7 +1526,7 @@ struct block_device_operations {
72288 /* this callback is with swap_lock and sometimes page table lock held */
72289 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
72290 struct module *owner;
72291-};
72292+} __do_const;
72293
72294 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
72295 unsigned long);
72296diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
72297index 7c2e030..b72475d 100644
72298--- a/include/linux/blktrace_api.h
72299+++ b/include/linux/blktrace_api.h
72300@@ -23,7 +23,7 @@ struct blk_trace {
72301 struct dentry *dir;
72302 struct dentry *dropped_file;
72303 struct dentry *msg_file;
72304- atomic_t dropped;
72305+ atomic_unchecked_t dropped;
72306 };
72307
72308 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
72309diff --git a/include/linux/cache.h b/include/linux/cache.h
72310index 4c57065..4307975 100644
72311--- a/include/linux/cache.h
72312+++ b/include/linux/cache.h
72313@@ -16,6 +16,10 @@
72314 #define __read_mostly
72315 #endif
72316
72317+#ifndef __read_only
72318+#define __read_only __read_mostly
72319+#endif
72320+
72321 #ifndef ____cacheline_aligned
72322 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
72323 #endif
72324diff --git a/include/linux/capability.h b/include/linux/capability.h
72325index d9a4f7f4..19f77d6 100644
72326--- a/include/linux/capability.h
72327+++ b/include/linux/capability.h
72328@@ -213,8 +213,13 @@ extern bool ns_capable(struct user_namespace *ns, int cap);
72329 extern bool nsown_capable(int cap);
72330 extern bool inode_capable(const struct inode *inode, int cap);
72331 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
72332+extern bool capable_nolog(int cap);
72333+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
72334+extern bool inode_capable_nolog(const struct inode *inode, int cap);
72335
72336 /* audit system wants to get cap info from files as well */
72337 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
72338
72339+extern int is_privileged_binary(const struct dentry *dentry);
72340+
72341 #endif /* !_LINUX_CAPABILITY_H */
72342diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
72343index 8609d57..86e4d79 100644
72344--- a/include/linux/cdrom.h
72345+++ b/include/linux/cdrom.h
72346@@ -87,7 +87,6 @@ struct cdrom_device_ops {
72347
72348 /* driver specifications */
72349 const int capability; /* capability flags */
72350- int n_minors; /* number of active minor devices */
72351 /* handle uniform packets for scsi type devices (scsi,atapi) */
72352 int (*generic_packet) (struct cdrom_device_info *,
72353 struct packet_command *);
72354diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
72355index 4ce9056..86caac6 100644
72356--- a/include/linux/cleancache.h
72357+++ b/include/linux/cleancache.h
72358@@ -31,7 +31,7 @@ struct cleancache_ops {
72359 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
72360 void (*invalidate_inode)(int, struct cleancache_filekey);
72361 void (*invalidate_fs)(int);
72362-};
72363+} __no_const;
72364
72365 extern struct cleancache_ops *
72366 cleancache_register_ops(struct cleancache_ops *ops);
72367diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
72368index 1186098..f87e53d 100644
72369--- a/include/linux/clk-provider.h
72370+++ b/include/linux/clk-provider.h
72371@@ -132,6 +132,7 @@ struct clk_ops {
72372 unsigned long);
72373 void (*init)(struct clk_hw *hw);
72374 };
72375+typedef struct clk_ops __no_const clk_ops_no_const;
72376
72377 /**
72378 * struct clk_init_data - holds init data that's common to all clocks and is
72379diff --git a/include/linux/compat.h b/include/linux/compat.h
72380index 7f0c1dd..206ac34 100644
72381--- a/include/linux/compat.h
72382+++ b/include/linux/compat.h
72383@@ -312,7 +312,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72384 compat_size_t __user *len_ptr);
72385
72386 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
72387-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
72388+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
72389 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
72390 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
72391 compat_ssize_t msgsz, int msgflg);
72392@@ -419,7 +419,7 @@ extern int compat_ptrace_request(struct task_struct *child,
72393 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
72394 compat_ulong_t addr, compat_ulong_t data);
72395 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
72396- compat_long_t addr, compat_long_t data);
72397+ compat_ulong_t addr, compat_ulong_t data);
72398
72399 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
72400 /*
72401@@ -669,6 +669,7 @@ asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
72402
72403 int compat_restore_altstack(const compat_stack_t __user *uss);
72404 int __compat_save_altstack(compat_stack_t __user *, unsigned long);
72405+void __compat_save_altstack_ex(compat_stack_t __user *, unsigned long);
72406
72407 asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
72408 struct compat_timespec __user *interval);
72409diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
72410index 842de22..7f3a41f 100644
72411--- a/include/linux/compiler-gcc4.h
72412+++ b/include/linux/compiler-gcc4.h
72413@@ -39,9 +39,29 @@
72414 # define __compiletime_warning(message) __attribute__((warning(message)))
72415 # define __compiletime_error(message) __attribute__((error(message)))
72416 #endif /* __CHECKER__ */
72417+
72418+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
72419+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
72420+#define __bos0(ptr) __bos((ptr), 0)
72421+#define __bos1(ptr) __bos((ptr), 1)
72422 #endif /* GCC_VERSION >= 40300 */
72423
72424 #if GCC_VERSION >= 40500
72425+
72426+#ifdef CONSTIFY_PLUGIN
72427+#define __no_const __attribute__((no_const))
72428+#define __do_const __attribute__((do_const))
72429+#endif
72430+
72431+#ifdef SIZE_OVERFLOW_PLUGIN
72432+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
72433+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
72434+#endif
72435+
72436+#ifdef LATENT_ENTROPY_PLUGIN
72437+#define __latent_entropy __attribute__((latent_entropy))
72438+#endif
72439+
72440 /*
72441 * Mark a position in code as unreachable. This can be used to
72442 * suppress control flow warnings after asm blocks that transfer
72443diff --git a/include/linux/compiler.h b/include/linux/compiler.h
72444index 92669cd..1771a15 100644
72445--- a/include/linux/compiler.h
72446+++ b/include/linux/compiler.h
72447@@ -5,11 +5,14 @@
72448
72449 #ifdef __CHECKER__
72450 # define __user __attribute__((noderef, address_space(1)))
72451+# define __force_user __force __user
72452 # define __kernel __attribute__((address_space(0)))
72453+# define __force_kernel __force __kernel
72454 # define __safe __attribute__((safe))
72455 # define __force __attribute__((force))
72456 # define __nocast __attribute__((nocast))
72457 # define __iomem __attribute__((noderef, address_space(2)))
72458+# define __force_iomem __force __iomem
72459 # define __must_hold(x) __attribute__((context(x,1,1)))
72460 # define __acquires(x) __attribute__((context(x,0,1)))
72461 # define __releases(x) __attribute__((context(x,1,0)))
72462@@ -17,20 +20,37 @@
72463 # define __release(x) __context__(x,-1)
72464 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
72465 # define __percpu __attribute__((noderef, address_space(3)))
72466+# define __force_percpu __force __percpu
72467 #ifdef CONFIG_SPARSE_RCU_POINTER
72468 # define __rcu __attribute__((noderef, address_space(4)))
72469+# define __force_rcu __force __rcu
72470 #else
72471 # define __rcu
72472+# define __force_rcu
72473 #endif
72474 extern void __chk_user_ptr(const volatile void __user *);
72475 extern void __chk_io_ptr(const volatile void __iomem *);
72476 #else
72477-# define __user
72478-# define __kernel
72479+# ifdef CHECKER_PLUGIN
72480+//# define __user
72481+//# define __force_user
72482+//# define __kernel
72483+//# define __force_kernel
72484+# else
72485+# ifdef STRUCTLEAK_PLUGIN
72486+# define __user __attribute__((user))
72487+# else
72488+# define __user
72489+# endif
72490+# define __force_user
72491+# define __kernel
72492+# define __force_kernel
72493+# endif
72494 # define __safe
72495 # define __force
72496 # define __nocast
72497 # define __iomem
72498+# define __force_iomem
72499 # define __chk_user_ptr(x) (void)0
72500 # define __chk_io_ptr(x) (void)0
72501 # define __builtin_warning(x, y...) (1)
72502@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
72503 # define __release(x) (void)0
72504 # define __cond_lock(x,c) (c)
72505 # define __percpu
72506+# define __force_percpu
72507 # define __rcu
72508+# define __force_rcu
72509 #endif
72510
72511 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
72512@@ -275,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
72513 # define __attribute_const__ /* unimplemented */
72514 #endif
72515
72516+#ifndef __no_const
72517+# define __no_const
72518+#endif
72519+
72520+#ifndef __do_const
72521+# define __do_const
72522+#endif
72523+
72524+#ifndef __size_overflow
72525+# define __size_overflow(...)
72526+#endif
72527+
72528+#ifndef __intentional_overflow
72529+# define __intentional_overflow(...)
72530+#endif
72531+
72532+#ifndef __latent_entropy
72533+# define __latent_entropy
72534+#endif
72535+
72536 /*
72537 * Tell gcc if a function is cold. The compiler will assume any path
72538 * directly leading to the call is unlikely.
72539@@ -284,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
72540 #define __cold
72541 #endif
72542
72543+#ifndef __alloc_size
72544+#define __alloc_size(...)
72545+#endif
72546+
72547+#ifndef __bos
72548+#define __bos(ptr, arg)
72549+#endif
72550+
72551+#ifndef __bos0
72552+#define __bos0(ptr)
72553+#endif
72554+
72555+#ifndef __bos1
72556+#define __bos1(ptr)
72557+#endif
72558+
72559 /* Simple shorthand for a section definition */
72560 #ifndef __section
72561 # define __section(S) __attribute__ ((__section__(#S)))
72562@@ -349,7 +407,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
72563 * use is to mediate communication between process-level code and irq/NMI
72564 * handlers, all running on the same CPU.
72565 */
72566-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
72567+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
72568+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
72569
72570 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
72571 #ifdef CONFIG_KPROBES
72572diff --git a/include/linux/completion.h b/include/linux/completion.h
72573index 33f0280..35c6568 100644
72574--- a/include/linux/completion.h
72575+++ b/include/linux/completion.h
72576@@ -79,15 +79,15 @@ static inline void init_completion(struct completion *x)
72577 extern void wait_for_completion(struct completion *);
72578 extern void wait_for_completion_io(struct completion *);
72579 extern int wait_for_completion_interruptible(struct completion *x);
72580-extern int wait_for_completion_killable(struct completion *x);
72581+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
72582 extern unsigned long wait_for_completion_timeout(struct completion *x,
72583 unsigned long timeout);
72584 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
72585 unsigned long timeout);
72586 extern long wait_for_completion_interruptible_timeout(
72587- struct completion *x, unsigned long timeout);
72588+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
72589 extern long wait_for_completion_killable_timeout(
72590- struct completion *x, unsigned long timeout);
72591+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
72592 extern bool try_wait_for_completion(struct completion *x);
72593 extern bool completion_done(struct completion *x);
72594
72595diff --git a/include/linux/configfs.h b/include/linux/configfs.h
72596index 34025df..d94bbbc 100644
72597--- a/include/linux/configfs.h
72598+++ b/include/linux/configfs.h
72599@@ -125,7 +125,7 @@ struct configfs_attribute {
72600 const char *ca_name;
72601 struct module *ca_owner;
72602 umode_t ca_mode;
72603-};
72604+} __do_const;
72605
72606 /*
72607 * Users often need to create attribute structures for their configurable
72608diff --git a/include/linux/cpu.h b/include/linux/cpu.h
72609index 9f3c7e8..a18c7b6 100644
72610--- a/include/linux/cpu.h
72611+++ b/include/linux/cpu.h
72612@@ -115,7 +115,7 @@ enum {
72613 /* Need to know about CPUs going up/down? */
72614 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
72615 #define cpu_notifier(fn, pri) { \
72616- static struct notifier_block fn##_nb __cpuinitdata = \
72617+ static struct notifier_block fn##_nb = \
72618 { .notifier_call = fn, .priority = pri }; \
72619 register_cpu_notifier(&fn##_nb); \
72620 }
72621diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
72622index 037d36a..ca5fe6e 100644
72623--- a/include/linux/cpufreq.h
72624+++ b/include/linux/cpufreq.h
72625@@ -262,7 +262,7 @@ struct cpufreq_driver {
72626 int (*suspend) (struct cpufreq_policy *policy);
72627 int (*resume) (struct cpufreq_policy *policy);
72628 struct freq_attr **attr;
72629-};
72630+} __do_const;
72631
72632 /* flags */
72633
72634@@ -321,6 +321,7 @@ struct global_attr {
72635 ssize_t (*store)(struct kobject *a, struct attribute *b,
72636 const char *c, size_t count);
72637 };
72638+typedef struct global_attr __no_const global_attr_no_const;
72639
72640 #define define_one_global_ro(_name) \
72641 static struct global_attr _name = \
72642diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
72643index 8f04062..900239a 100644
72644--- a/include/linux/cpuidle.h
72645+++ b/include/linux/cpuidle.h
72646@@ -52,7 +52,8 @@ struct cpuidle_state {
72647 int index);
72648
72649 int (*enter_dead) (struct cpuidle_device *dev, int index);
72650-};
72651+} __do_const;
72652+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
72653
72654 /* Idle State Flags */
72655 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
72656@@ -191,7 +192,7 @@ struct cpuidle_governor {
72657 void (*reflect) (struct cpuidle_device *dev, int index);
72658
72659 struct module *owner;
72660-};
72661+} __do_const;
72662
72663 #ifdef CONFIG_CPU_IDLE
72664
72665diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
72666index d08e4d2..95fad61 100644
72667--- a/include/linux/cpumask.h
72668+++ b/include/linux/cpumask.h
72669@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
72670 }
72671
72672 /* Valid inputs for n are -1 and 0. */
72673-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
72674+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
72675 {
72676 return n+1;
72677 }
72678
72679-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
72680+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
72681 {
72682 return n+1;
72683 }
72684
72685-static inline unsigned int cpumask_next_and(int n,
72686+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
72687 const struct cpumask *srcp,
72688 const struct cpumask *andp)
72689 {
72690@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
72691 *
72692 * Returns >= nr_cpu_ids if no further cpus set.
72693 */
72694-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
72695+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
72696 {
72697 /* -1 is a legal arg here. */
72698 if (n != -1)
72699@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
72700 *
72701 * Returns >= nr_cpu_ids if no further cpus unset.
72702 */
72703-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
72704+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
72705 {
72706 /* -1 is a legal arg here. */
72707 if (n != -1)
72708@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
72709 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
72710 }
72711
72712-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
72713+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
72714 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
72715
72716 /**
72717diff --git a/include/linux/cred.h b/include/linux/cred.h
72718index 04421e8..6bce4ef 100644
72719--- a/include/linux/cred.h
72720+++ b/include/linux/cred.h
72721@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
72722 static inline void validate_process_creds(void)
72723 {
72724 }
72725+static inline void validate_task_creds(struct task_struct *task)
72726+{
72727+}
72728 #endif
72729
72730 /**
72731diff --git a/include/linux/crypto.h b/include/linux/crypto.h
72732index b92eadf..b4ecdc1 100644
72733--- a/include/linux/crypto.h
72734+++ b/include/linux/crypto.h
72735@@ -373,7 +373,7 @@ struct cipher_tfm {
72736 const u8 *key, unsigned int keylen);
72737 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
72738 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
72739-};
72740+} __no_const;
72741
72742 struct hash_tfm {
72743 int (*init)(struct hash_desc *desc);
72744@@ -394,13 +394,13 @@ struct compress_tfm {
72745 int (*cot_decompress)(struct crypto_tfm *tfm,
72746 const u8 *src, unsigned int slen,
72747 u8 *dst, unsigned int *dlen);
72748-};
72749+} __no_const;
72750
72751 struct rng_tfm {
72752 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
72753 unsigned int dlen);
72754 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
72755-};
72756+} __no_const;
72757
72758 #define crt_ablkcipher crt_u.ablkcipher
72759 #define crt_aead crt_u.aead
72760diff --git a/include/linux/ctype.h b/include/linux/ctype.h
72761index 653589e..4ef254a 100644
72762--- a/include/linux/ctype.h
72763+++ b/include/linux/ctype.h
72764@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
72765 * Fast implementation of tolower() for internal usage. Do not use in your
72766 * code.
72767 */
72768-static inline char _tolower(const char c)
72769+static inline unsigned char _tolower(const unsigned char c)
72770 {
72771 return c | 0x20;
72772 }
72773diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
72774index 7925bf0..d5143d2 100644
72775--- a/include/linux/decompress/mm.h
72776+++ b/include/linux/decompress/mm.h
72777@@ -77,7 +77,7 @@ static void free(void *where)
72778 * warnings when not needed (indeed large_malloc / large_free are not
72779 * needed by inflate */
72780
72781-#define malloc(a) kmalloc(a, GFP_KERNEL)
72782+#define malloc(a) kmalloc((a), GFP_KERNEL)
72783 #define free(a) kfree(a)
72784
72785 #define large_malloc(a) vmalloc(a)
72786diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
72787index fe8c447..bdc1f33 100644
72788--- a/include/linux/devfreq.h
72789+++ b/include/linux/devfreq.h
72790@@ -114,7 +114,7 @@ struct devfreq_governor {
72791 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
72792 int (*event_handler)(struct devfreq *devfreq,
72793 unsigned int event, void *data);
72794-};
72795+} __do_const;
72796
72797 /**
72798 * struct devfreq - Device devfreq structure
72799diff --git a/include/linux/device.h b/include/linux/device.h
72800index c0a1261..dba7569 100644
72801--- a/include/linux/device.h
72802+++ b/include/linux/device.h
72803@@ -290,7 +290,7 @@ struct subsys_interface {
72804 struct list_head node;
72805 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
72806 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
72807-};
72808+} __do_const;
72809
72810 int subsys_interface_register(struct subsys_interface *sif);
72811 void subsys_interface_unregister(struct subsys_interface *sif);
72812@@ -473,7 +473,7 @@ struct device_type {
72813 void (*release)(struct device *dev);
72814
72815 const struct dev_pm_ops *pm;
72816-};
72817+} __do_const;
72818
72819 /* interface for exporting device attributes */
72820 struct device_attribute {
72821@@ -483,11 +483,12 @@ struct device_attribute {
72822 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
72823 const char *buf, size_t count);
72824 };
72825+typedef struct device_attribute __no_const device_attribute_no_const;
72826
72827 struct dev_ext_attribute {
72828 struct device_attribute attr;
72829 void *var;
72830-};
72831+} __do_const;
72832
72833 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
72834 char *buf);
72835diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
72836index 94af418..b1ca7a2 100644
72837--- a/include/linux/dma-mapping.h
72838+++ b/include/linux/dma-mapping.h
72839@@ -54,7 +54,7 @@ struct dma_map_ops {
72840 u64 (*get_required_mask)(struct device *dev);
72841 #endif
72842 int is_phys;
72843-};
72844+} __do_const;
72845
72846 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
72847
72848diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
72849index 96d3e4a..dc36433 100644
72850--- a/include/linux/dmaengine.h
72851+++ b/include/linux/dmaengine.h
72852@@ -1035,9 +1035,9 @@ struct dma_pinned_list {
72853 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
72854 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
72855
72856-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
72857+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
72858 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
72859-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
72860+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
72861 struct dma_pinned_list *pinned_list, struct page *page,
72862 unsigned int offset, size_t len);
72863
72864diff --git a/include/linux/efi.h b/include/linux/efi.h
72865index 2bc0ad7..3f7b006 100644
72866--- a/include/linux/efi.h
72867+++ b/include/linux/efi.h
72868@@ -745,6 +745,7 @@ struct efivar_operations {
72869 efi_set_variable_t *set_variable;
72870 efi_query_variable_store_t *query_variable_store;
72871 };
72872+typedef struct efivar_operations __no_const efivar_operations_no_const;
72873
72874 struct efivars {
72875 /*
72876diff --git a/include/linux/elf.h b/include/linux/elf.h
72877index 40a3c0e..4c45a38 100644
72878--- a/include/linux/elf.h
72879+++ b/include/linux/elf.h
72880@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
72881 #define elf_note elf32_note
72882 #define elf_addr_t Elf32_Off
72883 #define Elf_Half Elf32_Half
72884+#define elf_dyn Elf32_Dyn
72885
72886 #else
72887
72888@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
72889 #define elf_note elf64_note
72890 #define elf_addr_t Elf64_Off
72891 #define Elf_Half Elf64_Half
72892+#define elf_dyn Elf64_Dyn
72893
72894 #endif
72895
72896diff --git a/include/linux/err.h b/include/linux/err.h
72897index f2edce2..cc2082c 100644
72898--- a/include/linux/err.h
72899+++ b/include/linux/err.h
72900@@ -19,12 +19,12 @@
72901
72902 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
72903
72904-static inline void * __must_check ERR_PTR(long error)
72905+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
72906 {
72907 return (void *) error;
72908 }
72909
72910-static inline long __must_check PTR_ERR(const void *ptr)
72911+static inline long __must_check __intentional_overflow(-1) PTR_ERR(const void *ptr)
72912 {
72913 return (long) ptr;
72914 }
72915diff --git a/include/linux/extcon.h b/include/linux/extcon.h
72916index fcb51c8..bdafcf6 100644
72917--- a/include/linux/extcon.h
72918+++ b/include/linux/extcon.h
72919@@ -134,7 +134,7 @@ struct extcon_dev {
72920 /* /sys/class/extcon/.../mutually_exclusive/... */
72921 struct attribute_group attr_g_muex;
72922 struct attribute **attrs_muex;
72923- struct device_attribute *d_attrs_muex;
72924+ device_attribute_no_const *d_attrs_muex;
72925 };
72926
72927 /**
72928diff --git a/include/linux/fb.h b/include/linux/fb.h
72929index d49c60f..2834fbe 100644
72930--- a/include/linux/fb.h
72931+++ b/include/linux/fb.h
72932@@ -304,7 +304,7 @@ struct fb_ops {
72933 /* called at KDB enter and leave time to prepare the console */
72934 int (*fb_debug_enter)(struct fb_info *info);
72935 int (*fb_debug_leave)(struct fb_info *info);
72936-};
72937+} __do_const;
72938
72939 #ifdef CONFIG_FB_TILEBLITTING
72940 #define FB_TILE_CURSOR_NONE 0
72941diff --git a/include/linux/filter.h b/include/linux/filter.h
72942index f65f5a6..2f4f93a 100644
72943--- a/include/linux/filter.h
72944+++ b/include/linux/filter.h
72945@@ -20,6 +20,7 @@ struct compat_sock_fprog {
72946
72947 struct sk_buff;
72948 struct sock;
72949+struct bpf_jit_work;
72950
72951 struct sk_filter
72952 {
72953@@ -27,6 +28,9 @@ struct sk_filter
72954 unsigned int len; /* Number of filter blocks */
72955 unsigned int (*bpf_func)(const struct sk_buff *skb,
72956 const struct sock_filter *filter);
72957+#ifdef CONFIG_BPF_JIT
72958+ struct bpf_jit_work *work;
72959+#endif
72960 struct rcu_head rcu;
72961 struct sock_filter insns[0];
72962 };
72963diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
72964index 8293262..2b3b8bd 100644
72965--- a/include/linux/frontswap.h
72966+++ b/include/linux/frontswap.h
72967@@ -11,7 +11,7 @@ struct frontswap_ops {
72968 int (*load)(unsigned, pgoff_t, struct page *);
72969 void (*invalidate_page)(unsigned, pgoff_t);
72970 void (*invalidate_area)(unsigned);
72971-};
72972+} __no_const;
72973
72974 extern bool frontswap_enabled;
72975 extern struct frontswap_ops *
72976diff --git a/include/linux/fs.h b/include/linux/fs.h
72977index 65c2be2..4c53f6e 100644
72978--- a/include/linux/fs.h
72979+++ b/include/linux/fs.h
72980@@ -1543,7 +1543,8 @@ struct file_operations {
72981 long (*fallocate)(struct file *file, int mode, loff_t offset,
72982 loff_t len);
72983 int (*show_fdinfo)(struct seq_file *m, struct file *f);
72984-};
72985+} __do_const;
72986+typedef struct file_operations __no_const file_operations_no_const;
72987
72988 struct inode_operations {
72989 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
72990@@ -2688,4 +2689,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
72991 inode->i_flags |= S_NOSEC;
72992 }
72993
72994+static inline bool is_sidechannel_device(const struct inode *inode)
72995+{
72996+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
72997+ umode_t mode = inode->i_mode;
72998+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
72999+#else
73000+ return false;
73001+#endif
73002+}
73003+
73004 #endif /* _LINUX_FS_H */
73005diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
73006index 2b93a9a..855d94a 100644
73007--- a/include/linux/fs_struct.h
73008+++ b/include/linux/fs_struct.h
73009@@ -6,7 +6,7 @@
73010 #include <linux/seqlock.h>
73011
73012 struct fs_struct {
73013- int users;
73014+ atomic_t users;
73015 spinlock_t lock;
73016 seqcount_t seq;
73017 int umask;
73018diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
73019index 5dfa0aa..6acf322 100644
73020--- a/include/linux/fscache-cache.h
73021+++ b/include/linux/fscache-cache.h
73022@@ -112,7 +112,7 @@ struct fscache_operation {
73023 fscache_operation_release_t release;
73024 };
73025
73026-extern atomic_t fscache_op_debug_id;
73027+extern atomic_unchecked_t fscache_op_debug_id;
73028 extern void fscache_op_work_func(struct work_struct *work);
73029
73030 extern void fscache_enqueue_operation(struct fscache_operation *);
73031@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
73032 INIT_WORK(&op->work, fscache_op_work_func);
73033 atomic_set(&op->usage, 1);
73034 op->state = FSCACHE_OP_ST_INITIALISED;
73035- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
73036+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
73037 op->processor = processor;
73038 op->release = release;
73039 INIT_LIST_HEAD(&op->pend_link);
73040diff --git a/include/linux/fscache.h b/include/linux/fscache.h
73041index 7a08623..4c07b0f 100644
73042--- a/include/linux/fscache.h
73043+++ b/include/linux/fscache.h
73044@@ -152,7 +152,7 @@ struct fscache_cookie_def {
73045 * - this is mandatory for any object that may have data
73046 */
73047 void (*now_uncached)(void *cookie_netfs_data);
73048-};
73049+} __do_const;
73050
73051 /*
73052 * fscache cached network filesystem type
73053diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
73054index a78680a..87bd73e 100644
73055--- a/include/linux/fsnotify.h
73056+++ b/include/linux/fsnotify.h
73057@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
73058 struct inode *inode = path->dentry->d_inode;
73059 __u32 mask = FS_ACCESS;
73060
73061+ if (is_sidechannel_device(inode))
73062+ return;
73063+
73064 if (S_ISDIR(inode->i_mode))
73065 mask |= FS_ISDIR;
73066
73067@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
73068 struct inode *inode = path->dentry->d_inode;
73069 __u32 mask = FS_MODIFY;
73070
73071+ if (is_sidechannel_device(inode))
73072+ return;
73073+
73074 if (S_ISDIR(inode->i_mode))
73075 mask |= FS_ISDIR;
73076
73077@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
73078 */
73079 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
73080 {
73081- return kstrdup(name, GFP_KERNEL);
73082+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
73083 }
73084
73085 /*
73086diff --git a/include/linux/genhd.h b/include/linux/genhd.h
73087index 9f3c275..911b591 100644
73088--- a/include/linux/genhd.h
73089+++ b/include/linux/genhd.h
73090@@ -194,7 +194,7 @@ struct gendisk {
73091 struct kobject *slave_dir;
73092
73093 struct timer_rand_state *random;
73094- atomic_t sync_io; /* RAID */
73095+ atomic_unchecked_t sync_io; /* RAID */
73096 struct disk_events *ev;
73097 #ifdef CONFIG_BLK_DEV_INTEGRITY
73098 struct blk_integrity *integrity;
73099diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
73100index 023bc34..b02b46a 100644
73101--- a/include/linux/genl_magic_func.h
73102+++ b/include/linux/genl_magic_func.h
73103@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
73104 },
73105
73106 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
73107-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
73108+static struct genl_ops ZZZ_genl_ops[] = {
73109 #include GENL_MAGIC_INCLUDE_FILE
73110 };
73111
73112diff --git a/include/linux/gfp.h b/include/linux/gfp.h
73113index 0f615eb..5c3832f 100644
73114--- a/include/linux/gfp.h
73115+++ b/include/linux/gfp.h
73116@@ -35,6 +35,13 @@ struct vm_area_struct;
73117 #define ___GFP_NO_KSWAPD 0x400000u
73118 #define ___GFP_OTHER_NODE 0x800000u
73119 #define ___GFP_WRITE 0x1000000u
73120+
73121+#ifdef CONFIG_PAX_USERCOPY_SLABS
73122+#define ___GFP_USERCOPY 0x2000000u
73123+#else
73124+#define ___GFP_USERCOPY 0
73125+#endif
73126+
73127 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
73128
73129 /*
73130@@ -92,6 +99,7 @@ struct vm_area_struct;
73131 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
73132 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
73133 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
73134+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
73135
73136 /*
73137 * This may seem redundant, but it's a way of annotating false positives vs.
73138@@ -99,7 +107,7 @@ struct vm_area_struct;
73139 */
73140 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
73141
73142-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
73143+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
73144 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
73145
73146 /* This equals 0, but use constants in case they ever change */
73147@@ -153,6 +161,8 @@ struct vm_area_struct;
73148 /* 4GB DMA on some platforms */
73149 #define GFP_DMA32 __GFP_DMA32
73150
73151+#define GFP_USERCOPY __GFP_USERCOPY
73152+
73153 /* Convert GFP flags to their corresponding migrate type */
73154 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
73155 {
73156diff --git a/include/linux/gracl.h b/include/linux/gracl.h
73157new file mode 100644
73158index 0000000..ebe6d72
73159--- /dev/null
73160+++ b/include/linux/gracl.h
73161@@ -0,0 +1,319 @@
73162+#ifndef GR_ACL_H
73163+#define GR_ACL_H
73164+
73165+#include <linux/grdefs.h>
73166+#include <linux/resource.h>
73167+#include <linux/capability.h>
73168+#include <linux/dcache.h>
73169+#include <asm/resource.h>
73170+
73171+/* Major status information */
73172+
73173+#define GR_VERSION "grsecurity 2.9.1"
73174+#define GRSECURITY_VERSION 0x2901
73175+
73176+enum {
73177+ GR_SHUTDOWN = 0,
73178+ GR_ENABLE = 1,
73179+ GR_SPROLE = 2,
73180+ GR_RELOAD = 3,
73181+ GR_SEGVMOD = 4,
73182+ GR_STATUS = 5,
73183+ GR_UNSPROLE = 6,
73184+ GR_PASSSET = 7,
73185+ GR_SPROLEPAM = 8,
73186+};
73187+
73188+/* Password setup definitions
73189+ * kernel/grhash.c */
73190+enum {
73191+ GR_PW_LEN = 128,
73192+ GR_SALT_LEN = 16,
73193+ GR_SHA_LEN = 32,
73194+};
73195+
73196+enum {
73197+ GR_SPROLE_LEN = 64,
73198+};
73199+
73200+enum {
73201+ GR_NO_GLOB = 0,
73202+ GR_REG_GLOB,
73203+ GR_CREATE_GLOB
73204+};
73205+
73206+#define GR_NLIMITS 32
73207+
73208+/* Begin Data Structures */
73209+
73210+struct sprole_pw {
73211+ unsigned char *rolename;
73212+ unsigned char salt[GR_SALT_LEN];
73213+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
73214+};
73215+
73216+struct name_entry {
73217+ __u32 key;
73218+ ino_t inode;
73219+ dev_t device;
73220+ char *name;
73221+ __u16 len;
73222+ __u8 deleted;
73223+ struct name_entry *prev;
73224+ struct name_entry *next;
73225+};
73226+
73227+struct inodev_entry {
73228+ struct name_entry *nentry;
73229+ struct inodev_entry *prev;
73230+ struct inodev_entry *next;
73231+};
73232+
73233+struct acl_role_db {
73234+ struct acl_role_label **r_hash;
73235+ __u32 r_size;
73236+};
73237+
73238+struct inodev_db {
73239+ struct inodev_entry **i_hash;
73240+ __u32 i_size;
73241+};
73242+
73243+struct name_db {
73244+ struct name_entry **n_hash;
73245+ __u32 n_size;
73246+};
73247+
73248+struct crash_uid {
73249+ uid_t uid;
73250+ unsigned long expires;
73251+};
73252+
73253+struct gr_hash_struct {
73254+ void **table;
73255+ void **nametable;
73256+ void *first;
73257+ __u32 table_size;
73258+ __u32 used_size;
73259+ int type;
73260+};
73261+
73262+/* Userspace Grsecurity ACL data structures */
73263+
73264+struct acl_subject_label {
73265+ char *filename;
73266+ ino_t inode;
73267+ dev_t device;
73268+ __u32 mode;
73269+ kernel_cap_t cap_mask;
73270+ kernel_cap_t cap_lower;
73271+ kernel_cap_t cap_invert_audit;
73272+
73273+ struct rlimit res[GR_NLIMITS];
73274+ __u32 resmask;
73275+
73276+ __u8 user_trans_type;
73277+ __u8 group_trans_type;
73278+ uid_t *user_transitions;
73279+ gid_t *group_transitions;
73280+ __u16 user_trans_num;
73281+ __u16 group_trans_num;
73282+
73283+ __u32 sock_families[2];
73284+ __u32 ip_proto[8];
73285+ __u32 ip_type;
73286+ struct acl_ip_label **ips;
73287+ __u32 ip_num;
73288+ __u32 inaddr_any_override;
73289+
73290+ __u32 crashes;
73291+ unsigned long expires;
73292+
73293+ struct acl_subject_label *parent_subject;
73294+ struct gr_hash_struct *hash;
73295+ struct acl_subject_label *prev;
73296+ struct acl_subject_label *next;
73297+
73298+ struct acl_object_label **obj_hash;
73299+ __u32 obj_hash_size;
73300+ __u16 pax_flags;
73301+};
73302+
73303+struct role_allowed_ip {
73304+ __u32 addr;
73305+ __u32 netmask;
73306+
73307+ struct role_allowed_ip *prev;
73308+ struct role_allowed_ip *next;
73309+};
73310+
73311+struct role_transition {
73312+ char *rolename;
73313+
73314+ struct role_transition *prev;
73315+ struct role_transition *next;
73316+};
73317+
73318+struct acl_role_label {
73319+ char *rolename;
73320+ uid_t uidgid;
73321+ __u16 roletype;
73322+
73323+ __u16 auth_attempts;
73324+ unsigned long expires;
73325+
73326+ struct acl_subject_label *root_label;
73327+ struct gr_hash_struct *hash;
73328+
73329+ struct acl_role_label *prev;
73330+ struct acl_role_label *next;
73331+
73332+ struct role_transition *transitions;
73333+ struct role_allowed_ip *allowed_ips;
73334+ uid_t *domain_children;
73335+ __u16 domain_child_num;
73336+
73337+ umode_t umask;
73338+
73339+ struct acl_subject_label **subj_hash;
73340+ __u32 subj_hash_size;
73341+};
73342+
73343+struct user_acl_role_db {
73344+ struct acl_role_label **r_table;
73345+ __u32 num_pointers; /* Number of allocations to track */
73346+ __u32 num_roles; /* Number of roles */
73347+ __u32 num_domain_children; /* Number of domain children */
73348+ __u32 num_subjects; /* Number of subjects */
73349+ __u32 num_objects; /* Number of objects */
73350+};
73351+
73352+struct acl_object_label {
73353+ char *filename;
73354+ ino_t inode;
73355+ dev_t device;
73356+ __u32 mode;
73357+
73358+ struct acl_subject_label *nested;
73359+ struct acl_object_label *globbed;
73360+
73361+ /* next two structures not used */
73362+
73363+ struct acl_object_label *prev;
73364+ struct acl_object_label *next;
73365+};
73366+
73367+struct acl_ip_label {
73368+ char *iface;
73369+ __u32 addr;
73370+ __u32 netmask;
73371+ __u16 low, high;
73372+ __u8 mode;
73373+ __u32 type;
73374+ __u32 proto[8];
73375+
73376+ /* next two structures not used */
73377+
73378+ struct acl_ip_label *prev;
73379+ struct acl_ip_label *next;
73380+};
73381+
73382+struct gr_arg {
73383+ struct user_acl_role_db role_db;
73384+ unsigned char pw[GR_PW_LEN];
73385+ unsigned char salt[GR_SALT_LEN];
73386+ unsigned char sum[GR_SHA_LEN];
73387+ unsigned char sp_role[GR_SPROLE_LEN];
73388+ struct sprole_pw *sprole_pws;
73389+ dev_t segv_device;
73390+ ino_t segv_inode;
73391+ uid_t segv_uid;
73392+ __u16 num_sprole_pws;
73393+ __u16 mode;
73394+};
73395+
73396+struct gr_arg_wrapper {
73397+ struct gr_arg *arg;
73398+ __u32 version;
73399+ __u32 size;
73400+};
73401+
73402+struct subject_map {
73403+ struct acl_subject_label *user;
73404+ struct acl_subject_label *kernel;
73405+ struct subject_map *prev;
73406+ struct subject_map *next;
73407+};
73408+
73409+struct acl_subj_map_db {
73410+ struct subject_map **s_hash;
73411+ __u32 s_size;
73412+};
73413+
73414+/* End Data Structures Section */
73415+
73416+/* Hash functions generated by empirical testing by Brad Spengler
73417+ Makes good use of the low bits of the inode. Generally 0-1 times
73418+ in loop for successful match. 0-3 for unsuccessful match.
73419+ Shift/add algorithm with modulus of table size and an XOR*/
73420+
73421+static __inline__ unsigned int
73422+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
73423+{
73424+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
73425+}
73426+
73427+ static __inline__ unsigned int
73428+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
73429+{
73430+ return ((const unsigned long)userp % sz);
73431+}
73432+
73433+static __inline__ unsigned int
73434+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
73435+{
73436+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
73437+}
73438+
73439+static __inline__ unsigned int
73440+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
73441+{
73442+ return full_name_hash((const unsigned char *)name, len) % sz;
73443+}
73444+
73445+#define FOR_EACH_ROLE_START(role) \
73446+ role = role_list; \
73447+ while (role) {
73448+
73449+#define FOR_EACH_ROLE_END(role) \
73450+ role = role->prev; \
73451+ }
73452+
73453+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
73454+ subj = NULL; \
73455+ iter = 0; \
73456+ while (iter < role->subj_hash_size) { \
73457+ if (subj == NULL) \
73458+ subj = role->subj_hash[iter]; \
73459+ if (subj == NULL) { \
73460+ iter++; \
73461+ continue; \
73462+ }
73463+
73464+#define FOR_EACH_SUBJECT_END(subj,iter) \
73465+ subj = subj->next; \
73466+ if (subj == NULL) \
73467+ iter++; \
73468+ }
73469+
73470+
73471+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
73472+ subj = role->hash->first; \
73473+ while (subj != NULL) {
73474+
73475+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
73476+ subj = subj->next; \
73477+ }
73478+
73479+#endif
73480+
73481diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
73482new file mode 100644
73483index 0000000..33ebd1f
73484--- /dev/null
73485+++ b/include/linux/gracl_compat.h
73486@@ -0,0 +1,156 @@
73487+#ifndef GR_ACL_COMPAT_H
73488+#define GR_ACL_COMPAT_H
73489+
73490+#include <linux/resource.h>
73491+#include <asm/resource.h>
73492+
73493+struct sprole_pw_compat {
73494+ compat_uptr_t rolename;
73495+ unsigned char salt[GR_SALT_LEN];
73496+ unsigned char sum[GR_SHA_LEN];
73497+};
73498+
73499+struct gr_hash_struct_compat {
73500+ compat_uptr_t table;
73501+ compat_uptr_t nametable;
73502+ compat_uptr_t first;
73503+ __u32 table_size;
73504+ __u32 used_size;
73505+ int type;
73506+};
73507+
73508+struct acl_subject_label_compat {
73509+ compat_uptr_t filename;
73510+ compat_ino_t inode;
73511+ __u32 device;
73512+ __u32 mode;
73513+ kernel_cap_t cap_mask;
73514+ kernel_cap_t cap_lower;
73515+ kernel_cap_t cap_invert_audit;
73516+
73517+ struct compat_rlimit res[GR_NLIMITS];
73518+ __u32 resmask;
73519+
73520+ __u8 user_trans_type;
73521+ __u8 group_trans_type;
73522+ compat_uptr_t user_transitions;
73523+ compat_uptr_t group_transitions;
73524+ __u16 user_trans_num;
73525+ __u16 group_trans_num;
73526+
73527+ __u32 sock_families[2];
73528+ __u32 ip_proto[8];
73529+ __u32 ip_type;
73530+ compat_uptr_t ips;
73531+ __u32 ip_num;
73532+ __u32 inaddr_any_override;
73533+
73534+ __u32 crashes;
73535+ compat_ulong_t expires;
73536+
73537+ compat_uptr_t parent_subject;
73538+ compat_uptr_t hash;
73539+ compat_uptr_t prev;
73540+ compat_uptr_t next;
73541+
73542+ compat_uptr_t obj_hash;
73543+ __u32 obj_hash_size;
73544+ __u16 pax_flags;
73545+};
73546+
73547+struct role_allowed_ip_compat {
73548+ __u32 addr;
73549+ __u32 netmask;
73550+
73551+ compat_uptr_t prev;
73552+ compat_uptr_t next;
73553+};
73554+
73555+struct role_transition_compat {
73556+ compat_uptr_t rolename;
73557+
73558+ compat_uptr_t prev;
73559+ compat_uptr_t next;
73560+};
73561+
73562+struct acl_role_label_compat {
73563+ compat_uptr_t rolename;
73564+ uid_t uidgid;
73565+ __u16 roletype;
73566+
73567+ __u16 auth_attempts;
73568+ compat_ulong_t expires;
73569+
73570+ compat_uptr_t root_label;
73571+ compat_uptr_t hash;
73572+
73573+ compat_uptr_t prev;
73574+ compat_uptr_t next;
73575+
73576+ compat_uptr_t transitions;
73577+ compat_uptr_t allowed_ips;
73578+ compat_uptr_t domain_children;
73579+ __u16 domain_child_num;
73580+
73581+ umode_t umask;
73582+
73583+ compat_uptr_t subj_hash;
73584+ __u32 subj_hash_size;
73585+};
73586+
73587+struct user_acl_role_db_compat {
73588+ compat_uptr_t r_table;
73589+ __u32 num_pointers;
73590+ __u32 num_roles;
73591+ __u32 num_domain_children;
73592+ __u32 num_subjects;
73593+ __u32 num_objects;
73594+};
73595+
73596+struct acl_object_label_compat {
73597+ compat_uptr_t filename;
73598+ compat_ino_t inode;
73599+ __u32 device;
73600+ __u32 mode;
73601+
73602+ compat_uptr_t nested;
73603+ compat_uptr_t globbed;
73604+
73605+ compat_uptr_t prev;
73606+ compat_uptr_t next;
73607+};
73608+
73609+struct acl_ip_label_compat {
73610+ compat_uptr_t iface;
73611+ __u32 addr;
73612+ __u32 netmask;
73613+ __u16 low, high;
73614+ __u8 mode;
73615+ __u32 type;
73616+ __u32 proto[8];
73617+
73618+ compat_uptr_t prev;
73619+ compat_uptr_t next;
73620+};
73621+
73622+struct gr_arg_compat {
73623+ struct user_acl_role_db_compat role_db;
73624+ unsigned char pw[GR_PW_LEN];
73625+ unsigned char salt[GR_SALT_LEN];
73626+ unsigned char sum[GR_SHA_LEN];
73627+ unsigned char sp_role[GR_SPROLE_LEN];
73628+ compat_uptr_t sprole_pws;
73629+ __u32 segv_device;
73630+ compat_ino_t segv_inode;
73631+ uid_t segv_uid;
73632+ __u16 num_sprole_pws;
73633+ __u16 mode;
73634+};
73635+
73636+struct gr_arg_wrapper_compat {
73637+ compat_uptr_t arg;
73638+ __u32 version;
73639+ __u32 size;
73640+};
73641+
73642+#endif
73643diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
73644new file mode 100644
73645index 0000000..323ecf2
73646--- /dev/null
73647+++ b/include/linux/gralloc.h
73648@@ -0,0 +1,9 @@
73649+#ifndef __GRALLOC_H
73650+#define __GRALLOC_H
73651+
73652+void acl_free_all(void);
73653+int acl_alloc_stack_init(unsigned long size);
73654+void *acl_alloc(unsigned long len);
73655+void *acl_alloc_num(unsigned long num, unsigned long len);
73656+
73657+#endif
73658diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
73659new file mode 100644
73660index 0000000..be66033
73661--- /dev/null
73662+++ b/include/linux/grdefs.h
73663@@ -0,0 +1,140 @@
73664+#ifndef GRDEFS_H
73665+#define GRDEFS_H
73666+
73667+/* Begin grsecurity status declarations */
73668+
73669+enum {
73670+ GR_READY = 0x01,
73671+ GR_STATUS_INIT = 0x00 // disabled state
73672+};
73673+
73674+/* Begin ACL declarations */
73675+
73676+/* Role flags */
73677+
73678+enum {
73679+ GR_ROLE_USER = 0x0001,
73680+ GR_ROLE_GROUP = 0x0002,
73681+ GR_ROLE_DEFAULT = 0x0004,
73682+ GR_ROLE_SPECIAL = 0x0008,
73683+ GR_ROLE_AUTH = 0x0010,
73684+ GR_ROLE_NOPW = 0x0020,
73685+ GR_ROLE_GOD = 0x0040,
73686+ GR_ROLE_LEARN = 0x0080,
73687+ GR_ROLE_TPE = 0x0100,
73688+ GR_ROLE_DOMAIN = 0x0200,
73689+ GR_ROLE_PAM = 0x0400,
73690+ GR_ROLE_PERSIST = 0x0800
73691+};
73692+
73693+/* ACL Subject and Object mode flags */
73694+enum {
73695+ GR_DELETED = 0x80000000
73696+};
73697+
73698+/* ACL Object-only mode flags */
73699+enum {
73700+ GR_READ = 0x00000001,
73701+ GR_APPEND = 0x00000002,
73702+ GR_WRITE = 0x00000004,
73703+ GR_EXEC = 0x00000008,
73704+ GR_FIND = 0x00000010,
73705+ GR_INHERIT = 0x00000020,
73706+ GR_SETID = 0x00000040,
73707+ GR_CREATE = 0x00000080,
73708+ GR_DELETE = 0x00000100,
73709+ GR_LINK = 0x00000200,
73710+ GR_AUDIT_READ = 0x00000400,
73711+ GR_AUDIT_APPEND = 0x00000800,
73712+ GR_AUDIT_WRITE = 0x00001000,
73713+ GR_AUDIT_EXEC = 0x00002000,
73714+ GR_AUDIT_FIND = 0x00004000,
73715+ GR_AUDIT_INHERIT= 0x00008000,
73716+ GR_AUDIT_SETID = 0x00010000,
73717+ GR_AUDIT_CREATE = 0x00020000,
73718+ GR_AUDIT_DELETE = 0x00040000,
73719+ GR_AUDIT_LINK = 0x00080000,
73720+ GR_PTRACERD = 0x00100000,
73721+ GR_NOPTRACE = 0x00200000,
73722+ GR_SUPPRESS = 0x00400000,
73723+ GR_NOLEARN = 0x00800000,
73724+ GR_INIT_TRANSFER= 0x01000000
73725+};
73726+
73727+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
73728+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
73729+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
73730+
73731+/* ACL subject-only mode flags */
73732+enum {
73733+ GR_KILL = 0x00000001,
73734+ GR_VIEW = 0x00000002,
73735+ GR_PROTECTED = 0x00000004,
73736+ GR_LEARN = 0x00000008,
73737+ GR_OVERRIDE = 0x00000010,
73738+ /* just a placeholder, this mode is only used in userspace */
73739+ GR_DUMMY = 0x00000020,
73740+ GR_PROTSHM = 0x00000040,
73741+ GR_KILLPROC = 0x00000080,
73742+ GR_KILLIPPROC = 0x00000100,
73743+ /* just a placeholder, this mode is only used in userspace */
73744+ GR_NOTROJAN = 0x00000200,
73745+ GR_PROTPROCFD = 0x00000400,
73746+ GR_PROCACCT = 0x00000800,
73747+ GR_RELAXPTRACE = 0x00001000,
73748+ //GR_NESTED = 0x00002000,
73749+ GR_INHERITLEARN = 0x00004000,
73750+ GR_PROCFIND = 0x00008000,
73751+ GR_POVERRIDE = 0x00010000,
73752+ GR_KERNELAUTH = 0x00020000,
73753+ GR_ATSECURE = 0x00040000,
73754+ GR_SHMEXEC = 0x00080000
73755+};
73756+
73757+enum {
73758+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
73759+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
73760+ GR_PAX_ENABLE_MPROTECT = 0x0004,
73761+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
73762+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
73763+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
73764+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
73765+ GR_PAX_DISABLE_MPROTECT = 0x0400,
73766+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
73767+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
73768+};
73769+
73770+enum {
73771+ GR_ID_USER = 0x01,
73772+ GR_ID_GROUP = 0x02,
73773+};
73774+
73775+enum {
73776+ GR_ID_ALLOW = 0x01,
73777+ GR_ID_DENY = 0x02,
73778+};
73779+
73780+#define GR_CRASH_RES 31
73781+#define GR_UIDTABLE_MAX 500
73782+
73783+/* begin resource learning section */
73784+enum {
73785+ GR_RLIM_CPU_BUMP = 60,
73786+ GR_RLIM_FSIZE_BUMP = 50000,
73787+ GR_RLIM_DATA_BUMP = 10000,
73788+ GR_RLIM_STACK_BUMP = 1000,
73789+ GR_RLIM_CORE_BUMP = 10000,
73790+ GR_RLIM_RSS_BUMP = 500000,
73791+ GR_RLIM_NPROC_BUMP = 1,
73792+ GR_RLIM_NOFILE_BUMP = 5,
73793+ GR_RLIM_MEMLOCK_BUMP = 50000,
73794+ GR_RLIM_AS_BUMP = 500000,
73795+ GR_RLIM_LOCKS_BUMP = 2,
73796+ GR_RLIM_SIGPENDING_BUMP = 5,
73797+ GR_RLIM_MSGQUEUE_BUMP = 10000,
73798+ GR_RLIM_NICE_BUMP = 1,
73799+ GR_RLIM_RTPRIO_BUMP = 1,
73800+ GR_RLIM_RTTIME_BUMP = 1000000
73801+};
73802+
73803+#endif
73804diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
73805new file mode 100644
73806index 0000000..e337683
73807--- /dev/null
73808+++ b/include/linux/grinternal.h
73809@@ -0,0 +1,229 @@
73810+#ifndef __GRINTERNAL_H
73811+#define __GRINTERNAL_H
73812+
73813+#ifdef CONFIG_GRKERNSEC
73814+
73815+#include <linux/fs.h>
73816+#include <linux/mnt_namespace.h>
73817+#include <linux/nsproxy.h>
73818+#include <linux/gracl.h>
73819+#include <linux/grdefs.h>
73820+#include <linux/grmsg.h>
73821+
73822+void gr_add_learn_entry(const char *fmt, ...)
73823+ __attribute__ ((format (printf, 1, 2)));
73824+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
73825+ const struct vfsmount *mnt);
73826+__u32 gr_check_create(const struct dentry *new_dentry,
73827+ const struct dentry *parent,
73828+ const struct vfsmount *mnt, const __u32 mode);
73829+int gr_check_protected_task(const struct task_struct *task);
73830+__u32 to_gr_audit(const __u32 reqmode);
73831+int gr_set_acls(const int type);
73832+int gr_apply_subject_to_task(struct task_struct *task);
73833+int gr_acl_is_enabled(void);
73834+char gr_roletype_to_char(void);
73835+
73836+void gr_handle_alertkill(struct task_struct *task);
73837+char *gr_to_filename(const struct dentry *dentry,
73838+ const struct vfsmount *mnt);
73839+char *gr_to_filename1(const struct dentry *dentry,
73840+ const struct vfsmount *mnt);
73841+char *gr_to_filename2(const struct dentry *dentry,
73842+ const struct vfsmount *mnt);
73843+char *gr_to_filename3(const struct dentry *dentry,
73844+ const struct vfsmount *mnt);
73845+
73846+extern int grsec_enable_ptrace_readexec;
73847+extern int grsec_enable_harden_ptrace;
73848+extern int grsec_enable_link;
73849+extern int grsec_enable_fifo;
73850+extern int grsec_enable_execve;
73851+extern int grsec_enable_shm;
73852+extern int grsec_enable_execlog;
73853+extern int grsec_enable_signal;
73854+extern int grsec_enable_audit_ptrace;
73855+extern int grsec_enable_forkfail;
73856+extern int grsec_enable_time;
73857+extern int grsec_enable_rofs;
73858+extern int grsec_deny_new_usb;
73859+extern int grsec_enable_chroot_shmat;
73860+extern int grsec_enable_chroot_mount;
73861+extern int grsec_enable_chroot_double;
73862+extern int grsec_enable_chroot_pivot;
73863+extern int grsec_enable_chroot_chdir;
73864+extern int grsec_enable_chroot_chmod;
73865+extern int grsec_enable_chroot_mknod;
73866+extern int grsec_enable_chroot_fchdir;
73867+extern int grsec_enable_chroot_nice;
73868+extern int grsec_enable_chroot_execlog;
73869+extern int grsec_enable_chroot_caps;
73870+extern int grsec_enable_chroot_sysctl;
73871+extern int grsec_enable_chroot_unix;
73872+extern int grsec_enable_symlinkown;
73873+extern kgid_t grsec_symlinkown_gid;
73874+extern int grsec_enable_tpe;
73875+extern kgid_t grsec_tpe_gid;
73876+extern int grsec_enable_tpe_all;
73877+extern int grsec_enable_tpe_invert;
73878+extern int grsec_enable_socket_all;
73879+extern kgid_t grsec_socket_all_gid;
73880+extern int grsec_enable_socket_client;
73881+extern kgid_t grsec_socket_client_gid;
73882+extern int grsec_enable_socket_server;
73883+extern kgid_t grsec_socket_server_gid;
73884+extern kgid_t grsec_audit_gid;
73885+extern int grsec_enable_group;
73886+extern int grsec_enable_log_rwxmaps;
73887+extern int grsec_enable_mount;
73888+extern int grsec_enable_chdir;
73889+extern int grsec_resource_logging;
73890+extern int grsec_enable_blackhole;
73891+extern int grsec_lastack_retries;
73892+extern int grsec_enable_brute;
73893+extern int grsec_lock;
73894+
73895+extern spinlock_t grsec_alert_lock;
73896+extern unsigned long grsec_alert_wtime;
73897+extern unsigned long grsec_alert_fyet;
73898+
73899+extern spinlock_t grsec_audit_lock;
73900+
73901+extern rwlock_t grsec_exec_file_lock;
73902+
73903+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
73904+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
73905+ (tsk)->exec_file->f_path.mnt) : "/")
73906+
73907+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
73908+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
73909+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
73910+
73911+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
73912+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
73913+ (tsk)->exec_file->f_path.mnt) : "/")
73914+
73915+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
73916+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
73917+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
73918+
73919+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
73920+
73921+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
73922+
73923+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
73924+{
73925+ if (file1 && file2) {
73926+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
73927+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
73928+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
73929+ return true;
73930+ }
73931+
73932+ return false;
73933+}
73934+
73935+#define GR_CHROOT_CAPS {{ \
73936+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
73937+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
73938+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
73939+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
73940+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
73941+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
73942+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
73943+
73944+#define security_learn(normal_msg,args...) \
73945+({ \
73946+ read_lock(&grsec_exec_file_lock); \
73947+ gr_add_learn_entry(normal_msg "\n", ## args); \
73948+ read_unlock(&grsec_exec_file_lock); \
73949+})
73950+
73951+enum {
73952+ GR_DO_AUDIT,
73953+ GR_DONT_AUDIT,
73954+ /* used for non-audit messages that we shouldn't kill the task on */
73955+ GR_DONT_AUDIT_GOOD
73956+};
73957+
73958+enum {
73959+ GR_TTYSNIFF,
73960+ GR_RBAC,
73961+ GR_RBAC_STR,
73962+ GR_STR_RBAC,
73963+ GR_RBAC_MODE2,
73964+ GR_RBAC_MODE3,
73965+ GR_FILENAME,
73966+ GR_SYSCTL_HIDDEN,
73967+ GR_NOARGS,
73968+ GR_ONE_INT,
73969+ GR_ONE_INT_TWO_STR,
73970+ GR_ONE_STR,
73971+ GR_STR_INT,
73972+ GR_TWO_STR_INT,
73973+ GR_TWO_INT,
73974+ GR_TWO_U64,
73975+ GR_THREE_INT,
73976+ GR_FIVE_INT_TWO_STR,
73977+ GR_TWO_STR,
73978+ GR_THREE_STR,
73979+ GR_FOUR_STR,
73980+ GR_STR_FILENAME,
73981+ GR_FILENAME_STR,
73982+ GR_FILENAME_TWO_INT,
73983+ GR_FILENAME_TWO_INT_STR,
73984+ GR_TEXTREL,
73985+ GR_PTRACE,
73986+ GR_RESOURCE,
73987+ GR_CAP,
73988+ GR_SIG,
73989+ GR_SIG2,
73990+ GR_CRASH1,
73991+ GR_CRASH2,
73992+ GR_PSACCT,
73993+ GR_RWXMAP,
73994+ GR_RWXMAPVMA
73995+};
73996+
73997+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
73998+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
73999+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
74000+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
74001+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
74002+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
74003+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
74004+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
74005+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
74006+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
74007+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
74008+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
74009+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
74010+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
74011+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
74012+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
74013+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
74014+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
74015+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
74016+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
74017+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
74018+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
74019+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
74020+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
74021+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
74022+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
74023+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
74024+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
74025+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
74026+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
74027+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
74028+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
74029+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
74030+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
74031+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
74032+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
74033+
74034+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
74035+
74036+#endif
74037+
74038+#endif
74039diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
74040new file mode 100644
74041index 0000000..a4396b5
74042--- /dev/null
74043+++ b/include/linux/grmsg.h
74044@@ -0,0 +1,113 @@
74045+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
74046+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
74047+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
74048+#define GR_STOPMOD_MSG "denied modification of module state by "
74049+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
74050+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
74051+#define GR_IOPERM_MSG "denied use of ioperm() by "
74052+#define GR_IOPL_MSG "denied use of iopl() by "
74053+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
74054+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
74055+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
74056+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
74057+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
74058+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
74059+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
74060+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
74061+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
74062+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
74063+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
74064+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
74065+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
74066+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
74067+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
74068+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
74069+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
74070+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
74071+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
74072+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
74073+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
74074+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
74075+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
74076+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
74077+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
74078+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
74079+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
74080+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
74081+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
74082+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
74083+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
74084+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
74085+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
74086+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
74087+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
74088+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
74089+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
74090+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
74091+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
74092+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
74093+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
74094+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
74095+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
74096+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
74097+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
74098+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
74099+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
74100+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
74101+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
74102+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
74103+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
74104+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
74105+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
74106+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
74107+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
74108+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
74109+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
74110+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
74111+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
74112+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
74113+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
74114+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
74115+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
74116+#define GR_FAILFORK_MSG "failed fork with errno %s by "
74117+#define GR_NICE_CHROOT_MSG "denied priority change by "
74118+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
74119+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
74120+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
74121+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
74122+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
74123+#define GR_TIME_MSG "time set by "
74124+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
74125+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
74126+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
74127+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
74128+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
74129+#define GR_BIND_MSG "denied bind() by "
74130+#define GR_CONNECT_MSG "denied connect() by "
74131+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
74132+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
74133+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
74134+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
74135+#define GR_CAP_ACL_MSG "use of %s denied for "
74136+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
74137+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
74138+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
74139+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
74140+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
74141+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
74142+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
74143+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
74144+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
74145+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
74146+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
74147+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
74148+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
74149+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
74150+#define GR_VM86_MSG "denied use of vm86 by "
74151+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
74152+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
74153+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
74154+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
74155+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
74156+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
74157+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
74158diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
74159new file mode 100644
74160index 0000000..d6f5a21
74161--- /dev/null
74162+++ b/include/linux/grsecurity.h
74163@@ -0,0 +1,244 @@
74164+#ifndef GR_SECURITY_H
74165+#define GR_SECURITY_H
74166+#include <linux/fs.h>
74167+#include <linux/fs_struct.h>
74168+#include <linux/binfmts.h>
74169+#include <linux/gracl.h>
74170+
74171+/* notify of brain-dead configs */
74172+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74173+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
74174+#endif
74175+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
74176+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
74177+#endif
74178+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
74179+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
74180+#endif
74181+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
74182+#error "CONFIG_PAX enabled, but no PaX options are enabled."
74183+#endif
74184+
74185+int gr_handle_new_usb(void);
74186+
74187+void gr_handle_brute_attach(unsigned long mm_flags);
74188+void gr_handle_brute_check(void);
74189+void gr_handle_kernel_exploit(void);
74190+
74191+char gr_roletype_to_char(void);
74192+
74193+int gr_acl_enable_at_secure(void);
74194+
74195+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
74196+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
74197+
74198+void gr_del_task_from_ip_table(struct task_struct *p);
74199+
74200+int gr_pid_is_chrooted(struct task_struct *p);
74201+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
74202+int gr_handle_chroot_nice(void);
74203+int gr_handle_chroot_sysctl(const int op);
74204+int gr_handle_chroot_setpriority(struct task_struct *p,
74205+ const int niceval);
74206+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
74207+int gr_handle_chroot_chroot(const struct dentry *dentry,
74208+ const struct vfsmount *mnt);
74209+void gr_handle_chroot_chdir(const struct path *path);
74210+int gr_handle_chroot_chmod(const struct dentry *dentry,
74211+ const struct vfsmount *mnt, const int mode);
74212+int gr_handle_chroot_mknod(const struct dentry *dentry,
74213+ const struct vfsmount *mnt, const int mode);
74214+int gr_handle_chroot_mount(const struct dentry *dentry,
74215+ const struct vfsmount *mnt,
74216+ const char *dev_name);
74217+int gr_handle_chroot_pivot(void);
74218+int gr_handle_chroot_unix(const pid_t pid);
74219+
74220+int gr_handle_rawio(const struct inode *inode);
74221+
74222+void gr_handle_ioperm(void);
74223+void gr_handle_iopl(void);
74224+
74225+umode_t gr_acl_umask(void);
74226+
74227+int gr_tpe_allow(const struct file *file);
74228+
74229+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
74230+void gr_clear_chroot_entries(struct task_struct *task);
74231+
74232+void gr_log_forkfail(const int retval);
74233+void gr_log_timechange(void);
74234+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
74235+void gr_log_chdir(const struct dentry *dentry,
74236+ const struct vfsmount *mnt);
74237+void gr_log_chroot_exec(const struct dentry *dentry,
74238+ const struct vfsmount *mnt);
74239+void gr_log_remount(const char *devname, const int retval);
74240+void gr_log_unmount(const char *devname, const int retval);
74241+void gr_log_mount(const char *from, const char *to, const int retval);
74242+void gr_log_textrel(struct vm_area_struct *vma);
74243+void gr_log_ptgnustack(struct file *file);
74244+void gr_log_rwxmmap(struct file *file);
74245+void gr_log_rwxmprotect(struct vm_area_struct *vma);
74246+
74247+int gr_handle_follow_link(const struct inode *parent,
74248+ const struct inode *inode,
74249+ const struct dentry *dentry,
74250+ const struct vfsmount *mnt);
74251+int gr_handle_fifo(const struct dentry *dentry,
74252+ const struct vfsmount *mnt,
74253+ const struct dentry *dir, const int flag,
74254+ const int acc_mode);
74255+int gr_handle_hardlink(const struct dentry *dentry,
74256+ const struct vfsmount *mnt,
74257+ struct inode *inode,
74258+ const int mode, const struct filename *to);
74259+
74260+int gr_is_capable(const int cap);
74261+int gr_is_capable_nolog(const int cap);
74262+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
74263+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
74264+
74265+void gr_copy_label(struct task_struct *tsk);
74266+void gr_handle_crash(struct task_struct *task, const int sig);
74267+int gr_handle_signal(const struct task_struct *p, const int sig);
74268+int gr_check_crash_uid(const kuid_t uid);
74269+int gr_check_protected_task(const struct task_struct *task);
74270+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
74271+int gr_acl_handle_mmap(const struct file *file,
74272+ const unsigned long prot);
74273+int gr_acl_handle_mprotect(const struct file *file,
74274+ const unsigned long prot);
74275+int gr_check_hidden_task(const struct task_struct *tsk);
74276+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
74277+ const struct vfsmount *mnt);
74278+__u32 gr_acl_handle_utime(const struct dentry *dentry,
74279+ const struct vfsmount *mnt);
74280+__u32 gr_acl_handle_access(const struct dentry *dentry,
74281+ const struct vfsmount *mnt, const int fmode);
74282+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
74283+ const struct vfsmount *mnt, umode_t *mode);
74284+__u32 gr_acl_handle_chown(const struct dentry *dentry,
74285+ const struct vfsmount *mnt);
74286+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
74287+ const struct vfsmount *mnt);
74288+int gr_handle_ptrace(struct task_struct *task, const long request);
74289+int gr_handle_proc_ptrace(struct task_struct *task);
74290+__u32 gr_acl_handle_execve(const struct dentry *dentry,
74291+ const struct vfsmount *mnt);
74292+int gr_check_crash_exec(const struct file *filp);
74293+int gr_acl_is_enabled(void);
74294+void gr_set_kernel_label(struct task_struct *task);
74295+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
74296+ const kgid_t gid);
74297+int gr_set_proc_label(const struct dentry *dentry,
74298+ const struct vfsmount *mnt,
74299+ const int unsafe_flags);
74300+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
74301+ const struct vfsmount *mnt);
74302+__u32 gr_acl_handle_open(const struct dentry *dentry,
74303+ const struct vfsmount *mnt, int acc_mode);
74304+__u32 gr_acl_handle_creat(const struct dentry *dentry,
74305+ const struct dentry *p_dentry,
74306+ const struct vfsmount *p_mnt,
74307+ int open_flags, int acc_mode, const int imode);
74308+void gr_handle_create(const struct dentry *dentry,
74309+ const struct vfsmount *mnt);
74310+void gr_handle_proc_create(const struct dentry *dentry,
74311+ const struct inode *inode);
74312+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
74313+ const struct dentry *parent_dentry,
74314+ const struct vfsmount *parent_mnt,
74315+ const int mode);
74316+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
74317+ const struct dentry *parent_dentry,
74318+ const struct vfsmount *parent_mnt);
74319+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
74320+ const struct vfsmount *mnt);
74321+void gr_handle_delete(const ino_t ino, const dev_t dev);
74322+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
74323+ const struct vfsmount *mnt);
74324+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
74325+ const struct dentry *parent_dentry,
74326+ const struct vfsmount *parent_mnt,
74327+ const struct filename *from);
74328+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
74329+ const struct dentry *parent_dentry,
74330+ const struct vfsmount *parent_mnt,
74331+ const struct dentry *old_dentry,
74332+ const struct vfsmount *old_mnt, const struct filename *to);
74333+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
74334+int gr_acl_handle_rename(struct dentry *new_dentry,
74335+ struct dentry *parent_dentry,
74336+ const struct vfsmount *parent_mnt,
74337+ struct dentry *old_dentry,
74338+ struct inode *old_parent_inode,
74339+ struct vfsmount *old_mnt, const struct filename *newname);
74340+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
74341+ struct dentry *old_dentry,
74342+ struct dentry *new_dentry,
74343+ struct vfsmount *mnt, const __u8 replace);
74344+__u32 gr_check_link(const struct dentry *new_dentry,
74345+ const struct dentry *parent_dentry,
74346+ const struct vfsmount *parent_mnt,
74347+ const struct dentry *old_dentry,
74348+ const struct vfsmount *old_mnt);
74349+int gr_acl_handle_filldir(const struct file *file, const char *name,
74350+ const unsigned int namelen, const ino_t ino);
74351+
74352+__u32 gr_acl_handle_unix(const struct dentry *dentry,
74353+ const struct vfsmount *mnt);
74354+void gr_acl_handle_exit(void);
74355+void gr_acl_handle_psacct(struct task_struct *task, const long code);
74356+int gr_acl_handle_procpidmem(const struct task_struct *task);
74357+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
74358+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
74359+void gr_audit_ptrace(struct task_struct *task);
74360+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
74361+void gr_put_exec_file(struct task_struct *task);
74362+
74363+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
74364+
74365+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
74366+extern void gr_learn_resource(const struct task_struct *task, const int res,
74367+ const unsigned long wanted, const int gt);
74368+#else
74369+static inline void gr_learn_resource(const struct task_struct *task, const int res,
74370+ const unsigned long wanted, const int gt)
74371+{
74372+}
74373+#endif
74374+
74375+#ifdef CONFIG_GRKERNSEC_RESLOG
74376+extern void gr_log_resource(const struct task_struct *task, const int res,
74377+ const unsigned long wanted, const int gt);
74378+#else
74379+static inline void gr_log_resource(const struct task_struct *task, const int res,
74380+ const unsigned long wanted, const int gt)
74381+{
74382+}
74383+#endif
74384+
74385+#ifdef CONFIG_GRKERNSEC
74386+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
74387+void gr_handle_vm86(void);
74388+void gr_handle_mem_readwrite(u64 from, u64 to);
74389+
74390+void gr_log_badprocpid(const char *entry);
74391+
74392+extern int grsec_enable_dmesg;
74393+extern int grsec_disable_privio;
74394+
74395+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74396+extern kgid_t grsec_proc_gid;
74397+#endif
74398+
74399+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
74400+extern int grsec_enable_chroot_findtask;
74401+#endif
74402+#ifdef CONFIG_GRKERNSEC_SETXID
74403+extern int grsec_enable_setxid;
74404+#endif
74405+#endif
74406+
74407+#endif
74408diff --git a/include/linux/grsock.h b/include/linux/grsock.h
74409new file mode 100644
74410index 0000000..e7ffaaf
74411--- /dev/null
74412+++ b/include/linux/grsock.h
74413@@ -0,0 +1,19 @@
74414+#ifndef __GRSOCK_H
74415+#define __GRSOCK_H
74416+
74417+extern void gr_attach_curr_ip(const struct sock *sk);
74418+extern int gr_handle_sock_all(const int family, const int type,
74419+ const int protocol);
74420+extern int gr_handle_sock_server(const struct sockaddr *sck);
74421+extern int gr_handle_sock_server_other(const struct sock *sck);
74422+extern int gr_handle_sock_client(const struct sockaddr *sck);
74423+extern int gr_search_connect(struct socket * sock,
74424+ struct sockaddr_in * addr);
74425+extern int gr_search_bind(struct socket * sock,
74426+ struct sockaddr_in * addr);
74427+extern int gr_search_listen(struct socket * sock);
74428+extern int gr_search_accept(struct socket * sock);
74429+extern int gr_search_socket(const int domain, const int type,
74430+ const int protocol);
74431+
74432+#endif
74433diff --git a/include/linux/hid.h b/include/linux/hid.h
74434index 0c48991..76e41d8 100644
74435--- a/include/linux/hid.h
74436+++ b/include/linux/hid.h
74437@@ -393,10 +393,12 @@ struct hid_report {
74438 struct hid_device *device; /* associated device */
74439 };
74440
74441+#define HID_MAX_IDS 256
74442+
74443 struct hid_report_enum {
74444 unsigned numbered;
74445 struct list_head report_list;
74446- struct hid_report *report_id_hash[256];
74447+ struct hid_report *report_id_hash[HID_MAX_IDS];
74448 };
74449
74450 #define HID_REPORT_TYPES 3
74451@@ -747,6 +749,10 @@ void hid_output_report(struct hid_report *report, __u8 *data);
74452 struct hid_device *hid_allocate_device(void);
74453 struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
74454 int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
74455+struct hid_report *hid_validate_report(struct hid_device *hid,
74456+ unsigned int type, unsigned int id,
74457+ unsigned int fields,
74458+ unsigned int report_counts);
74459 int hid_open_report(struct hid_device *device);
74460 int hid_check_keys_pressed(struct hid_device *hid);
74461 int hid_connect(struct hid_device *hid, unsigned int connect_mask);
74462diff --git a/include/linux/highmem.h b/include/linux/highmem.h
74463index 7fb31da..08b5114 100644
74464--- a/include/linux/highmem.h
74465+++ b/include/linux/highmem.h
74466@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
74467 kunmap_atomic(kaddr);
74468 }
74469
74470+static inline void sanitize_highpage(struct page *page)
74471+{
74472+ void *kaddr;
74473+ unsigned long flags;
74474+
74475+ local_irq_save(flags);
74476+ kaddr = kmap_atomic(page);
74477+ clear_page(kaddr);
74478+ kunmap_atomic(kaddr);
74479+ local_irq_restore(flags);
74480+}
74481+
74482 static inline void zero_user_segments(struct page *page,
74483 unsigned start1, unsigned end1,
74484 unsigned start2, unsigned end2)
74485diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
74486index 1c7b89a..7f52502 100644
74487--- a/include/linux/hwmon-sysfs.h
74488+++ b/include/linux/hwmon-sysfs.h
74489@@ -25,7 +25,8 @@
74490 struct sensor_device_attribute{
74491 struct device_attribute dev_attr;
74492 int index;
74493-};
74494+} __do_const;
74495+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
74496 #define to_sensor_dev_attr(_dev_attr) \
74497 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
74498
74499@@ -41,7 +42,7 @@ struct sensor_device_attribute_2 {
74500 struct device_attribute dev_attr;
74501 u8 index;
74502 u8 nr;
74503-};
74504+} __do_const;
74505 #define to_sensor_dev_attr_2(_dev_attr) \
74506 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
74507
74508diff --git a/include/linux/i2c.h b/include/linux/i2c.h
74509index e988fa9..ff9f17e 100644
74510--- a/include/linux/i2c.h
74511+++ b/include/linux/i2c.h
74512@@ -366,6 +366,7 @@ struct i2c_algorithm {
74513 /* To determine what the adapter supports */
74514 u32 (*functionality) (struct i2c_adapter *);
74515 };
74516+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
74517
74518 /**
74519 * struct i2c_bus_recovery_info - I2C bus recovery information
74520diff --git a/include/linux/i2o.h b/include/linux/i2o.h
74521index d23c3c2..eb63c81 100644
74522--- a/include/linux/i2o.h
74523+++ b/include/linux/i2o.h
74524@@ -565,7 +565,7 @@ struct i2o_controller {
74525 struct i2o_device *exec; /* Executive */
74526 #if BITS_PER_LONG == 64
74527 spinlock_t context_list_lock; /* lock for context_list */
74528- atomic_t context_list_counter; /* needed for unique contexts */
74529+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
74530 struct list_head context_list; /* list of context id's
74531 and pointers */
74532 #endif
74533diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
74534index aff7ad8..3942bbd 100644
74535--- a/include/linux/if_pppox.h
74536+++ b/include/linux/if_pppox.h
74537@@ -76,7 +76,7 @@ struct pppox_proto {
74538 int (*ioctl)(struct socket *sock, unsigned int cmd,
74539 unsigned long arg);
74540 struct module *owner;
74541-};
74542+} __do_const;
74543
74544 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
74545 extern void unregister_pppox_proto(int proto_num);
74546diff --git a/include/linux/init.h b/include/linux/init.h
74547index 8618147..0821126 100644
74548--- a/include/linux/init.h
74549+++ b/include/linux/init.h
74550@@ -39,9 +39,36 @@
74551 * Also note, that this data cannot be "const".
74552 */
74553
74554+#ifdef MODULE
74555+#define add_init_latent_entropy
74556+#define add_devinit_latent_entropy
74557+#define add_cpuinit_latent_entropy
74558+#define add_meminit_latent_entropy
74559+#else
74560+#define add_init_latent_entropy __latent_entropy
74561+
74562+#ifdef CONFIG_HOTPLUG
74563+#define add_devinit_latent_entropy
74564+#else
74565+#define add_devinit_latent_entropy __latent_entropy
74566+#endif
74567+
74568+#ifdef CONFIG_HOTPLUG_CPU
74569+#define add_cpuinit_latent_entropy
74570+#else
74571+#define add_cpuinit_latent_entropy __latent_entropy
74572+#endif
74573+
74574+#ifdef CONFIG_MEMORY_HOTPLUG
74575+#define add_meminit_latent_entropy
74576+#else
74577+#define add_meminit_latent_entropy __latent_entropy
74578+#endif
74579+#endif
74580+
74581 /* These are for everybody (although not all archs will actually
74582 discard it in modules) */
74583-#define __init __section(.init.text) __cold notrace
74584+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
74585 #define __initdata __section(.init.data)
74586 #define __initconst __constsection(.init.rodata)
74587 #define __exitdata __section(.exit.data)
74588@@ -94,7 +121,7 @@
74589 #define __exit __section(.exit.text) __exitused __cold notrace
74590
74591 /* Used for HOTPLUG_CPU */
74592-#define __cpuinit __section(.cpuinit.text) __cold notrace
74593+#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
74594 #define __cpuinitdata __section(.cpuinit.data)
74595 #define __cpuinitconst __constsection(.cpuinit.rodata)
74596 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
74597@@ -102,7 +129,7 @@
74598 #define __cpuexitconst __constsection(.cpuexit.rodata)
74599
74600 /* Used for MEMORY_HOTPLUG */
74601-#define __meminit __section(.meminit.text) __cold notrace
74602+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
74603 #define __meminitdata __section(.meminit.data)
74604 #define __meminitconst __constsection(.meminit.rodata)
74605 #define __memexit __section(.memexit.text) __exitused __cold notrace
74606diff --git a/include/linux/init_task.h b/include/linux/init_task.h
74607index 5cd0f09..c9f67cc 100644
74608--- a/include/linux/init_task.h
74609+++ b/include/linux/init_task.h
74610@@ -154,6 +154,12 @@ extern struct task_group root_task_group;
74611
74612 #define INIT_TASK_COMM "swapper"
74613
74614+#ifdef CONFIG_X86
74615+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
74616+#else
74617+#define INIT_TASK_THREAD_INFO
74618+#endif
74619+
74620 /*
74621 * INIT_TASK is used to set up the first task table, touch at
74622 * your own risk!. Base=0, limit=0x1fffff (=2MB)
74623@@ -193,6 +199,7 @@ extern struct task_group root_task_group;
74624 RCU_POINTER_INITIALIZER(cred, &init_cred), \
74625 .comm = INIT_TASK_COMM, \
74626 .thread = INIT_THREAD, \
74627+ INIT_TASK_THREAD_INFO \
74628 .fs = &init_fs, \
74629 .files = &init_files, \
74630 .signal = &init_signals, \
74631diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
74632index 5fa5afe..ac55b25 100644
74633--- a/include/linux/interrupt.h
74634+++ b/include/linux/interrupt.h
74635@@ -430,7 +430,7 @@ enum
74636 /* map softirq index to softirq name. update 'softirq_to_name' in
74637 * kernel/softirq.c when adding a new softirq.
74638 */
74639-extern char *softirq_to_name[NR_SOFTIRQS];
74640+extern const char * const softirq_to_name[NR_SOFTIRQS];
74641
74642 /* softirq mask and active fields moved to irq_cpustat_t in
74643 * asm/hardirq.h to get better cache usage. KAO
74644@@ -438,12 +438,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
74645
74646 struct softirq_action
74647 {
74648- void (*action)(struct softirq_action *);
74649-};
74650+ void (*action)(void);
74651+} __no_const;
74652
74653 asmlinkage void do_softirq(void);
74654 asmlinkage void __do_softirq(void);
74655-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
74656+extern void open_softirq(int nr, void (*action)(void));
74657 extern void softirq_init(void);
74658 extern void __raise_softirq_irqoff(unsigned int nr);
74659
74660diff --git a/include/linux/iommu.h b/include/linux/iommu.h
74661index 3aeb730..2177f39 100644
74662--- a/include/linux/iommu.h
74663+++ b/include/linux/iommu.h
74664@@ -113,7 +113,7 @@ struct iommu_ops {
74665 u32 (*domain_get_windows)(struct iommu_domain *domain);
74666
74667 unsigned long pgsize_bitmap;
74668-};
74669+} __do_const;
74670
74671 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
74672 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
74673diff --git a/include/linux/ioport.h b/include/linux/ioport.h
74674index 89b7c24..382af74 100644
74675--- a/include/linux/ioport.h
74676+++ b/include/linux/ioport.h
74677@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
74678 int adjust_resource(struct resource *res, resource_size_t start,
74679 resource_size_t size);
74680 resource_size_t resource_alignment(struct resource *res);
74681-static inline resource_size_t resource_size(const struct resource *res)
74682+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
74683 {
74684 return res->end - res->start + 1;
74685 }
74686diff --git a/include/linux/irq.h b/include/linux/irq.h
74687index bc4e066..50468a9 100644
74688--- a/include/linux/irq.h
74689+++ b/include/linux/irq.h
74690@@ -328,7 +328,8 @@ struct irq_chip {
74691 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
74692
74693 unsigned long flags;
74694-};
74695+} __do_const;
74696+typedef struct irq_chip __no_const irq_chip_no_const;
74697
74698 /*
74699 * irq_chip specific flags
74700diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
74701index 3e203eb..3fe68d0 100644
74702--- a/include/linux/irqchip/arm-gic.h
74703+++ b/include/linux/irqchip/arm-gic.h
74704@@ -59,9 +59,11 @@
74705
74706 #ifndef __ASSEMBLY__
74707
74708+#include <linux/irq.h>
74709+
74710 struct device_node;
74711
74712-extern struct irq_chip gic_arch_extn;
74713+extern irq_chip_no_const gic_arch_extn;
74714
74715 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
74716 u32 offset, struct device_node *);
74717diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
74718index 6883e19..e854fcb 100644
74719--- a/include/linux/kallsyms.h
74720+++ b/include/linux/kallsyms.h
74721@@ -15,7 +15,8 @@
74722
74723 struct module;
74724
74725-#ifdef CONFIG_KALLSYMS
74726+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
74727+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74728 /* Lookup the address for a symbol. Returns 0 if not found. */
74729 unsigned long kallsyms_lookup_name(const char *name);
74730
74731@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
74732 /* Stupid that this does nothing, but I didn't create this mess. */
74733 #define __print_symbol(fmt, addr)
74734 #endif /*CONFIG_KALLSYMS*/
74735+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
74736+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
74737+extern unsigned long kallsyms_lookup_name(const char *name);
74738+extern void __print_symbol(const char *fmt, unsigned long address);
74739+extern int sprint_backtrace(char *buffer, unsigned long address);
74740+extern int sprint_symbol(char *buffer, unsigned long address);
74741+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
74742+const char *kallsyms_lookup(unsigned long addr,
74743+ unsigned long *symbolsize,
74744+ unsigned long *offset,
74745+ char **modname, char *namebuf);
74746+extern int kallsyms_lookup_size_offset(unsigned long addr,
74747+ unsigned long *symbolsize,
74748+ unsigned long *offset);
74749+#endif
74750
74751 /* This macro allows us to keep printk typechecking */
74752 static __printf(1, 2)
74753diff --git a/include/linux/key-type.h b/include/linux/key-type.h
74754index 518a53a..5e28358 100644
74755--- a/include/linux/key-type.h
74756+++ b/include/linux/key-type.h
74757@@ -125,7 +125,7 @@ struct key_type {
74758 /* internal fields */
74759 struct list_head link; /* link in types list */
74760 struct lock_class_key lock_class; /* key->sem lock class */
74761-};
74762+} __do_const;
74763
74764 extern struct key_type key_type_keyring;
74765
74766diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
74767index c6e091b..a940adf 100644
74768--- a/include/linux/kgdb.h
74769+++ b/include/linux/kgdb.h
74770@@ -52,7 +52,7 @@ extern int kgdb_connected;
74771 extern int kgdb_io_module_registered;
74772
74773 extern atomic_t kgdb_setting_breakpoint;
74774-extern atomic_t kgdb_cpu_doing_single_step;
74775+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
74776
74777 extern struct task_struct *kgdb_usethread;
74778 extern struct task_struct *kgdb_contthread;
74779@@ -254,7 +254,7 @@ struct kgdb_arch {
74780 void (*correct_hw_break)(void);
74781
74782 void (*enable_nmi)(bool on);
74783-};
74784+} __do_const;
74785
74786 /**
74787 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
74788@@ -279,7 +279,7 @@ struct kgdb_io {
74789 void (*pre_exception) (void);
74790 void (*post_exception) (void);
74791 int is_console;
74792-};
74793+} __do_const;
74794
74795 extern struct kgdb_arch arch_kgdb_ops;
74796
74797diff --git a/include/linux/kmod.h b/include/linux/kmod.h
74798index 0555cc6..b16a7a4 100644
74799--- a/include/linux/kmod.h
74800+++ b/include/linux/kmod.h
74801@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
74802 * usually useless though. */
74803 extern __printf(2, 3)
74804 int __request_module(bool wait, const char *name, ...);
74805+extern __printf(3, 4)
74806+int ___request_module(bool wait, char *param_name, const char *name, ...);
74807 #define request_module(mod...) __request_module(true, mod)
74808 #define request_module_nowait(mod...) __request_module(false, mod)
74809 #define try_then_request_module(x, mod...) \
74810diff --git a/include/linux/kobject.h b/include/linux/kobject.h
74811index 939b112..ed6ed51 100644
74812--- a/include/linux/kobject.h
74813+++ b/include/linux/kobject.h
74814@@ -111,7 +111,7 @@ struct kobj_type {
74815 struct attribute **default_attrs;
74816 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
74817 const void *(*namespace)(struct kobject *kobj);
74818-};
74819+} __do_const;
74820
74821 struct kobj_uevent_env {
74822 char *envp[UEVENT_NUM_ENVP];
74823@@ -134,6 +134,7 @@ struct kobj_attribute {
74824 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
74825 const char *buf, size_t count);
74826 };
74827+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
74828
74829 extern const struct sysfs_ops kobj_sysfs_ops;
74830
74831diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
74832index f66b065..c2c29b4 100644
74833--- a/include/linux/kobject_ns.h
74834+++ b/include/linux/kobject_ns.h
74835@@ -43,7 +43,7 @@ struct kobj_ns_type_operations {
74836 const void *(*netlink_ns)(struct sock *sk);
74837 const void *(*initial_ns)(void);
74838 void (*drop_ns)(void *);
74839-};
74840+} __do_const;
74841
74842 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
74843 int kobj_ns_type_registered(enum kobj_ns_type type);
74844diff --git a/include/linux/kref.h b/include/linux/kref.h
74845index 484604d..0f6c5b6 100644
74846--- a/include/linux/kref.h
74847+++ b/include/linux/kref.h
74848@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
74849 static inline int kref_sub(struct kref *kref, unsigned int count,
74850 void (*release)(struct kref *kref))
74851 {
74852- WARN_ON(release == NULL);
74853+ BUG_ON(release == NULL);
74854
74855 if (atomic_sub_and_test((int) count, &kref->refcount)) {
74856 release(kref);
74857diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
74858index 8db53cf..c21121d 100644
74859--- a/include/linux/kvm_host.h
74860+++ b/include/linux/kvm_host.h
74861@@ -444,7 +444,7 @@ static inline void kvm_irqfd_exit(void)
74862 {
74863 }
74864 #endif
74865-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
74866+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
74867 struct module *module);
74868 void kvm_exit(void);
74869
74870@@ -616,7 +616,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
74871 struct kvm_guest_debug *dbg);
74872 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
74873
74874-int kvm_arch_init(void *opaque);
74875+int kvm_arch_init(const void *opaque);
74876 void kvm_arch_exit(void);
74877
74878 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
74879diff --git a/include/linux/libata.h b/include/linux/libata.h
74880index eae7a05..2cdd875 100644
74881--- a/include/linux/libata.h
74882+++ b/include/linux/libata.h
74883@@ -919,7 +919,7 @@ struct ata_port_operations {
74884 * fields must be pointers.
74885 */
74886 const struct ata_port_operations *inherits;
74887-};
74888+} __do_const;
74889
74890 struct ata_port_info {
74891 unsigned long flags;
74892diff --git a/include/linux/list.h b/include/linux/list.h
74893index b83e565..baa6c1d 100644
74894--- a/include/linux/list.h
74895+++ b/include/linux/list.h
74896@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
74897 extern void list_del(struct list_head *entry);
74898 #endif
74899
74900+extern void __pax_list_add(struct list_head *new,
74901+ struct list_head *prev,
74902+ struct list_head *next);
74903+static inline void pax_list_add(struct list_head *new, struct list_head *head)
74904+{
74905+ __pax_list_add(new, head, head->next);
74906+}
74907+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
74908+{
74909+ __pax_list_add(new, head->prev, head);
74910+}
74911+extern void pax_list_del(struct list_head *entry);
74912+
74913 /**
74914 * list_replace - replace old entry by new one
74915 * @old : the element to be replaced
74916@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
74917 INIT_LIST_HEAD(entry);
74918 }
74919
74920+extern void pax_list_del_init(struct list_head *entry);
74921+
74922 /**
74923 * list_move - delete from one list and add as another's head
74924 * @list: the entry to move
74925diff --git a/include/linux/math64.h b/include/linux/math64.h
74926index 2913b86..8dcbb1e 100644
74927--- a/include/linux/math64.h
74928+++ b/include/linux/math64.h
74929@@ -15,7 +15,7 @@
74930 * This is commonly provided by 32bit archs to provide an optimized 64bit
74931 * divide.
74932 */
74933-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
74934+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
74935 {
74936 *remainder = dividend % divisor;
74937 return dividend / divisor;
74938@@ -33,7 +33,7 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
74939 /**
74940 * div64_u64 - unsigned 64bit divide with 64bit divisor
74941 */
74942-static inline u64 div64_u64(u64 dividend, u64 divisor)
74943+static inline u64 __intentional_overflow(0) div64_u64(u64 dividend, u64 divisor)
74944 {
74945 return dividend / divisor;
74946 }
74947@@ -52,7 +52,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
74948 #define div64_ul(x, y) div_u64((x), (y))
74949
74950 #ifndef div_u64_rem
74951-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
74952+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
74953 {
74954 *remainder = do_div(dividend, divisor);
74955 return dividend;
74956@@ -81,7 +81,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
74957 * divide.
74958 */
74959 #ifndef div_u64
74960-static inline u64 div_u64(u64 dividend, u32 divisor)
74961+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
74962 {
74963 u32 remainder;
74964 return div_u64_rem(dividend, divisor, &remainder);
74965diff --git a/include/linux/mm.h b/include/linux/mm.h
74966index e0c8528..bcf0c29 100644
74967--- a/include/linux/mm.h
74968+++ b/include/linux/mm.h
74969@@ -104,6 +104,11 @@ extern unsigned int kobjsize(const void *objp);
74970 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
74971 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
74972 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
74973+
74974+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
74975+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
74976+#endif
74977+
74978 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
74979
74980 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
74981@@ -205,8 +210,8 @@ struct vm_operations_struct {
74982 /* called by access_process_vm when get_user_pages() fails, typically
74983 * for use by special VMAs that can switch between memory and hardware
74984 */
74985- int (*access)(struct vm_area_struct *vma, unsigned long addr,
74986- void *buf, int len, int write);
74987+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
74988+ void *buf, size_t len, int write);
74989 #ifdef CONFIG_NUMA
74990 /*
74991 * set_policy() op must add a reference to any non-NULL @new mempolicy
74992@@ -236,6 +241,7 @@ struct vm_operations_struct {
74993 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
74994 unsigned long size, pgoff_t pgoff);
74995 };
74996+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
74997
74998 struct mmu_gather;
74999 struct inode;
75000@@ -980,8 +986,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
75001 unsigned long *pfn);
75002 int follow_phys(struct vm_area_struct *vma, unsigned long address,
75003 unsigned int flags, unsigned long *prot, resource_size_t *phys);
75004-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
75005- void *buf, int len, int write);
75006+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
75007+ void *buf, size_t len, int write);
75008
75009 static inline void unmap_shared_mapping_range(struct address_space *mapping,
75010 loff_t const holebegin, loff_t const holelen)
75011@@ -1020,9 +1026,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
75012 }
75013 #endif
75014
75015-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
75016-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
75017- void *buf, int len, int write);
75018+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
75019+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
75020+ void *buf, size_t len, int write);
75021
75022 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
75023 unsigned long start, unsigned long nr_pages,
75024@@ -1053,34 +1059,6 @@ int set_page_dirty(struct page *page);
75025 int set_page_dirty_lock(struct page *page);
75026 int clear_page_dirty_for_io(struct page *page);
75027
75028-/* Is the vma a continuation of the stack vma above it? */
75029-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
75030-{
75031- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
75032-}
75033-
75034-static inline int stack_guard_page_start(struct vm_area_struct *vma,
75035- unsigned long addr)
75036-{
75037- return (vma->vm_flags & VM_GROWSDOWN) &&
75038- (vma->vm_start == addr) &&
75039- !vma_growsdown(vma->vm_prev, addr);
75040-}
75041-
75042-/* Is the vma a continuation of the stack vma below it? */
75043-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
75044-{
75045- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
75046-}
75047-
75048-static inline int stack_guard_page_end(struct vm_area_struct *vma,
75049- unsigned long addr)
75050-{
75051- return (vma->vm_flags & VM_GROWSUP) &&
75052- (vma->vm_end == addr) &&
75053- !vma_growsup(vma->vm_next, addr);
75054-}
75055-
75056 extern pid_t
75057 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
75058
75059@@ -1180,6 +1158,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
75060 }
75061 #endif
75062
75063+#ifdef CONFIG_MMU
75064+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
75065+#else
75066+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
75067+{
75068+ return __pgprot(0);
75069+}
75070+#endif
75071+
75072 int vma_wants_writenotify(struct vm_area_struct *vma);
75073
75074 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
75075@@ -1198,8 +1185,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
75076 {
75077 return 0;
75078 }
75079+
75080+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
75081+ unsigned long address)
75082+{
75083+ return 0;
75084+}
75085 #else
75086 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
75087+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
75088 #endif
75089
75090 #ifdef __PAGETABLE_PMD_FOLDED
75091@@ -1208,8 +1202,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
75092 {
75093 return 0;
75094 }
75095+
75096+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
75097+ unsigned long address)
75098+{
75099+ return 0;
75100+}
75101 #else
75102 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
75103+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
75104 #endif
75105
75106 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
75107@@ -1227,11 +1228,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
75108 NULL: pud_offset(pgd, address);
75109 }
75110
75111+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
75112+{
75113+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
75114+ NULL: pud_offset(pgd, address);
75115+}
75116+
75117 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
75118 {
75119 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
75120 NULL: pmd_offset(pud, address);
75121 }
75122+
75123+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
75124+{
75125+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
75126+ NULL: pmd_offset(pud, address);
75127+}
75128 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
75129
75130 #if USE_SPLIT_PTLOCKS
75131@@ -1517,6 +1530,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75132 unsigned long len, unsigned long prot, unsigned long flags,
75133 unsigned long pgoff, unsigned long *populate);
75134 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
75135+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
75136
75137 #ifdef CONFIG_MMU
75138 extern int __mm_populate(unsigned long addr, unsigned long len,
75139@@ -1545,10 +1559,11 @@ struct vm_unmapped_area_info {
75140 unsigned long high_limit;
75141 unsigned long align_mask;
75142 unsigned long align_offset;
75143+ unsigned long threadstack_offset;
75144 };
75145
75146-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
75147-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
75148+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
75149+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
75150
75151 /*
75152 * Search for an unmapped address range.
75153@@ -1560,7 +1575,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
75154 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
75155 */
75156 static inline unsigned long
75157-vm_unmapped_area(struct vm_unmapped_area_info *info)
75158+vm_unmapped_area(const struct vm_unmapped_area_info *info)
75159 {
75160 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
75161 return unmapped_area(info);
75162@@ -1623,6 +1638,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
75163 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
75164 struct vm_area_struct **pprev);
75165
75166+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
75167+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
75168+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
75169+
75170 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
75171 NULL if none. Assume start_addr < end_addr. */
75172 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
75173@@ -1651,15 +1670,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
75174 return vma;
75175 }
75176
75177-#ifdef CONFIG_MMU
75178-pgprot_t vm_get_page_prot(unsigned long vm_flags);
75179-#else
75180-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
75181-{
75182- return __pgprot(0);
75183-}
75184-#endif
75185-
75186 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
75187 unsigned long change_prot_numa(struct vm_area_struct *vma,
75188 unsigned long start, unsigned long end);
75189@@ -1711,6 +1721,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
75190 static inline void vm_stat_account(struct mm_struct *mm,
75191 unsigned long flags, struct file *file, long pages)
75192 {
75193+
75194+#ifdef CONFIG_PAX_RANDMMAP
75195+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
75196+#endif
75197+
75198 mm->total_vm += pages;
75199 }
75200 #endif /* CONFIG_PROC_FS */
75201@@ -1791,7 +1806,7 @@ extern int unpoison_memory(unsigned long pfn);
75202 extern int sysctl_memory_failure_early_kill;
75203 extern int sysctl_memory_failure_recovery;
75204 extern void shake_page(struct page *p, int access);
75205-extern atomic_long_t num_poisoned_pages;
75206+extern atomic_long_unchecked_t num_poisoned_pages;
75207 extern int soft_offline_page(struct page *page, int flags);
75208
75209 extern void dump_page(struct page *page);
75210@@ -1828,5 +1843,11 @@ void __init setup_nr_node_ids(void);
75211 static inline void setup_nr_node_ids(void) {}
75212 #endif
75213
75214+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
75215+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
75216+#else
75217+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
75218+#endif
75219+
75220 #endif /* __KERNEL__ */
75221 #endif /* _LINUX_MM_H */
75222diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
75223index 4a189ba..04101d6 100644
75224--- a/include/linux/mm_types.h
75225+++ b/include/linux/mm_types.h
75226@@ -289,6 +289,8 @@ struct vm_area_struct {
75227 #ifdef CONFIG_NUMA
75228 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
75229 #endif
75230+
75231+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
75232 };
75233
75234 struct core_thread {
75235@@ -438,6 +440,24 @@ struct mm_struct {
75236 int first_nid;
75237 #endif
75238 struct uprobes_state uprobes_state;
75239+
75240+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
75241+ unsigned long pax_flags;
75242+#endif
75243+
75244+#ifdef CONFIG_PAX_DLRESOLVE
75245+ unsigned long call_dl_resolve;
75246+#endif
75247+
75248+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
75249+ unsigned long call_syscall;
75250+#endif
75251+
75252+#ifdef CONFIG_PAX_ASLR
75253+ unsigned long delta_mmap; /* randomized offset */
75254+ unsigned long delta_stack; /* randomized offset */
75255+#endif
75256+
75257 };
75258
75259 /* first nid will either be a valid NID or one of these values */
75260diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
75261index c5d5278..f0b68c8 100644
75262--- a/include/linux/mmiotrace.h
75263+++ b/include/linux/mmiotrace.h
75264@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
75265 /* Called from ioremap.c */
75266 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
75267 void __iomem *addr);
75268-extern void mmiotrace_iounmap(volatile void __iomem *addr);
75269+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
75270
75271 /* For anyone to insert markers. Remember trailing newline. */
75272 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
75273@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
75274 {
75275 }
75276
75277-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
75278+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
75279 {
75280 }
75281
75282diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
75283index 5c76737..61f518e 100644
75284--- a/include/linux/mmzone.h
75285+++ b/include/linux/mmzone.h
75286@@ -396,7 +396,7 @@ struct zone {
75287 unsigned long flags; /* zone flags, see below */
75288
75289 /* Zone statistics */
75290- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
75291+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
75292
75293 /*
75294 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
75295diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
75296index b508016..237cfe5 100644
75297--- a/include/linux/mod_devicetable.h
75298+++ b/include/linux/mod_devicetable.h
75299@@ -13,7 +13,7 @@
75300 typedef unsigned long kernel_ulong_t;
75301 #endif
75302
75303-#define PCI_ANY_ID (~0)
75304+#define PCI_ANY_ID ((__u16)~0)
75305
75306 struct pci_device_id {
75307 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
75308@@ -139,7 +139,7 @@ struct usb_device_id {
75309 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
75310 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
75311
75312-#define HID_ANY_ID (~0)
75313+#define HID_ANY_ID (~0U)
75314 #define HID_BUS_ANY 0xffff
75315 #define HID_GROUP_ANY 0x0000
75316
75317@@ -465,7 +465,7 @@ struct dmi_system_id {
75318 const char *ident;
75319 struct dmi_strmatch matches[4];
75320 void *driver_data;
75321-};
75322+} __do_const;
75323 /*
75324 * struct dmi_device_id appears during expansion of
75325 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
75326diff --git a/include/linux/module.h b/include/linux/module.h
75327index 46f1ea0..a34ca37 100644
75328--- a/include/linux/module.h
75329+++ b/include/linux/module.h
75330@@ -17,9 +17,11 @@
75331 #include <linux/moduleparam.h>
75332 #include <linux/tracepoint.h>
75333 #include <linux/export.h>
75334+#include <linux/fs.h>
75335
75336 #include <linux/percpu.h>
75337 #include <asm/module.h>
75338+#include <asm/pgtable.h>
75339
75340 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
75341 #define MODULE_SIG_STRING "~Module signature appended~\n"
75342@@ -54,12 +56,13 @@ struct module_attribute {
75343 int (*test)(struct module *);
75344 void (*free)(struct module *);
75345 };
75346+typedef struct module_attribute __no_const module_attribute_no_const;
75347
75348 struct module_version_attribute {
75349 struct module_attribute mattr;
75350 const char *module_name;
75351 const char *version;
75352-} __attribute__ ((__aligned__(sizeof(void *))));
75353+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
75354
75355 extern ssize_t __modver_version_show(struct module_attribute *,
75356 struct module_kobject *, char *);
75357@@ -232,7 +235,7 @@ struct module
75358
75359 /* Sysfs stuff. */
75360 struct module_kobject mkobj;
75361- struct module_attribute *modinfo_attrs;
75362+ module_attribute_no_const *modinfo_attrs;
75363 const char *version;
75364 const char *srcversion;
75365 struct kobject *holders_dir;
75366@@ -281,19 +284,16 @@ struct module
75367 int (*init)(void);
75368
75369 /* If this is non-NULL, vfree after init() returns */
75370- void *module_init;
75371+ void *module_init_rx, *module_init_rw;
75372
75373 /* Here is the actual code + data, vfree'd on unload. */
75374- void *module_core;
75375+ void *module_core_rx, *module_core_rw;
75376
75377 /* Here are the sizes of the init and core sections */
75378- unsigned int init_size, core_size;
75379+ unsigned int init_size_rw, core_size_rw;
75380
75381 /* The size of the executable code in each section. */
75382- unsigned int init_text_size, core_text_size;
75383-
75384- /* Size of RO sections of the module (text+rodata) */
75385- unsigned int init_ro_size, core_ro_size;
75386+ unsigned int init_size_rx, core_size_rx;
75387
75388 /* Arch-specific module values */
75389 struct mod_arch_specific arch;
75390@@ -349,6 +349,10 @@ struct module
75391 #ifdef CONFIG_EVENT_TRACING
75392 struct ftrace_event_call **trace_events;
75393 unsigned int num_trace_events;
75394+ struct file_operations trace_id;
75395+ struct file_operations trace_enable;
75396+ struct file_operations trace_format;
75397+ struct file_operations trace_filter;
75398 #endif
75399 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
75400 unsigned int num_ftrace_callsites;
75401@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
75402 bool is_module_percpu_address(unsigned long addr);
75403 bool is_module_text_address(unsigned long addr);
75404
75405+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
75406+{
75407+
75408+#ifdef CONFIG_PAX_KERNEXEC
75409+ if (ktla_ktva(addr) >= (unsigned long)start &&
75410+ ktla_ktva(addr) < (unsigned long)start + size)
75411+ return 1;
75412+#endif
75413+
75414+ return ((void *)addr >= start && (void *)addr < start + size);
75415+}
75416+
75417+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
75418+{
75419+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
75420+}
75421+
75422+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
75423+{
75424+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
75425+}
75426+
75427+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
75428+{
75429+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
75430+}
75431+
75432+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
75433+{
75434+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
75435+}
75436+
75437 static inline int within_module_core(unsigned long addr, const struct module *mod)
75438 {
75439- return (unsigned long)mod->module_core <= addr &&
75440- addr < (unsigned long)mod->module_core + mod->core_size;
75441+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
75442 }
75443
75444 static inline int within_module_init(unsigned long addr, const struct module *mod)
75445 {
75446- return (unsigned long)mod->module_init <= addr &&
75447- addr < (unsigned long)mod->module_init + mod->init_size;
75448+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
75449 }
75450
75451 /* Search for module by name: must hold module_mutex. */
75452diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
75453index 560ca53..ef621ef 100644
75454--- a/include/linux/moduleloader.h
75455+++ b/include/linux/moduleloader.h
75456@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
75457 sections. Returns NULL on failure. */
75458 void *module_alloc(unsigned long size);
75459
75460+#ifdef CONFIG_PAX_KERNEXEC
75461+void *module_alloc_exec(unsigned long size);
75462+#else
75463+#define module_alloc_exec(x) module_alloc(x)
75464+#endif
75465+
75466 /* Free memory returned from module_alloc. */
75467 void module_free(struct module *mod, void *module_region);
75468
75469+#ifdef CONFIG_PAX_KERNEXEC
75470+void module_free_exec(struct module *mod, void *module_region);
75471+#else
75472+#define module_free_exec(x, y) module_free((x), (y))
75473+#endif
75474+
75475 /*
75476 * Apply the given relocation to the (simplified) ELF. Return -error
75477 * or 0.
75478@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
75479 unsigned int relsec,
75480 struct module *me)
75481 {
75482+#ifdef CONFIG_MODULES
75483 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
75484+#endif
75485 return -ENOEXEC;
75486 }
75487 #endif
75488@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
75489 unsigned int relsec,
75490 struct module *me)
75491 {
75492+#ifdef CONFIG_MODULES
75493 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
75494+#endif
75495 return -ENOEXEC;
75496 }
75497 #endif
75498diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
75499index 137b419..fe663ec 100644
75500--- a/include/linux/moduleparam.h
75501+++ b/include/linux/moduleparam.h
75502@@ -284,7 +284,7 @@ static inline void __kernel_param_unlock(void)
75503 * @len is usually just sizeof(string).
75504 */
75505 #define module_param_string(name, string, len, perm) \
75506- static const struct kparam_string __param_string_##name \
75507+ static const struct kparam_string __param_string_##name __used \
75508 = { len, string }; \
75509 __module_param_call(MODULE_PARAM_PREFIX, name, \
75510 &param_ops_string, \
75511@@ -423,7 +423,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
75512 */
75513 #define module_param_array_named(name, array, type, nump, perm) \
75514 param_check_##type(name, &(array)[0]); \
75515- static const struct kparam_array __param_arr_##name \
75516+ static const struct kparam_array __param_arr_##name __used \
75517 = { .max = ARRAY_SIZE(array), .num = nump, \
75518 .ops = &param_ops_##type, \
75519 .elemsize = sizeof(array[0]), .elem = array }; \
75520diff --git a/include/linux/namei.h b/include/linux/namei.h
75521index 5a5ff57..5ae5070 100644
75522--- a/include/linux/namei.h
75523+++ b/include/linux/namei.h
75524@@ -19,7 +19,7 @@ struct nameidata {
75525 unsigned seq;
75526 int last_type;
75527 unsigned depth;
75528- char *saved_names[MAX_NESTED_LINKS + 1];
75529+ const char *saved_names[MAX_NESTED_LINKS + 1];
75530 };
75531
75532 /*
75533@@ -84,12 +84,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
75534
75535 extern void nd_jump_link(struct nameidata *nd, struct path *path);
75536
75537-static inline void nd_set_link(struct nameidata *nd, char *path)
75538+static inline void nd_set_link(struct nameidata *nd, const char *path)
75539 {
75540 nd->saved_names[nd->depth] = path;
75541 }
75542
75543-static inline char *nd_get_link(struct nameidata *nd)
75544+static inline const char *nd_get_link(const struct nameidata *nd)
75545 {
75546 return nd->saved_names[nd->depth];
75547 }
75548diff --git a/include/linux/net.h b/include/linux/net.h
75549index 99c9f0c..e1cf296 100644
75550--- a/include/linux/net.h
75551+++ b/include/linux/net.h
75552@@ -183,7 +183,7 @@ struct net_proto_family {
75553 int (*create)(struct net *net, struct socket *sock,
75554 int protocol, int kern);
75555 struct module *owner;
75556-};
75557+} __do_const;
75558
75559 struct iovec;
75560 struct kvec;
75561diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
75562index 96e4c21..9cc8278 100644
75563--- a/include/linux/netdevice.h
75564+++ b/include/linux/netdevice.h
75565@@ -1026,6 +1026,7 @@ struct net_device_ops {
75566 int (*ndo_change_carrier)(struct net_device *dev,
75567 bool new_carrier);
75568 };
75569+typedef struct net_device_ops __no_const net_device_ops_no_const;
75570
75571 /*
75572 * The DEVICE structure.
75573@@ -1094,7 +1095,7 @@ struct net_device {
75574 int iflink;
75575
75576 struct net_device_stats stats;
75577- atomic_long_t rx_dropped; /* dropped packets by core network
75578+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
75579 * Do not use this in drivers.
75580 */
75581
75582diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
75583index 0060fde..481c6ae 100644
75584--- a/include/linux/netfilter.h
75585+++ b/include/linux/netfilter.h
75586@@ -82,7 +82,7 @@ struct nf_sockopt_ops {
75587 #endif
75588 /* Use the module struct to lock set/get code in place */
75589 struct module *owner;
75590-};
75591+} __do_const;
75592
75593 /* Function to register/unregister hook points. */
75594 int nf_register_hook(struct nf_hook_ops *reg);
75595diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
75596index d80e275..c3510b8 100644
75597--- a/include/linux/netfilter/ipset/ip_set.h
75598+++ b/include/linux/netfilter/ipset/ip_set.h
75599@@ -124,7 +124,7 @@ struct ip_set_type_variant {
75600 /* Return true if "b" set is the same as "a"
75601 * according to the create set parameters */
75602 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
75603-};
75604+} __do_const;
75605
75606 /* The core set type structure */
75607 struct ip_set_type {
75608diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
75609index cadb740..d7c37c0 100644
75610--- a/include/linux/netfilter/nfnetlink.h
75611+++ b/include/linux/netfilter/nfnetlink.h
75612@@ -16,7 +16,7 @@ struct nfnl_callback {
75613 const struct nlattr * const cda[]);
75614 const struct nla_policy *policy; /* netlink attribute policy */
75615 const u_int16_t attr_count; /* number of nlattr's */
75616-};
75617+} __do_const;
75618
75619 struct nfnetlink_subsystem {
75620 const char *name;
75621diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
75622new file mode 100644
75623index 0000000..33f4af8
75624--- /dev/null
75625+++ b/include/linux/netfilter/xt_gradm.h
75626@@ -0,0 +1,9 @@
75627+#ifndef _LINUX_NETFILTER_XT_GRADM_H
75628+#define _LINUX_NETFILTER_XT_GRADM_H 1
75629+
75630+struct xt_gradm_mtinfo {
75631+ __u16 flags;
75632+ __u16 invflags;
75633+};
75634+
75635+#endif
75636diff --git a/include/linux/nls.h b/include/linux/nls.h
75637index 5dc635f..35f5e11 100644
75638--- a/include/linux/nls.h
75639+++ b/include/linux/nls.h
75640@@ -31,7 +31,7 @@ struct nls_table {
75641 const unsigned char *charset2upper;
75642 struct module *owner;
75643 struct nls_table *next;
75644-};
75645+} __do_const;
75646
75647 /* this value hold the maximum octet of charset */
75648 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
75649diff --git a/include/linux/notifier.h b/include/linux/notifier.h
75650index d14a4c3..a078786 100644
75651--- a/include/linux/notifier.h
75652+++ b/include/linux/notifier.h
75653@@ -54,7 +54,8 @@ struct notifier_block {
75654 notifier_fn_t notifier_call;
75655 struct notifier_block __rcu *next;
75656 int priority;
75657-};
75658+} __do_const;
75659+typedef struct notifier_block __no_const notifier_block_no_const;
75660
75661 struct atomic_notifier_head {
75662 spinlock_t lock;
75663diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
75664index a4c5624..79d6d88 100644
75665--- a/include/linux/oprofile.h
75666+++ b/include/linux/oprofile.h
75667@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
75668 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
75669 char const * name, ulong * val);
75670
75671-/** Create a file for read-only access to an atomic_t. */
75672+/** Create a file for read-only access to an atomic_unchecked_t. */
75673 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
75674- char const * name, atomic_t * val);
75675+ char const * name, atomic_unchecked_t * val);
75676
75677 /** create a directory */
75678 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
75679diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
75680index 8db71dc..a76bf2c 100644
75681--- a/include/linux/pci_hotplug.h
75682+++ b/include/linux/pci_hotplug.h
75683@@ -80,7 +80,8 @@ struct hotplug_slot_ops {
75684 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
75685 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
75686 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
75687-};
75688+} __do_const;
75689+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
75690
75691 /**
75692 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
75693diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
75694index c5b6dbf..b124155 100644
75695--- a/include/linux/perf_event.h
75696+++ b/include/linux/perf_event.h
75697@@ -318,8 +318,8 @@ struct perf_event {
75698
75699 enum perf_event_active_state state;
75700 unsigned int attach_state;
75701- local64_t count;
75702- atomic64_t child_count;
75703+ local64_t count; /* PaX: fix it one day */
75704+ atomic64_unchecked_t child_count;
75705
75706 /*
75707 * These are the total time in nanoseconds that the event
75708@@ -370,8 +370,8 @@ struct perf_event {
75709 * These accumulate total time (in nanoseconds) that children
75710 * events have been enabled and running, respectively.
75711 */
75712- atomic64_t child_total_time_enabled;
75713- atomic64_t child_total_time_running;
75714+ atomic64_unchecked_t child_total_time_enabled;
75715+ atomic64_unchecked_t child_total_time_running;
75716
75717 /*
75718 * Protect attach/detach and child_list:
75719@@ -692,7 +692,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
75720 entry->ip[entry->nr++] = ip;
75721 }
75722
75723-extern int sysctl_perf_event_paranoid;
75724+extern int sysctl_perf_event_legitimately_concerned;
75725 extern int sysctl_perf_event_mlock;
75726 extern int sysctl_perf_event_sample_rate;
75727
75728@@ -700,19 +700,24 @@ extern int perf_proc_update_handler(struct ctl_table *table, int write,
75729 void __user *buffer, size_t *lenp,
75730 loff_t *ppos);
75731
75732+static inline bool perf_paranoid_any(void)
75733+{
75734+ return sysctl_perf_event_legitimately_concerned > 2;
75735+}
75736+
75737 static inline bool perf_paranoid_tracepoint_raw(void)
75738 {
75739- return sysctl_perf_event_paranoid > -1;
75740+ return sysctl_perf_event_legitimately_concerned > -1;
75741 }
75742
75743 static inline bool perf_paranoid_cpu(void)
75744 {
75745- return sysctl_perf_event_paranoid > 0;
75746+ return sysctl_perf_event_legitimately_concerned > 0;
75747 }
75748
75749 static inline bool perf_paranoid_kernel(void)
75750 {
75751- return sysctl_perf_event_paranoid > 1;
75752+ return sysctl_perf_event_legitimately_concerned > 1;
75753 }
75754
75755 extern void perf_event_init(void);
75756@@ -806,7 +811,7 @@ static inline void perf_restore_debug_store(void) { }
75757 */
75758 #define perf_cpu_notifier(fn) \
75759 do { \
75760- static struct notifier_block fn##_nb __cpuinitdata = \
75761+ static struct notifier_block fn##_nb = \
75762 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
75763 unsigned long cpu = smp_processor_id(); \
75764 unsigned long flags; \
75765@@ -826,7 +831,7 @@ struct perf_pmu_events_attr {
75766 struct device_attribute attr;
75767 u64 id;
75768 const char *event_str;
75769-};
75770+} __do_const;
75771
75772 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
75773 static struct perf_pmu_events_attr _var = { \
75774diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
75775index b8809fe..ae4ccd0 100644
75776--- a/include/linux/pipe_fs_i.h
75777+++ b/include/linux/pipe_fs_i.h
75778@@ -47,10 +47,10 @@ struct pipe_inode_info {
75779 struct mutex mutex;
75780 wait_queue_head_t wait;
75781 unsigned int nrbufs, curbuf, buffers;
75782- unsigned int readers;
75783- unsigned int writers;
75784- unsigned int files;
75785- unsigned int waiting_writers;
75786+ atomic_t readers;
75787+ atomic_t writers;
75788+ atomic_t files;
75789+ atomic_t waiting_writers;
75790 unsigned int r_counter;
75791 unsigned int w_counter;
75792 struct page *tmp_page;
75793diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
75794index 5f28cae..3d23723 100644
75795--- a/include/linux/platform_data/usb-ehci-s5p.h
75796+++ b/include/linux/platform_data/usb-ehci-s5p.h
75797@@ -14,7 +14,7 @@
75798 struct s5p_ehci_platdata {
75799 int (*phy_init)(struct platform_device *pdev, int type);
75800 int (*phy_exit)(struct platform_device *pdev, int type);
75801-};
75802+} __no_const;
75803
75804 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
75805
75806diff --git a/include/linux/platform_data/usb-ohci-exynos.h b/include/linux/platform_data/usb-ohci-exynos.h
75807index c256c59..8ea94c7 100644
75808--- a/include/linux/platform_data/usb-ohci-exynos.h
75809+++ b/include/linux/platform_data/usb-ohci-exynos.h
75810@@ -14,7 +14,7 @@
75811 struct exynos4_ohci_platdata {
75812 int (*phy_init)(struct platform_device *pdev, int type);
75813 int (*phy_exit)(struct platform_device *pdev, int type);
75814-};
75815+} __no_const;
75816
75817 extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
75818
75819diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
75820index 7c1d252..c5c773e 100644
75821--- a/include/linux/pm_domain.h
75822+++ b/include/linux/pm_domain.h
75823@@ -48,7 +48,7 @@ struct gpd_dev_ops {
75824
75825 struct gpd_cpu_data {
75826 unsigned int saved_exit_latency;
75827- struct cpuidle_state *idle_state;
75828+ cpuidle_state_no_const *idle_state;
75829 };
75830
75831 struct generic_pm_domain {
75832diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
75833index 7d7e09e..8671ef8 100644
75834--- a/include/linux/pm_runtime.h
75835+++ b/include/linux/pm_runtime.h
75836@@ -104,7 +104,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
75837
75838 static inline void pm_runtime_mark_last_busy(struct device *dev)
75839 {
75840- ACCESS_ONCE(dev->power.last_busy) = jiffies;
75841+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
75842 }
75843
75844 #else /* !CONFIG_PM_RUNTIME */
75845diff --git a/include/linux/pnp.h b/include/linux/pnp.h
75846index 195aafc..49a7bc2 100644
75847--- a/include/linux/pnp.h
75848+++ b/include/linux/pnp.h
75849@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
75850 struct pnp_fixup {
75851 char id[7];
75852 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
75853-};
75854+} __do_const;
75855
75856 /* config parameters */
75857 #define PNP_CONFIG_NORMAL 0x0001
75858diff --git a/include/linux/poison.h b/include/linux/poison.h
75859index 2110a81..13a11bb 100644
75860--- a/include/linux/poison.h
75861+++ b/include/linux/poison.h
75862@@ -19,8 +19,8 @@
75863 * under normal circumstances, used to verify that nobody uses
75864 * non-initialized list entries.
75865 */
75866-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
75867-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
75868+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
75869+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
75870
75871 /********** include/linux/timer.h **********/
75872 /*
75873diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
75874index c0f44c2..1572583 100644
75875--- a/include/linux/power/smartreflex.h
75876+++ b/include/linux/power/smartreflex.h
75877@@ -238,7 +238,7 @@ struct omap_sr_class_data {
75878 int (*notify)(struct omap_sr *sr, u32 status);
75879 u8 notify_flags;
75880 u8 class_type;
75881-};
75882+} __do_const;
75883
75884 /**
75885 * struct omap_sr_nvalue_table - Smartreflex n-target value info
75886diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
75887index 4ea1d37..80f4b33 100644
75888--- a/include/linux/ppp-comp.h
75889+++ b/include/linux/ppp-comp.h
75890@@ -84,7 +84,7 @@ struct compressor {
75891 struct module *owner;
75892 /* Extra skb space needed by the compressor algorithm */
75893 unsigned int comp_extra;
75894-};
75895+} __do_const;
75896
75897 /*
75898 * The return value from decompress routine is the length of the
75899diff --git a/include/linux/preempt.h b/include/linux/preempt.h
75900index f5d4723..a6ea2fa 100644
75901--- a/include/linux/preempt.h
75902+++ b/include/linux/preempt.h
75903@@ -18,8 +18,13 @@
75904 # define sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
75905 #endif
75906
75907+#define raw_add_preempt_count(val) do { preempt_count() += (val); } while (0)
75908+#define raw_sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
75909+
75910 #define inc_preempt_count() add_preempt_count(1)
75911+#define raw_inc_preempt_count() raw_add_preempt_count(1)
75912 #define dec_preempt_count() sub_preempt_count(1)
75913+#define raw_dec_preempt_count() raw_sub_preempt_count(1)
75914
75915 #define preempt_count() (current_thread_info()->preempt_count)
75916
75917@@ -64,6 +69,12 @@ do { \
75918 barrier(); \
75919 } while (0)
75920
75921+#define raw_preempt_disable() \
75922+do { \
75923+ raw_inc_preempt_count(); \
75924+ barrier(); \
75925+} while (0)
75926+
75927 #define sched_preempt_enable_no_resched() \
75928 do { \
75929 barrier(); \
75930@@ -72,6 +83,12 @@ do { \
75931
75932 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
75933
75934+#define raw_preempt_enable_no_resched() \
75935+do { \
75936+ barrier(); \
75937+ raw_dec_preempt_count(); \
75938+} while (0)
75939+
75940 #define preempt_enable() \
75941 do { \
75942 preempt_enable_no_resched(); \
75943@@ -116,8 +133,10 @@ do { \
75944 * region.
75945 */
75946 #define preempt_disable() barrier()
75947+#define raw_preempt_disable() barrier()
75948 #define sched_preempt_enable_no_resched() barrier()
75949 #define preempt_enable_no_resched() barrier()
75950+#define raw_preempt_enable_no_resched() barrier()
75951 #define preempt_enable() barrier()
75952
75953 #define preempt_disable_notrace() barrier()
75954diff --git a/include/linux/printk.h b/include/linux/printk.h
75955index 22c7052..ad3fa0a 100644
75956--- a/include/linux/printk.h
75957+++ b/include/linux/printk.h
75958@@ -106,6 +106,8 @@ static inline __printf(1, 2) __cold
75959 void early_printk(const char *s, ...) { }
75960 #endif
75961
75962+extern int kptr_restrict;
75963+
75964 #ifdef CONFIG_PRINTK
75965 asmlinkage __printf(5, 0)
75966 int vprintk_emit(int facility, int level,
75967@@ -140,7 +142,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
75968
75969 extern int printk_delay_msec;
75970 extern int dmesg_restrict;
75971-extern int kptr_restrict;
75972
75973 extern void wake_up_klogd(void);
75974
75975diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
75976index 608e60a..c26f864 100644
75977--- a/include/linux/proc_fs.h
75978+++ b/include/linux/proc_fs.h
75979@@ -34,6 +34,19 @@ static inline struct proc_dir_entry *proc_create(
75980 return proc_create_data(name, mode, parent, proc_fops, NULL);
75981 }
75982
75983+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
75984+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
75985+{
75986+#ifdef CONFIG_GRKERNSEC_PROC_USER
75987+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
75988+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75989+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
75990+#else
75991+ return proc_create_data(name, mode, parent, proc_fops, NULL);
75992+#endif
75993+}
75994+
75995+
75996 extern void proc_set_size(struct proc_dir_entry *, loff_t);
75997 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
75998 extern void *PDE_DATA(const struct inode *);
75999diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
76000index 34a1e10..03a6d03 100644
76001--- a/include/linux/proc_ns.h
76002+++ b/include/linux/proc_ns.h
76003@@ -14,7 +14,7 @@ struct proc_ns_operations {
76004 void (*put)(void *ns);
76005 int (*install)(struct nsproxy *nsproxy, void *ns);
76006 unsigned int (*inum)(void *ns);
76007-};
76008+} __do_const;
76009
76010 struct proc_ns {
76011 void *ns;
76012diff --git a/include/linux/random.h b/include/linux/random.h
76013index 3b9377d..61b506a 100644
76014--- a/include/linux/random.h
76015+++ b/include/linux/random.h
76016@@ -32,6 +32,11 @@ void prandom_seed(u32 seed);
76017 u32 prandom_u32_state(struct rnd_state *);
76018 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
76019
76020+static inline unsigned long pax_get_random_long(void)
76021+{
76022+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
76023+}
76024+
76025 /*
76026 * Handle minimum values for seeds
76027 */
76028diff --git a/include/linux/rculist.h b/include/linux/rculist.h
76029index f4b1001..8ddb2b6 100644
76030--- a/include/linux/rculist.h
76031+++ b/include/linux/rculist.h
76032@@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
76033 struct list_head *prev, struct list_head *next);
76034 #endif
76035
76036+extern void __pax_list_add_rcu(struct list_head *new,
76037+ struct list_head *prev, struct list_head *next);
76038+
76039 /**
76040 * list_add_rcu - add a new entry to rcu-protected list
76041 * @new: new entry to be added
76042@@ -65,6 +68,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
76043 __list_add_rcu(new, head, head->next);
76044 }
76045
76046+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
76047+{
76048+ __pax_list_add_rcu(new, head, head->next);
76049+}
76050+
76051 /**
76052 * list_add_tail_rcu - add a new entry to rcu-protected list
76053 * @new: new entry to be added
76054@@ -87,6 +95,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
76055 __list_add_rcu(new, head->prev, head);
76056 }
76057
76058+static inline void pax_list_add_tail_rcu(struct list_head *new,
76059+ struct list_head *head)
76060+{
76061+ __pax_list_add_rcu(new, head->prev, head);
76062+}
76063+
76064 /**
76065 * list_del_rcu - deletes entry from list without re-initialization
76066 * @entry: the element to delete from the list.
76067@@ -117,6 +131,8 @@ static inline void list_del_rcu(struct list_head *entry)
76068 entry->prev = LIST_POISON2;
76069 }
76070
76071+extern void pax_list_del_rcu(struct list_head *entry);
76072+
76073 /**
76074 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
76075 * @n: the element to delete from the hash list.
76076diff --git a/include/linux/reboot.h b/include/linux/reboot.h
76077index 23b3630..e1bc12b 100644
76078--- a/include/linux/reboot.h
76079+++ b/include/linux/reboot.h
76080@@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
76081 * Architecture-specific implementations of sys_reboot commands.
76082 */
76083
76084-extern void machine_restart(char *cmd);
76085-extern void machine_halt(void);
76086-extern void machine_power_off(void);
76087+extern void machine_restart(char *cmd) __noreturn;
76088+extern void machine_halt(void) __noreturn;
76089+extern void machine_power_off(void) __noreturn;
76090
76091 extern void machine_shutdown(void);
76092 struct pt_regs;
76093@@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
76094 */
76095
76096 extern void kernel_restart_prepare(char *cmd);
76097-extern void kernel_restart(char *cmd);
76098-extern void kernel_halt(void);
76099-extern void kernel_power_off(void);
76100+extern void kernel_restart(char *cmd) __noreturn;
76101+extern void kernel_halt(void) __noreturn;
76102+extern void kernel_power_off(void) __noreturn;
76103
76104 extern int C_A_D; /* for sysctl */
76105 void ctrl_alt_del(void);
76106@@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
76107 * Emergency restart, callable from an interrupt handler.
76108 */
76109
76110-extern void emergency_restart(void);
76111+extern void emergency_restart(void) __noreturn;
76112 #include <asm/emergency-restart.h>
76113
76114 #endif /* _LINUX_REBOOT_H */
76115diff --git a/include/linux/regset.h b/include/linux/regset.h
76116index 8e0c9fe..ac4d221 100644
76117--- a/include/linux/regset.h
76118+++ b/include/linux/regset.h
76119@@ -161,7 +161,8 @@ struct user_regset {
76120 unsigned int align;
76121 unsigned int bias;
76122 unsigned int core_note_type;
76123-};
76124+} __do_const;
76125+typedef struct user_regset __no_const user_regset_no_const;
76126
76127 /**
76128 * struct user_regset_view - available regsets
76129diff --git a/include/linux/relay.h b/include/linux/relay.h
76130index d7c8359..818daf5 100644
76131--- a/include/linux/relay.h
76132+++ b/include/linux/relay.h
76133@@ -157,7 +157,7 @@ struct rchan_callbacks
76134 * The callback should return 0 if successful, negative if not.
76135 */
76136 int (*remove_buf_file)(struct dentry *dentry);
76137-};
76138+} __no_const;
76139
76140 /*
76141 * CONFIG_RELAY kernel API, kernel/relay.c
76142diff --git a/include/linux/rio.h b/include/linux/rio.h
76143index 18e0993..8ab5b21 100644
76144--- a/include/linux/rio.h
76145+++ b/include/linux/rio.h
76146@@ -345,7 +345,7 @@ struct rio_ops {
76147 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
76148 u64 rstart, u32 size, u32 flags);
76149 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
76150-};
76151+} __no_const;
76152
76153 #define RIO_RESOURCE_MEM 0x00000100
76154 #define RIO_RESOURCE_DOORBELL 0x00000200
76155diff --git a/include/linux/rmap.h b/include/linux/rmap.h
76156index 6dacb93..6174423 100644
76157--- a/include/linux/rmap.h
76158+++ b/include/linux/rmap.h
76159@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
76160 void anon_vma_init(void); /* create anon_vma_cachep */
76161 int anon_vma_prepare(struct vm_area_struct *);
76162 void unlink_anon_vmas(struct vm_area_struct *);
76163-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
76164-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
76165+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
76166+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
76167
76168 static inline void anon_vma_merge(struct vm_area_struct *vma,
76169 struct vm_area_struct *next)
76170diff --git a/include/linux/sched.h b/include/linux/sched.h
76171index 178a8d9..918ea01 100644
76172--- a/include/linux/sched.h
76173+++ b/include/linux/sched.h
76174@@ -62,6 +62,7 @@ struct bio_list;
76175 struct fs_struct;
76176 struct perf_event_context;
76177 struct blk_plug;
76178+struct linux_binprm;
76179
76180 /*
76181 * List of flags we want to share for kernel threads,
76182@@ -303,7 +304,7 @@ extern char __sched_text_start[], __sched_text_end[];
76183 extern int in_sched_functions(unsigned long addr);
76184
76185 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
76186-extern signed long schedule_timeout(signed long timeout);
76187+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
76188 extern signed long schedule_timeout_interruptible(signed long timeout);
76189 extern signed long schedule_timeout_killable(signed long timeout);
76190 extern signed long schedule_timeout_uninterruptible(signed long timeout);
76191@@ -314,6 +315,18 @@ struct nsproxy;
76192 struct user_namespace;
76193
76194 #ifdef CONFIG_MMU
76195+
76196+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
76197+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
76198+#else
76199+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
76200+{
76201+ return 0;
76202+}
76203+#endif
76204+
76205+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
76206+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
76207 extern void arch_pick_mmap_layout(struct mm_struct *mm);
76208 extern unsigned long
76209 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
76210@@ -591,6 +604,17 @@ struct signal_struct {
76211 #ifdef CONFIG_TASKSTATS
76212 struct taskstats *stats;
76213 #endif
76214+
76215+#ifdef CONFIG_GRKERNSEC
76216+ u32 curr_ip;
76217+ u32 saved_ip;
76218+ u32 gr_saddr;
76219+ u32 gr_daddr;
76220+ u16 gr_sport;
76221+ u16 gr_dport;
76222+ u8 used_accept:1;
76223+#endif
76224+
76225 #ifdef CONFIG_AUDIT
76226 unsigned audit_tty;
76227 unsigned audit_tty_log_passwd;
76228@@ -671,6 +695,14 @@ struct user_struct {
76229 struct key *session_keyring; /* UID's default session keyring */
76230 #endif
76231
76232+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
76233+ unsigned char kernel_banned;
76234+#endif
76235+#ifdef CONFIG_GRKERNSEC_BRUTE
76236+ unsigned char suid_banned;
76237+ unsigned long suid_ban_expires;
76238+#endif
76239+
76240 /* Hash table maintenance information */
76241 struct hlist_node uidhash_node;
76242 kuid_t uid;
76243@@ -1158,8 +1190,8 @@ struct task_struct {
76244 struct list_head thread_group;
76245
76246 struct completion *vfork_done; /* for vfork() */
76247- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
76248- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
76249+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
76250+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
76251
76252 cputime_t utime, stime, utimescaled, stimescaled;
76253 cputime_t gtime;
76254@@ -1184,11 +1216,6 @@ struct task_struct {
76255 struct task_cputime cputime_expires;
76256 struct list_head cpu_timers[3];
76257
76258-/* process credentials */
76259- const struct cred __rcu *real_cred; /* objective and real subjective task
76260- * credentials (COW) */
76261- const struct cred __rcu *cred; /* effective (overridable) subjective task
76262- * credentials (COW) */
76263 char comm[TASK_COMM_LEN]; /* executable name excluding path
76264 - access with [gs]et_task_comm (which lock
76265 it with task_lock())
76266@@ -1205,6 +1232,10 @@ struct task_struct {
76267 #endif
76268 /* CPU-specific state of this task */
76269 struct thread_struct thread;
76270+/* thread_info moved to task_struct */
76271+#ifdef CONFIG_X86
76272+ struct thread_info tinfo;
76273+#endif
76274 /* filesystem information */
76275 struct fs_struct *fs;
76276 /* open file information */
76277@@ -1278,6 +1309,10 @@ struct task_struct {
76278 gfp_t lockdep_reclaim_gfp;
76279 #endif
76280
76281+/* process credentials */
76282+ const struct cred __rcu *real_cred; /* objective and real subjective task
76283+ * credentials (COW) */
76284+
76285 /* journalling filesystem info */
76286 void *journal_info;
76287
76288@@ -1316,6 +1351,10 @@ struct task_struct {
76289 /* cg_list protected by css_set_lock and tsk->alloc_lock */
76290 struct list_head cg_list;
76291 #endif
76292+
76293+ const struct cred __rcu *cred; /* effective (overridable) subjective task
76294+ * credentials (COW) */
76295+
76296 #ifdef CONFIG_FUTEX
76297 struct robust_list_head __user *robust_list;
76298 #ifdef CONFIG_COMPAT
76299@@ -1416,8 +1455,76 @@ struct task_struct {
76300 unsigned int sequential_io;
76301 unsigned int sequential_io_avg;
76302 #endif
76303+
76304+#ifdef CONFIG_GRKERNSEC
76305+ /* grsecurity */
76306+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
76307+ u64 exec_id;
76308+#endif
76309+#ifdef CONFIG_GRKERNSEC_SETXID
76310+ const struct cred *delayed_cred;
76311+#endif
76312+ struct dentry *gr_chroot_dentry;
76313+ struct acl_subject_label *acl;
76314+ struct acl_role_label *role;
76315+ struct file *exec_file;
76316+ unsigned long brute_expires;
76317+ u16 acl_role_id;
76318+ /* is this the task that authenticated to the special role */
76319+ u8 acl_sp_role;
76320+ u8 is_writable;
76321+ u8 brute;
76322+ u8 gr_is_chrooted;
76323+#endif
76324+
76325 };
76326
76327+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
76328+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
76329+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
76330+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
76331+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
76332+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
76333+
76334+#ifdef CONFIG_PAX_SOFTMODE
76335+extern int pax_softmode;
76336+#endif
76337+
76338+extern int pax_check_flags(unsigned long *);
76339+
76340+/* if tsk != current then task_lock must be held on it */
76341+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
76342+static inline unsigned long pax_get_flags(struct task_struct *tsk)
76343+{
76344+ if (likely(tsk->mm))
76345+ return tsk->mm->pax_flags;
76346+ else
76347+ return 0UL;
76348+}
76349+
76350+/* if tsk != current then task_lock must be held on it */
76351+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
76352+{
76353+ if (likely(tsk->mm)) {
76354+ tsk->mm->pax_flags = flags;
76355+ return 0;
76356+ }
76357+ return -EINVAL;
76358+}
76359+#endif
76360+
76361+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
76362+extern void pax_set_initial_flags(struct linux_binprm *bprm);
76363+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
76364+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
76365+#endif
76366+
76367+struct path;
76368+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
76369+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
76370+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
76371+extern void pax_report_refcount_overflow(struct pt_regs *regs);
76372+
76373 /* Future-safe accessor for struct task_struct's cpus_allowed. */
76374 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
76375
76376@@ -1476,7 +1583,7 @@ struct pid_namespace;
76377 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
76378 struct pid_namespace *ns);
76379
76380-static inline pid_t task_pid_nr(struct task_struct *tsk)
76381+static inline pid_t task_pid_nr(const struct task_struct *tsk)
76382 {
76383 return tsk->pid;
76384 }
76385@@ -1919,7 +2026,9 @@ void yield(void);
76386 extern struct exec_domain default_exec_domain;
76387
76388 union thread_union {
76389+#ifndef CONFIG_X86
76390 struct thread_info thread_info;
76391+#endif
76392 unsigned long stack[THREAD_SIZE/sizeof(long)];
76393 };
76394
76395@@ -1952,6 +2061,7 @@ extern struct pid_namespace init_pid_ns;
76396 */
76397
76398 extern struct task_struct *find_task_by_vpid(pid_t nr);
76399+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
76400 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
76401 struct pid_namespace *ns);
76402
76403@@ -2118,7 +2228,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
76404 extern void exit_itimers(struct signal_struct *);
76405 extern void flush_itimer_signals(void);
76406
76407-extern void do_group_exit(int);
76408+extern __noreturn void do_group_exit(int);
76409
76410 extern int allow_signal(int);
76411 extern int disallow_signal(int);
76412@@ -2309,9 +2419,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
76413
76414 #endif
76415
76416-static inline int object_is_on_stack(void *obj)
76417+static inline int object_starts_on_stack(void *obj)
76418 {
76419- void *stack = task_stack_page(current);
76420+ const void *stack = task_stack_page(current);
76421
76422 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
76423 }
76424diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
76425index bf8086b..962b035 100644
76426--- a/include/linux/sched/sysctl.h
76427+++ b/include/linux/sched/sysctl.h
76428@@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
76429 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
76430
76431 extern int sysctl_max_map_count;
76432+extern unsigned long sysctl_heap_stack_gap;
76433
76434 extern unsigned int sysctl_sched_latency;
76435 extern unsigned int sysctl_sched_min_granularity;
76436diff --git a/include/linux/security.h b/include/linux/security.h
76437index 4686491..2bd210e 100644
76438--- a/include/linux/security.h
76439+++ b/include/linux/security.h
76440@@ -26,6 +26,7 @@
76441 #include <linux/capability.h>
76442 #include <linux/slab.h>
76443 #include <linux/err.h>
76444+#include <linux/grsecurity.h>
76445
76446 struct linux_binprm;
76447 struct cred;
76448diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
76449index 2da29ac..aac448ec 100644
76450--- a/include/linux/seq_file.h
76451+++ b/include/linux/seq_file.h
76452@@ -26,6 +26,9 @@ struct seq_file {
76453 struct mutex lock;
76454 const struct seq_operations *op;
76455 int poll_event;
76456+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
76457+ u64 exec_id;
76458+#endif
76459 #ifdef CONFIG_USER_NS
76460 struct user_namespace *user_ns;
76461 #endif
76462@@ -38,6 +41,7 @@ struct seq_operations {
76463 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
76464 int (*show) (struct seq_file *m, void *v);
76465 };
76466+typedef struct seq_operations __no_const seq_operations_no_const;
76467
76468 #define SEQ_SKIP 1
76469
76470diff --git a/include/linux/shm.h b/include/linux/shm.h
76471index 429c199..4d42e38 100644
76472--- a/include/linux/shm.h
76473+++ b/include/linux/shm.h
76474@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
76475
76476 /* The task created the shm object. NULL if the task is dead. */
76477 struct task_struct *shm_creator;
76478+#ifdef CONFIG_GRKERNSEC
76479+ time_t shm_createtime;
76480+ pid_t shm_lapid;
76481+#endif
76482 };
76483
76484 /* shm_mode upper byte flags */
76485diff --git a/include/linux/signal.h b/include/linux/signal.h
76486index d897484..323ba98 100644
76487--- a/include/linux/signal.h
76488+++ b/include/linux/signal.h
76489@@ -433,6 +433,7 @@ void signals_init(void);
76490
76491 int restore_altstack(const stack_t __user *);
76492 int __save_altstack(stack_t __user *, unsigned long);
76493+void __save_altstack_ex(stack_t __user *, unsigned long);
76494
76495 #ifdef CONFIG_PROC_FS
76496 struct seq_file;
76497diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
76498index dec1748..112c1f9 100644
76499--- a/include/linux/skbuff.h
76500+++ b/include/linux/skbuff.h
76501@@ -640,7 +640,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
76502 extern struct sk_buff *__alloc_skb(unsigned int size,
76503 gfp_t priority, int flags, int node);
76504 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
76505-static inline struct sk_buff *alloc_skb(unsigned int size,
76506+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
76507 gfp_t priority)
76508 {
76509 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
76510@@ -756,7 +756,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
76511 */
76512 static inline int skb_queue_empty(const struct sk_buff_head *list)
76513 {
76514- return list->next == (struct sk_buff *)list;
76515+ return list->next == (const struct sk_buff *)list;
76516 }
76517
76518 /**
76519@@ -769,7 +769,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
76520 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
76521 const struct sk_buff *skb)
76522 {
76523- return skb->next == (struct sk_buff *)list;
76524+ return skb->next == (const struct sk_buff *)list;
76525 }
76526
76527 /**
76528@@ -782,7 +782,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
76529 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
76530 const struct sk_buff *skb)
76531 {
76532- return skb->prev == (struct sk_buff *)list;
76533+ return skb->prev == (const struct sk_buff *)list;
76534 }
76535
76536 /**
76537@@ -1848,7 +1848,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
76538 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
76539 */
76540 #ifndef NET_SKB_PAD
76541-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
76542+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
76543 #endif
76544
76545 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
76546@@ -2443,7 +2443,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
76547 int noblock, int *err);
76548 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
76549 struct poll_table_struct *wait);
76550-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
76551+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
76552 int offset, struct iovec *to,
76553 int size);
76554 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
76555@@ -2733,6 +2733,9 @@ static inline void nf_reset(struct sk_buff *skb)
76556 nf_bridge_put(skb->nf_bridge);
76557 skb->nf_bridge = NULL;
76558 #endif
76559+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
76560+ skb->nf_trace = 0;
76561+#endif
76562 }
76563
76564 static inline void nf_reset_trace(struct sk_buff *skb)
76565diff --git a/include/linux/slab.h b/include/linux/slab.h
76566index 0c62175..f016ac1 100644
76567--- a/include/linux/slab.h
76568+++ b/include/linux/slab.h
76569@@ -12,15 +12,29 @@
76570 #include <linux/gfp.h>
76571 #include <linux/types.h>
76572 #include <linux/workqueue.h>
76573-
76574+#include <linux/err.h>
76575
76576 /*
76577 * Flags to pass to kmem_cache_create().
76578 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
76579 */
76580 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
76581+
76582+#ifdef CONFIG_PAX_USERCOPY_SLABS
76583+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
76584+#else
76585+#define SLAB_USERCOPY 0x00000000UL
76586+#endif
76587+
76588 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
76589 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
76590+
76591+#ifdef CONFIG_PAX_MEMORY_SANITIZE
76592+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
76593+#else
76594+#define SLAB_NO_SANITIZE 0x00000000UL
76595+#endif
76596+
76597 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
76598 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
76599 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
76600@@ -89,10 +103,13 @@
76601 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
76602 * Both make kfree a no-op.
76603 */
76604-#define ZERO_SIZE_PTR ((void *)16)
76605+#define ZERO_SIZE_PTR \
76606+({ \
76607+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
76608+ (void *)(-MAX_ERRNO-1L); \
76609+})
76610
76611-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
76612- (unsigned long)ZERO_SIZE_PTR)
76613+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
76614
76615
76616 struct mem_cgroup;
76617@@ -132,6 +149,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
76618 void kfree(const void *);
76619 void kzfree(const void *);
76620 size_t ksize(const void *);
76621+const char *check_heap_object(const void *ptr, unsigned long n);
76622+bool is_usercopy_object(const void *ptr);
76623
76624 /*
76625 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
76626@@ -164,7 +183,7 @@ struct kmem_cache {
76627 unsigned int align; /* Alignment as calculated */
76628 unsigned long flags; /* Active flags on the slab */
76629 const char *name; /* Slab name for sysfs */
76630- int refcount; /* Use counter */
76631+ atomic_t refcount; /* Use counter */
76632 void (*ctor)(void *); /* Called on object slot creation */
76633 struct list_head list; /* List of all slab caches on the system */
76634 };
76635@@ -226,6 +245,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
76636 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
76637 #endif
76638
76639+#ifdef CONFIG_PAX_USERCOPY_SLABS
76640+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
76641+#endif
76642+
76643 /*
76644 * Figure out which kmalloc slab an allocation of a certain size
76645 * belongs to.
76646@@ -234,7 +257,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
76647 * 2 = 120 .. 192 bytes
76648 * n = 2^(n-1) .. 2^n -1
76649 */
76650-static __always_inline int kmalloc_index(size_t size)
76651+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
76652 {
76653 if (!size)
76654 return 0;
76655@@ -406,6 +429,7 @@ void print_slabinfo_header(struct seq_file *m);
76656 * for general use, and so are not documented here. For a full list of
76657 * potential flags, always refer to linux/gfp.h.
76658 */
76659+
76660 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
76661 {
76662 if (size != 0 && n > SIZE_MAX / size)
76663@@ -465,7 +489,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
76664 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
76665 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
76666 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
76667-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
76668+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
76669 #define kmalloc_track_caller(size, flags) \
76670 __kmalloc_track_caller(size, flags, _RET_IP_)
76671 #else
76672@@ -485,7 +509,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
76673 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
76674 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
76675 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
76676-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
76677+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
76678 #define kmalloc_node_track_caller(size, flags, node) \
76679 __kmalloc_node_track_caller(size, flags, node, \
76680 _RET_IP_)
76681diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
76682index cd40158..4e2f7af 100644
76683--- a/include/linux/slab_def.h
76684+++ b/include/linux/slab_def.h
76685@@ -50,7 +50,7 @@ struct kmem_cache {
76686 /* 4) cache creation/removal */
76687 const char *name;
76688 struct list_head list;
76689- int refcount;
76690+ atomic_t refcount;
76691 int object_size;
76692 int align;
76693
76694@@ -66,10 +66,14 @@ struct kmem_cache {
76695 unsigned long node_allocs;
76696 unsigned long node_frees;
76697 unsigned long node_overflow;
76698- atomic_t allochit;
76699- atomic_t allocmiss;
76700- atomic_t freehit;
76701- atomic_t freemiss;
76702+ atomic_unchecked_t allochit;
76703+ atomic_unchecked_t allocmiss;
76704+ atomic_unchecked_t freehit;
76705+ atomic_unchecked_t freemiss;
76706+#ifdef CONFIG_PAX_MEMORY_SANITIZE
76707+ atomic_unchecked_t sanitized;
76708+ atomic_unchecked_t not_sanitized;
76709+#endif
76710
76711 /*
76712 * If debugging is enabled, then the allocator can add additional
76713@@ -103,7 +107,7 @@ struct kmem_cache {
76714 };
76715
76716 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
76717-void *__kmalloc(size_t size, gfp_t flags);
76718+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
76719
76720 #ifdef CONFIG_TRACING
76721 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
76722@@ -136,6 +140,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
76723 cachep = kmalloc_dma_caches[i];
76724 else
76725 #endif
76726+
76727+#ifdef CONFIG_PAX_USERCOPY_SLABS
76728+ if (flags & GFP_USERCOPY)
76729+ cachep = kmalloc_usercopy_caches[i];
76730+ else
76731+#endif
76732+
76733 cachep = kmalloc_caches[i];
76734
76735 ret = kmem_cache_alloc_trace(cachep, flags, size);
76736@@ -146,7 +157,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
76737 }
76738
76739 #ifdef CONFIG_NUMA
76740-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
76741+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
76742 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
76743
76744 #ifdef CONFIG_TRACING
76745@@ -185,6 +196,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
76746 cachep = kmalloc_dma_caches[i];
76747 else
76748 #endif
76749+
76750+#ifdef CONFIG_PAX_USERCOPY_SLABS
76751+ if (flags & GFP_USERCOPY)
76752+ cachep = kmalloc_usercopy_caches[i];
76753+ else
76754+#endif
76755+
76756 cachep = kmalloc_caches[i];
76757
76758 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
76759diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
76760index f28e14a..7831211 100644
76761--- a/include/linux/slob_def.h
76762+++ b/include/linux/slob_def.h
76763@@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
76764 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
76765 }
76766
76767-void *__kmalloc_node(size_t size, gfp_t flags, int node);
76768+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
76769
76770 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
76771 {
76772@@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
76773 return __kmalloc_node(size, flags, NUMA_NO_NODE);
76774 }
76775
76776-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
76777+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
76778 {
76779 return kmalloc(size, flags);
76780 }
76781diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
76782index 027276f..092bfe8 100644
76783--- a/include/linux/slub_def.h
76784+++ b/include/linux/slub_def.h
76785@@ -80,7 +80,7 @@ struct kmem_cache {
76786 struct kmem_cache_order_objects max;
76787 struct kmem_cache_order_objects min;
76788 gfp_t allocflags; /* gfp flags to use on each alloc */
76789- int refcount; /* Refcount for slab cache destroy */
76790+ atomic_t refcount; /* Refcount for slab cache destroy */
76791 void (*ctor)(void *);
76792 int inuse; /* Offset to metadata */
76793 int align; /* Alignment */
76794@@ -105,7 +105,7 @@ struct kmem_cache {
76795 };
76796
76797 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
76798-void *__kmalloc(size_t size, gfp_t flags);
76799+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
76800
76801 static __always_inline void *
76802 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
76803@@ -149,7 +149,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
76804 }
76805 #endif
76806
76807-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
76808+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
76809 {
76810 unsigned int order = get_order(size);
76811 return kmalloc_order_trace(size, flags, order);
76812@@ -175,7 +175,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
76813 }
76814
76815 #ifdef CONFIG_NUMA
76816-void *__kmalloc_node(size_t size, gfp_t flags, int node);
76817+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
76818 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
76819
76820 #ifdef CONFIG_TRACING
76821diff --git a/include/linux/smp.h b/include/linux/smp.h
76822index c848876..11e8a84 100644
76823--- a/include/linux/smp.h
76824+++ b/include/linux/smp.h
76825@@ -221,7 +221,9 @@ static inline void kick_all_cpus_sync(void) { }
76826 #endif
76827
76828 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
76829+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
76830 #define put_cpu() preempt_enable()
76831+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
76832
76833 /*
76834 * Callback to arch code if there's nosmp or maxcpus=0 on the
76835diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
76836index 54f91d3..be2c379 100644
76837--- a/include/linux/sock_diag.h
76838+++ b/include/linux/sock_diag.h
76839@@ -11,7 +11,7 @@ struct sock;
76840 struct sock_diag_handler {
76841 __u8 family;
76842 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
76843-};
76844+} __do_const;
76845
76846 int sock_diag_register(const struct sock_diag_handler *h);
76847 void sock_diag_unregister(const struct sock_diag_handler *h);
76848diff --git a/include/linux/sonet.h b/include/linux/sonet.h
76849index 680f9a3..f13aeb0 100644
76850--- a/include/linux/sonet.h
76851+++ b/include/linux/sonet.h
76852@@ -7,7 +7,7 @@
76853 #include <uapi/linux/sonet.h>
76854
76855 struct k_sonet_stats {
76856-#define __HANDLE_ITEM(i) atomic_t i
76857+#define __HANDLE_ITEM(i) atomic_unchecked_t i
76858 __SONET_ITEMS
76859 #undef __HANDLE_ITEM
76860 };
76861diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
76862index 07d8e53..dc934c9 100644
76863--- a/include/linux/sunrpc/addr.h
76864+++ b/include/linux/sunrpc/addr.h
76865@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
76866 {
76867 switch (sap->sa_family) {
76868 case AF_INET:
76869- return ntohs(((struct sockaddr_in *)sap)->sin_port);
76870+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
76871 case AF_INET6:
76872- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
76873+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
76874 }
76875 return 0;
76876 }
76877@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
76878 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
76879 const struct sockaddr *src)
76880 {
76881- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
76882+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
76883 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
76884
76885 dsin->sin_family = ssin->sin_family;
76886@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
76887 if (sa->sa_family != AF_INET6)
76888 return 0;
76889
76890- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
76891+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
76892 }
76893
76894 #endif /* _LINUX_SUNRPC_ADDR_H */
76895diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
76896index bfe11be..12bc8c4 100644
76897--- a/include/linux/sunrpc/clnt.h
76898+++ b/include/linux/sunrpc/clnt.h
76899@@ -96,7 +96,7 @@ struct rpc_procinfo {
76900 unsigned int p_timer; /* Which RTT timer to use */
76901 u32 p_statidx; /* Which procedure to account */
76902 const char * p_name; /* name of procedure */
76903-};
76904+} __do_const;
76905
76906 #ifdef __KERNEL__
76907
76908diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
76909index 1f0216b..6a4fa50 100644
76910--- a/include/linux/sunrpc/svc.h
76911+++ b/include/linux/sunrpc/svc.h
76912@@ -411,7 +411,7 @@ struct svc_procedure {
76913 unsigned int pc_count; /* call count */
76914 unsigned int pc_cachetype; /* cache info (NFS) */
76915 unsigned int pc_xdrressize; /* maximum size of XDR reply */
76916-};
76917+} __do_const;
76918
76919 /*
76920 * Function prototypes.
76921diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
76922index 0b8e3e6..33e0a01 100644
76923--- a/include/linux/sunrpc/svc_rdma.h
76924+++ b/include/linux/sunrpc/svc_rdma.h
76925@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
76926 extern unsigned int svcrdma_max_requests;
76927 extern unsigned int svcrdma_max_req_size;
76928
76929-extern atomic_t rdma_stat_recv;
76930-extern atomic_t rdma_stat_read;
76931-extern atomic_t rdma_stat_write;
76932-extern atomic_t rdma_stat_sq_starve;
76933-extern atomic_t rdma_stat_rq_starve;
76934-extern atomic_t rdma_stat_rq_poll;
76935-extern atomic_t rdma_stat_rq_prod;
76936-extern atomic_t rdma_stat_sq_poll;
76937-extern atomic_t rdma_stat_sq_prod;
76938+extern atomic_unchecked_t rdma_stat_recv;
76939+extern atomic_unchecked_t rdma_stat_read;
76940+extern atomic_unchecked_t rdma_stat_write;
76941+extern atomic_unchecked_t rdma_stat_sq_starve;
76942+extern atomic_unchecked_t rdma_stat_rq_starve;
76943+extern atomic_unchecked_t rdma_stat_rq_poll;
76944+extern atomic_unchecked_t rdma_stat_rq_prod;
76945+extern atomic_unchecked_t rdma_stat_sq_poll;
76946+extern atomic_unchecked_t rdma_stat_sq_prod;
76947
76948 #define RPCRDMA_VERSION 1
76949
76950diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
76951index ff374ab..7fd2ecb 100644
76952--- a/include/linux/sunrpc/svcauth.h
76953+++ b/include/linux/sunrpc/svcauth.h
76954@@ -109,7 +109,7 @@ struct auth_ops {
76955 int (*release)(struct svc_rqst *rq);
76956 void (*domain_release)(struct auth_domain *);
76957 int (*set_client)(struct svc_rqst *rq);
76958-};
76959+} __do_const;
76960
76961 #define SVC_GARBAGE 1
76962 #define SVC_SYSERR 2
76963diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
76964index a5ffd32..0935dea 100644
76965--- a/include/linux/swiotlb.h
76966+++ b/include/linux/swiotlb.h
76967@@ -60,7 +60,8 @@ extern void
76968
76969 extern void
76970 swiotlb_free_coherent(struct device *hwdev, size_t size,
76971- void *vaddr, dma_addr_t dma_handle);
76972+ void *vaddr, dma_addr_t dma_handle,
76973+ struct dma_attrs *attrs);
76974
76975 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
76976 unsigned long offset, size_t size,
76977diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
76978index 84662ec..d8f8adb 100644
76979--- a/include/linux/syscalls.h
76980+++ b/include/linux/syscalls.h
76981@@ -97,8 +97,12 @@ struct sigaltstack;
76982 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
76983
76984 #define __SC_DECL(t, a) t a
76985-#define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
76986-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
76987+#define __TYPE_IS_SL(t) (__same_type((t)0, 0L))
76988+#define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
76989+#define __TYPE_IS_SLL(t) (__same_type((t)0, 0LL))
76990+#define __TYPE_IS_ULL(t) (__same_type((t)0, 0ULL))
76991+#define __TYPE_IS_LL(t) (__TYPE_IS_SLL(t) || __TYPE_IS_ULL(t))
76992+#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), __builtin_choose_expr(__TYPE_IS_ULL(t), 0ULL, 0LL), __builtin_choose_expr(__TYPE_IS_UL(t), 0UL, 0L))) a
76993 #define __SC_CAST(t, a) (t) a
76994 #define __SC_ARGS(t, a) a
76995 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
76996@@ -362,11 +366,11 @@ asmlinkage long sys_sync(void);
76997 asmlinkage long sys_fsync(unsigned int fd);
76998 asmlinkage long sys_fdatasync(unsigned int fd);
76999 asmlinkage long sys_bdflush(int func, long data);
77000-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
77001- char __user *type, unsigned long flags,
77002+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
77003+ const char __user *type, unsigned long flags,
77004 void __user *data);
77005-asmlinkage long sys_umount(char __user *name, int flags);
77006-asmlinkage long sys_oldumount(char __user *name);
77007+asmlinkage long sys_umount(const char __user *name, int flags);
77008+asmlinkage long sys_oldumount(const char __user *name);
77009 asmlinkage long sys_truncate(const char __user *path, long length);
77010 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
77011 asmlinkage long sys_stat(const char __user *filename,
77012@@ -578,7 +582,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
77013 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
77014 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
77015 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
77016- struct sockaddr __user *, int);
77017+ struct sockaddr __user *, int) __intentional_overflow(0);
77018 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
77019 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
77020 unsigned int vlen, unsigned flags);
77021diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
77022index 27b3b0b..e093dd9 100644
77023--- a/include/linux/syscore_ops.h
77024+++ b/include/linux/syscore_ops.h
77025@@ -16,7 +16,7 @@ struct syscore_ops {
77026 int (*suspend)(void);
77027 void (*resume)(void);
77028 void (*shutdown)(void);
77029-};
77030+} __do_const;
77031
77032 extern void register_syscore_ops(struct syscore_ops *ops);
77033 extern void unregister_syscore_ops(struct syscore_ops *ops);
77034diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
77035index 14a8ff2..af52bad 100644
77036--- a/include/linux/sysctl.h
77037+++ b/include/linux/sysctl.h
77038@@ -34,13 +34,13 @@ struct ctl_table_root;
77039 struct ctl_table_header;
77040 struct ctl_dir;
77041
77042-typedef struct ctl_table ctl_table;
77043-
77044 typedef int proc_handler (struct ctl_table *ctl, int write,
77045 void __user *buffer, size_t *lenp, loff_t *ppos);
77046
77047 extern int proc_dostring(struct ctl_table *, int,
77048 void __user *, size_t *, loff_t *);
77049+extern int proc_dostring_modpriv(struct ctl_table *, int,
77050+ void __user *, size_t *, loff_t *);
77051 extern int proc_dointvec(struct ctl_table *, int,
77052 void __user *, size_t *, loff_t *);
77053 extern int proc_dointvec_minmax(struct ctl_table *, int,
77054@@ -115,7 +115,9 @@ struct ctl_table
77055 struct ctl_table_poll *poll;
77056 void *extra1;
77057 void *extra2;
77058-};
77059+} __do_const;
77060+typedef struct ctl_table __no_const ctl_table_no_const;
77061+typedef struct ctl_table ctl_table;
77062
77063 struct ctl_node {
77064 struct rb_node node;
77065diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
77066index e2cee22..3ddb921 100644
77067--- a/include/linux/sysfs.h
77068+++ b/include/linux/sysfs.h
77069@@ -31,7 +31,8 @@ struct attribute {
77070 struct lock_class_key *key;
77071 struct lock_class_key skey;
77072 #endif
77073-};
77074+} __do_const;
77075+typedef struct attribute __no_const attribute_no_const;
77076
77077 /**
77078 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
77079@@ -59,8 +60,8 @@ struct attribute_group {
77080 umode_t (*is_visible)(struct kobject *,
77081 struct attribute *, int);
77082 struct attribute **attrs;
77083-};
77084-
77085+} __do_const;
77086+typedef struct attribute_group __no_const attribute_group_no_const;
77087
77088
77089 /**
77090@@ -107,7 +108,8 @@ struct bin_attribute {
77091 char *, loff_t, size_t);
77092 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
77093 struct vm_area_struct *vma);
77094-};
77095+} __do_const;
77096+typedef struct bin_attribute __no_const bin_attribute_no_const;
77097
77098 /**
77099 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
77100diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
77101index 7faf933..9b85a0c 100644
77102--- a/include/linux/sysrq.h
77103+++ b/include/linux/sysrq.h
77104@@ -16,6 +16,7 @@
77105
77106 #include <linux/errno.h>
77107 #include <linux/types.h>
77108+#include <linux/compiler.h>
77109
77110 /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
77111 #define SYSRQ_DEFAULT_ENABLE 1
77112@@ -36,7 +37,7 @@ struct sysrq_key_op {
77113 char *help_msg;
77114 char *action_msg;
77115 int enable_mask;
77116-};
77117+} __do_const;
77118
77119 #ifdef CONFIG_MAGIC_SYSRQ
77120
77121diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
77122index e7e0473..7989295 100644
77123--- a/include/linux/thread_info.h
77124+++ b/include/linux/thread_info.h
77125@@ -148,6 +148,15 @@ static inline bool test_and_clear_restore_sigmask(void)
77126 #error "no set_restore_sigmask() provided and default one won't work"
77127 #endif
77128
77129+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
77130+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
77131+{
77132+#ifndef CONFIG_PAX_USERCOPY_DEBUG
77133+ if (!__builtin_constant_p(n))
77134+#endif
77135+ __check_object_size(ptr, n, to_user);
77136+}
77137+
77138 #endif /* __KERNEL__ */
77139
77140 #endif /* _LINUX_THREAD_INFO_H */
77141diff --git a/include/linux/tty.h b/include/linux/tty.h
77142index 8780bd2..d1ae08b 100644
77143--- a/include/linux/tty.h
77144+++ b/include/linux/tty.h
77145@@ -194,7 +194,7 @@ struct tty_port {
77146 const struct tty_port_operations *ops; /* Port operations */
77147 spinlock_t lock; /* Lock protecting tty field */
77148 int blocked_open; /* Waiting to open */
77149- int count; /* Usage count */
77150+ atomic_t count; /* Usage count */
77151 wait_queue_head_t open_wait; /* Open waiters */
77152 wait_queue_head_t close_wait; /* Close waiters */
77153 wait_queue_head_t delta_msr_wait; /* Modem status change */
77154@@ -550,7 +550,7 @@ extern int tty_port_open(struct tty_port *port,
77155 struct tty_struct *tty, struct file *filp);
77156 static inline int tty_port_users(struct tty_port *port)
77157 {
77158- return port->count + port->blocked_open;
77159+ return atomic_read(&port->count) + port->blocked_open;
77160 }
77161
77162 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
77163diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
77164index 756a609..b302dd6 100644
77165--- a/include/linux/tty_driver.h
77166+++ b/include/linux/tty_driver.h
77167@@ -285,7 +285,7 @@ struct tty_operations {
77168 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
77169 #endif
77170 const struct file_operations *proc_fops;
77171-};
77172+} __do_const;
77173
77174 struct tty_driver {
77175 int magic; /* magic number for this structure */
77176diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
77177index 58390c7..95e214c 100644
77178--- a/include/linux/tty_ldisc.h
77179+++ b/include/linux/tty_ldisc.h
77180@@ -146,7 +146,7 @@ struct tty_ldisc_ops {
77181
77182 struct module *owner;
77183
77184- int refcount;
77185+ atomic_t refcount;
77186 };
77187
77188 struct tty_ldisc {
77189diff --git a/include/linux/types.h b/include/linux/types.h
77190index 4d118ba..c3ee9bf 100644
77191--- a/include/linux/types.h
77192+++ b/include/linux/types.h
77193@@ -176,10 +176,26 @@ typedef struct {
77194 int counter;
77195 } atomic_t;
77196
77197+#ifdef CONFIG_PAX_REFCOUNT
77198+typedef struct {
77199+ int counter;
77200+} atomic_unchecked_t;
77201+#else
77202+typedef atomic_t atomic_unchecked_t;
77203+#endif
77204+
77205 #ifdef CONFIG_64BIT
77206 typedef struct {
77207 long counter;
77208 } atomic64_t;
77209+
77210+#ifdef CONFIG_PAX_REFCOUNT
77211+typedef struct {
77212+ long counter;
77213+} atomic64_unchecked_t;
77214+#else
77215+typedef atomic64_t atomic64_unchecked_t;
77216+#endif
77217 #endif
77218
77219 struct list_head {
77220diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
77221index 5ca0951..ab496a5 100644
77222--- a/include/linux/uaccess.h
77223+++ b/include/linux/uaccess.h
77224@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
77225 long ret; \
77226 mm_segment_t old_fs = get_fs(); \
77227 \
77228- set_fs(KERNEL_DS); \
77229 pagefault_disable(); \
77230- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
77231- pagefault_enable(); \
77232+ set_fs(KERNEL_DS); \
77233+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
77234 set_fs(old_fs); \
77235+ pagefault_enable(); \
77236 ret; \
77237 })
77238
77239diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
77240index 8e522cbc..aa8572d 100644
77241--- a/include/linux/uidgid.h
77242+++ b/include/linux/uidgid.h
77243@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
77244
77245 #endif /* CONFIG_USER_NS */
77246
77247+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
77248+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
77249+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
77250+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
77251+
77252 #endif /* _LINUX_UIDGID_H */
77253diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
77254index 99c1b4d..562e6f3 100644
77255--- a/include/linux/unaligned/access_ok.h
77256+++ b/include/linux/unaligned/access_ok.h
77257@@ -4,34 +4,34 @@
77258 #include <linux/kernel.h>
77259 #include <asm/byteorder.h>
77260
77261-static inline u16 get_unaligned_le16(const void *p)
77262+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
77263 {
77264- return le16_to_cpup((__le16 *)p);
77265+ return le16_to_cpup((const __le16 *)p);
77266 }
77267
77268-static inline u32 get_unaligned_le32(const void *p)
77269+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
77270 {
77271- return le32_to_cpup((__le32 *)p);
77272+ return le32_to_cpup((const __le32 *)p);
77273 }
77274
77275-static inline u64 get_unaligned_le64(const void *p)
77276+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
77277 {
77278- return le64_to_cpup((__le64 *)p);
77279+ return le64_to_cpup((const __le64 *)p);
77280 }
77281
77282-static inline u16 get_unaligned_be16(const void *p)
77283+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
77284 {
77285- return be16_to_cpup((__be16 *)p);
77286+ return be16_to_cpup((const __be16 *)p);
77287 }
77288
77289-static inline u32 get_unaligned_be32(const void *p)
77290+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
77291 {
77292- return be32_to_cpup((__be32 *)p);
77293+ return be32_to_cpup((const __be32 *)p);
77294 }
77295
77296-static inline u64 get_unaligned_be64(const void *p)
77297+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
77298 {
77299- return be64_to_cpup((__be64 *)p);
77300+ return be64_to_cpup((const __be64 *)p);
77301 }
77302
77303 static inline void put_unaligned_le16(u16 val, void *p)
77304diff --git a/include/linux/usb.h b/include/linux/usb.h
77305index a0bee5a..5533a52 100644
77306--- a/include/linux/usb.h
77307+++ b/include/linux/usb.h
77308@@ -552,7 +552,7 @@ struct usb_device {
77309 int maxchild;
77310
77311 u32 quirks;
77312- atomic_t urbnum;
77313+ atomic_unchecked_t urbnum;
77314
77315 unsigned long active_duration;
77316
77317@@ -1607,7 +1607,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
77318
77319 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
77320 __u8 request, __u8 requesttype, __u16 value, __u16 index,
77321- void *data, __u16 size, int timeout);
77322+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
77323 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
77324 void *data, int len, int *actual_length, int timeout);
77325 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
77326diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
77327index e452ba6..78f8e80 100644
77328--- a/include/linux/usb/renesas_usbhs.h
77329+++ b/include/linux/usb/renesas_usbhs.h
77330@@ -39,7 +39,7 @@ enum {
77331 */
77332 struct renesas_usbhs_driver_callback {
77333 int (*notify_hotplug)(struct platform_device *pdev);
77334-};
77335+} __no_const;
77336
77337 /*
77338 * callback functions for platform
77339diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
77340index 6f8fbcf..8259001 100644
77341--- a/include/linux/vermagic.h
77342+++ b/include/linux/vermagic.h
77343@@ -25,9 +25,35 @@
77344 #define MODULE_ARCH_VERMAGIC ""
77345 #endif
77346
77347+#ifdef CONFIG_PAX_REFCOUNT
77348+#define MODULE_PAX_REFCOUNT "REFCOUNT "
77349+#else
77350+#define MODULE_PAX_REFCOUNT ""
77351+#endif
77352+
77353+#ifdef CONSTIFY_PLUGIN
77354+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
77355+#else
77356+#define MODULE_CONSTIFY_PLUGIN ""
77357+#endif
77358+
77359+#ifdef STACKLEAK_PLUGIN
77360+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
77361+#else
77362+#define MODULE_STACKLEAK_PLUGIN ""
77363+#endif
77364+
77365+#ifdef CONFIG_GRKERNSEC
77366+#define MODULE_GRSEC "GRSEC "
77367+#else
77368+#define MODULE_GRSEC ""
77369+#endif
77370+
77371 #define VERMAGIC_STRING \
77372 UTS_RELEASE " " \
77373 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
77374 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
77375- MODULE_ARCH_VERMAGIC
77376+ MODULE_ARCH_VERMAGIC \
77377+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
77378+ MODULE_GRSEC
77379
77380diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
77381index 7d5773a..541c01c 100644
77382--- a/include/linux/vmalloc.h
77383+++ b/include/linux/vmalloc.h
77384@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
77385 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
77386 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
77387 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
77388+
77389+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
77390+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
77391+#endif
77392+
77393 /* bits [20..32] reserved for arch specific ioremap internals */
77394
77395 /*
77396@@ -75,7 +80,7 @@ extern void *vmalloc_32_user(unsigned long size);
77397 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
77398 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
77399 unsigned long start, unsigned long end, gfp_t gfp_mask,
77400- pgprot_t prot, int node, const void *caller);
77401+ pgprot_t prot, int node, const void *caller) __size_overflow(1);
77402 extern void vfree(const void *addr);
77403
77404 extern void *vmap(struct page **pages, unsigned int count,
77405@@ -137,8 +142,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
77406 extern void free_vm_area(struct vm_struct *area);
77407
77408 /* for /dev/kmem */
77409-extern long vread(char *buf, char *addr, unsigned long count);
77410-extern long vwrite(char *buf, char *addr, unsigned long count);
77411+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
77412+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
77413
77414 /*
77415 * Internals. Dont't use..
77416diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
77417index c586679..f06b389 100644
77418--- a/include/linux/vmstat.h
77419+++ b/include/linux/vmstat.h
77420@@ -90,18 +90,18 @@ static inline void vm_events_fold_cpu(int cpu)
77421 /*
77422 * Zone based page accounting with per cpu differentials.
77423 */
77424-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
77425+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
77426
77427 static inline void zone_page_state_add(long x, struct zone *zone,
77428 enum zone_stat_item item)
77429 {
77430- atomic_long_add(x, &zone->vm_stat[item]);
77431- atomic_long_add(x, &vm_stat[item]);
77432+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
77433+ atomic_long_add_unchecked(x, &vm_stat[item]);
77434 }
77435
77436 static inline unsigned long global_page_state(enum zone_stat_item item)
77437 {
77438- long x = atomic_long_read(&vm_stat[item]);
77439+ long x = atomic_long_read_unchecked(&vm_stat[item]);
77440 #ifdef CONFIG_SMP
77441 if (x < 0)
77442 x = 0;
77443@@ -112,7 +112,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
77444 static inline unsigned long zone_page_state(struct zone *zone,
77445 enum zone_stat_item item)
77446 {
77447- long x = atomic_long_read(&zone->vm_stat[item]);
77448+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
77449 #ifdef CONFIG_SMP
77450 if (x < 0)
77451 x = 0;
77452@@ -129,7 +129,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
77453 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
77454 enum zone_stat_item item)
77455 {
77456- long x = atomic_long_read(&zone->vm_stat[item]);
77457+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
77458
77459 #ifdef CONFIG_SMP
77460 int cpu;
77461@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
77462
77463 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
77464 {
77465- atomic_long_inc(&zone->vm_stat[item]);
77466- atomic_long_inc(&vm_stat[item]);
77467+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
77468+ atomic_long_inc_unchecked(&vm_stat[item]);
77469 }
77470
77471 static inline void __inc_zone_page_state(struct page *page,
77472@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
77473
77474 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
77475 {
77476- atomic_long_dec(&zone->vm_stat[item]);
77477- atomic_long_dec(&vm_stat[item]);
77478+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
77479+ atomic_long_dec_unchecked(&vm_stat[item]);
77480 }
77481
77482 static inline void __dec_zone_page_state(struct page *page,
77483diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
77484index 623488f..44b5742 100644
77485--- a/include/linux/workqueue.h
77486+++ b/include/linux/workqueue.h
77487@@ -410,11 +410,11 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
77488 alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
77489
77490 #define create_workqueue(name) \
77491- alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
77492+ alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name))
77493 #define create_freezable_workqueue(name) \
77494- alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
77495+ alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name))
77496 #define create_singlethread_workqueue(name) \
77497- alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
77498+ alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name))
77499
77500 extern void destroy_workqueue(struct workqueue_struct *wq);
77501
77502diff --git a/include/linux/xattr.h b/include/linux/xattr.h
77503index fdbafc6..49dfe4f 100644
77504--- a/include/linux/xattr.h
77505+++ b/include/linux/xattr.h
77506@@ -28,7 +28,7 @@ struct xattr_handler {
77507 size_t size, int handler_flags);
77508 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
77509 size_t size, int flags, int handler_flags);
77510-};
77511+} __do_const;
77512
77513 struct xattr {
77514 char *name;
77515@@ -37,6 +37,9 @@ struct xattr {
77516 };
77517
77518 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
77519+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
77520+ssize_t pax_getxattr(struct dentry *, void *, size_t);
77521+#endif
77522 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
77523 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
77524 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
77525diff --git a/include/linux/zlib.h b/include/linux/zlib.h
77526index 9c5a6b4..09c9438 100644
77527--- a/include/linux/zlib.h
77528+++ b/include/linux/zlib.h
77529@@ -31,6 +31,7 @@
77530 #define _ZLIB_H
77531
77532 #include <linux/zconf.h>
77533+#include <linux/compiler.h>
77534
77535 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
77536 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
77537@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
77538
77539 /* basic functions */
77540
77541-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
77542+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
77543 /*
77544 Returns the number of bytes that needs to be allocated for a per-
77545 stream workspace with the specified parameters. A pointer to this
77546diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
77547index 95d1c91..6798cca 100644
77548--- a/include/media/v4l2-dev.h
77549+++ b/include/media/v4l2-dev.h
77550@@ -76,7 +76,7 @@ struct v4l2_file_operations {
77551 int (*mmap) (struct file *, struct vm_area_struct *);
77552 int (*open) (struct file *);
77553 int (*release) (struct file *);
77554-};
77555+} __do_const;
77556
77557 /*
77558 * Newer version of video_device, handled by videodev2.c
77559diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
77560index adcbb20..62c2559 100644
77561--- a/include/net/9p/transport.h
77562+++ b/include/net/9p/transport.h
77563@@ -57,7 +57,7 @@ struct p9_trans_module {
77564 int (*cancel) (struct p9_client *, struct p9_req_t *req);
77565 int (*zc_request)(struct p9_client *, struct p9_req_t *,
77566 char *, char *, int , int, int, int);
77567-};
77568+} __do_const;
77569
77570 void v9fs_register_trans(struct p9_trans_module *m);
77571 void v9fs_unregister_trans(struct p9_trans_module *m);
77572diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
77573index fb94cf1..7c0c987 100644
77574--- a/include/net/bluetooth/l2cap.h
77575+++ b/include/net/bluetooth/l2cap.h
77576@@ -551,7 +551,7 @@ struct l2cap_ops {
77577 void (*defer) (struct l2cap_chan *chan);
77578 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
77579 unsigned long len, int nb);
77580-};
77581+} __do_const;
77582
77583 struct l2cap_conn {
77584 struct hci_conn *hcon;
77585diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
77586index f2ae33d..c457cf0 100644
77587--- a/include/net/caif/cfctrl.h
77588+++ b/include/net/caif/cfctrl.h
77589@@ -52,7 +52,7 @@ struct cfctrl_rsp {
77590 void (*radioset_rsp)(void);
77591 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
77592 struct cflayer *client_layer);
77593-};
77594+} __no_const;
77595
77596 /* Link Setup Parameters for CAIF-Links. */
77597 struct cfctrl_link_param {
77598@@ -101,8 +101,8 @@ struct cfctrl_request_info {
77599 struct cfctrl {
77600 struct cfsrvl serv;
77601 struct cfctrl_rsp res;
77602- atomic_t req_seq_no;
77603- atomic_t rsp_seq_no;
77604+ atomic_unchecked_t req_seq_no;
77605+ atomic_unchecked_t rsp_seq_no;
77606 struct list_head list;
77607 /* Protects from simultaneous access to first_req list */
77608 spinlock_t info_list_lock;
77609diff --git a/include/net/flow.h b/include/net/flow.h
77610index 628e11b..4c475df 100644
77611--- a/include/net/flow.h
77612+++ b/include/net/flow.h
77613@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
77614
77615 extern void flow_cache_flush(void);
77616 extern void flow_cache_flush_deferred(void);
77617-extern atomic_t flow_cache_genid;
77618+extern atomic_unchecked_t flow_cache_genid;
77619
77620 #endif
77621diff --git a/include/net/genetlink.h b/include/net/genetlink.h
77622index 93024a4..eeb6b6e 100644
77623--- a/include/net/genetlink.h
77624+++ b/include/net/genetlink.h
77625@@ -119,7 +119,7 @@ struct genl_ops {
77626 struct netlink_callback *cb);
77627 int (*done)(struct netlink_callback *cb);
77628 struct list_head ops_list;
77629-};
77630+} __do_const;
77631
77632 extern int genl_register_family(struct genl_family *family);
77633 extern int genl_register_family_with_ops(struct genl_family *family,
77634diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
77635index 734d9b5..48a9a4b 100644
77636--- a/include/net/gro_cells.h
77637+++ b/include/net/gro_cells.h
77638@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
77639 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
77640
77641 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
77642- atomic_long_inc(&dev->rx_dropped);
77643+ atomic_long_inc_unchecked(&dev->rx_dropped);
77644 kfree_skb(skb);
77645 return;
77646 }
77647diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
77648index de2c785..0588a6b 100644
77649--- a/include/net/inet_connection_sock.h
77650+++ b/include/net/inet_connection_sock.h
77651@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
77652 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
77653 int (*bind_conflict)(const struct sock *sk,
77654 const struct inet_bind_bucket *tb, bool relax);
77655-};
77656+} __do_const;
77657
77658 /** inet_connection_sock - INET connection oriented sock
77659 *
77660diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
77661index 53f464d..ba76aaa 100644
77662--- a/include/net/inetpeer.h
77663+++ b/include/net/inetpeer.h
77664@@ -47,8 +47,8 @@ struct inet_peer {
77665 */
77666 union {
77667 struct {
77668- atomic_t rid; /* Frag reception counter */
77669- atomic_t ip_id_count; /* IP ID for the next packet */
77670+ atomic_unchecked_t rid; /* Frag reception counter */
77671+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
77672 };
77673 struct rcu_head rcu;
77674 struct inet_peer *gc_next;
77675@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
77676 more++;
77677 inet_peer_refcheck(p);
77678 do {
77679- old = atomic_read(&p->ip_id_count);
77680+ old = atomic_read_unchecked(&p->ip_id_count);
77681 new = old + more;
77682 if (!new)
77683 new = 1;
77684- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
77685+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
77686 return new;
77687 }
77688
77689diff --git a/include/net/ip.h b/include/net/ip.h
77690index a68f838..74518ab 100644
77691--- a/include/net/ip.h
77692+++ b/include/net/ip.h
77693@@ -202,7 +202,7 @@ extern struct local_ports {
77694 } sysctl_local_ports;
77695 extern void inet_get_local_port_range(int *low, int *high);
77696
77697-extern unsigned long *sysctl_local_reserved_ports;
77698+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
77699 static inline int inet_is_reserved_local_port(int port)
77700 {
77701 return test_bit(port, sysctl_local_reserved_ports);
77702diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
77703index e49db91..76a81de 100644
77704--- a/include/net/ip_fib.h
77705+++ b/include/net/ip_fib.h
77706@@ -167,7 +167,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
77707
77708 #define FIB_RES_SADDR(net, res) \
77709 ((FIB_RES_NH(res).nh_saddr_genid == \
77710- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
77711+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
77712 FIB_RES_NH(res).nh_saddr : \
77713 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
77714 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
77715diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
77716index 4c062cc..3562c31 100644
77717--- a/include/net/ip_vs.h
77718+++ b/include/net/ip_vs.h
77719@@ -612,7 +612,7 @@ struct ip_vs_conn {
77720 struct ip_vs_conn *control; /* Master control connection */
77721 atomic_t n_control; /* Number of controlled ones */
77722 struct ip_vs_dest *dest; /* real server */
77723- atomic_t in_pkts; /* incoming packet counter */
77724+ atomic_unchecked_t in_pkts; /* incoming packet counter */
77725
77726 /* packet transmitter for different forwarding methods. If it
77727 mangles the packet, it must return NF_DROP or better NF_STOLEN,
77728@@ -761,7 +761,7 @@ struct ip_vs_dest {
77729 __be16 port; /* port number of the server */
77730 union nf_inet_addr addr; /* IP address of the server */
77731 volatile unsigned int flags; /* dest status flags */
77732- atomic_t conn_flags; /* flags to copy to conn */
77733+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
77734 atomic_t weight; /* server weight */
77735
77736 atomic_t refcnt; /* reference counter */
77737@@ -1013,11 +1013,11 @@ struct netns_ipvs {
77738 /* ip_vs_lblc */
77739 int sysctl_lblc_expiration;
77740 struct ctl_table_header *lblc_ctl_header;
77741- struct ctl_table *lblc_ctl_table;
77742+ ctl_table_no_const *lblc_ctl_table;
77743 /* ip_vs_lblcr */
77744 int sysctl_lblcr_expiration;
77745 struct ctl_table_header *lblcr_ctl_header;
77746- struct ctl_table *lblcr_ctl_table;
77747+ ctl_table_no_const *lblcr_ctl_table;
77748 /* ip_vs_est */
77749 struct list_head est_list; /* estimator list */
77750 spinlock_t est_lock;
77751diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
77752index 80ffde3..968b0f4 100644
77753--- a/include/net/irda/ircomm_tty.h
77754+++ b/include/net/irda/ircomm_tty.h
77755@@ -35,6 +35,7 @@
77756 #include <linux/termios.h>
77757 #include <linux/timer.h>
77758 #include <linux/tty.h> /* struct tty_struct */
77759+#include <asm/local.h>
77760
77761 #include <net/irda/irias_object.h>
77762 #include <net/irda/ircomm_core.h>
77763diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
77764index 714cc9a..ea05f3e 100644
77765--- a/include/net/iucv/af_iucv.h
77766+++ b/include/net/iucv/af_iucv.h
77767@@ -149,7 +149,7 @@ struct iucv_skb_cb {
77768 struct iucv_sock_list {
77769 struct hlist_head head;
77770 rwlock_t lock;
77771- atomic_t autobind_name;
77772+ atomic_unchecked_t autobind_name;
77773 };
77774
77775 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
77776diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
77777index df83f69..9b640b8 100644
77778--- a/include/net/llc_c_ac.h
77779+++ b/include/net/llc_c_ac.h
77780@@ -87,7 +87,7 @@
77781 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
77782 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
77783
77784-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
77785+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
77786
77787 extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
77788 extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
77789diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
77790index 6ca3113..f8026dd 100644
77791--- a/include/net/llc_c_ev.h
77792+++ b/include/net/llc_c_ev.h
77793@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
77794 return (struct llc_conn_state_ev *)skb->cb;
77795 }
77796
77797-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
77798-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
77799+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
77800+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
77801
77802 extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
77803 extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
77804diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
77805index 0e79cfb..f46db31 100644
77806--- a/include/net/llc_c_st.h
77807+++ b/include/net/llc_c_st.h
77808@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
77809 u8 next_state;
77810 llc_conn_ev_qfyr_t *ev_qualifiers;
77811 llc_conn_action_t *ev_actions;
77812-};
77813+} __do_const;
77814
77815 struct llc_conn_state {
77816 u8 current_state;
77817diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
77818index 37a3bbd..55a4241 100644
77819--- a/include/net/llc_s_ac.h
77820+++ b/include/net/llc_s_ac.h
77821@@ -23,7 +23,7 @@
77822 #define SAP_ACT_TEST_IND 9
77823
77824 /* All action functions must look like this */
77825-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
77826+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
77827
77828 extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
77829 struct sk_buff *skb);
77830diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
77831index 567c681..cd73ac0 100644
77832--- a/include/net/llc_s_st.h
77833+++ b/include/net/llc_s_st.h
77834@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
77835 llc_sap_ev_t ev;
77836 u8 next_state;
77837 llc_sap_action_t *ev_actions;
77838-};
77839+} __do_const;
77840
77841 struct llc_sap_state {
77842 u8 curr_state;
77843diff --git a/include/net/mac80211.h b/include/net/mac80211.h
77844index 885898a..cdace34 100644
77845--- a/include/net/mac80211.h
77846+++ b/include/net/mac80211.h
77847@@ -4205,7 +4205,7 @@ struct rate_control_ops {
77848 void (*add_sta_debugfs)(void *priv, void *priv_sta,
77849 struct dentry *dir);
77850 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
77851-};
77852+} __do_const;
77853
77854 static inline int rate_supported(struct ieee80211_sta *sta,
77855 enum ieee80211_band band,
77856diff --git a/include/net/neighbour.h b/include/net/neighbour.h
77857index 7e748ad..5c6229b 100644
77858--- a/include/net/neighbour.h
77859+++ b/include/net/neighbour.h
77860@@ -123,7 +123,7 @@ struct neigh_ops {
77861 void (*error_report)(struct neighbour *, struct sk_buff *);
77862 int (*output)(struct neighbour *, struct sk_buff *);
77863 int (*connected_output)(struct neighbour *, struct sk_buff *);
77864-};
77865+} __do_const;
77866
77867 struct pneigh_entry {
77868 struct pneigh_entry *next;
77869diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
77870index b176978..ea169f4 100644
77871--- a/include/net/net_namespace.h
77872+++ b/include/net/net_namespace.h
77873@@ -117,7 +117,7 @@ struct net {
77874 #endif
77875 struct netns_ipvs *ipvs;
77876 struct sock *diag_nlsk;
77877- atomic_t rt_genid;
77878+ atomic_unchecked_t rt_genid;
77879 };
77880
77881 /*
77882@@ -274,7 +274,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
77883 #define __net_init __init
77884 #define __net_exit __exit_refok
77885 #define __net_initdata __initdata
77886+#ifdef CONSTIFY_PLUGIN
77887 #define __net_initconst __initconst
77888+#else
77889+#define __net_initconst __initdata
77890+#endif
77891 #endif
77892
77893 struct pernet_operations {
77894@@ -284,7 +288,7 @@ struct pernet_operations {
77895 void (*exit_batch)(struct list_head *net_exit_list);
77896 int *id;
77897 size_t size;
77898-};
77899+} __do_const;
77900
77901 /*
77902 * Use these carefully. If you implement a network device and it
77903@@ -332,12 +336,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
77904
77905 static inline int rt_genid(struct net *net)
77906 {
77907- return atomic_read(&net->rt_genid);
77908+ return atomic_read_unchecked(&net->rt_genid);
77909 }
77910
77911 static inline void rt_genid_bump(struct net *net)
77912 {
77913- atomic_inc(&net->rt_genid);
77914+ atomic_inc_unchecked(&net->rt_genid);
77915 }
77916
77917 #endif /* __NET_NET_NAMESPACE_H */
77918diff --git a/include/net/netdma.h b/include/net/netdma.h
77919index 8ba8ce2..99b7fff 100644
77920--- a/include/net/netdma.h
77921+++ b/include/net/netdma.h
77922@@ -24,7 +24,7 @@
77923 #include <linux/dmaengine.h>
77924 #include <linux/skbuff.h>
77925
77926-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
77927+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
77928 struct sk_buff *skb, int offset, struct iovec *to,
77929 size_t len, struct dma_pinned_list *pinned_list);
77930
77931diff --git a/include/net/netlink.h b/include/net/netlink.h
77932index 9690b0f..87aded7 100644
77933--- a/include/net/netlink.h
77934+++ b/include/net/netlink.h
77935@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
77936 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
77937 {
77938 if (mark)
77939- skb_trim(skb, (unsigned char *) mark - skb->data);
77940+ skb_trim(skb, (const unsigned char *) mark - skb->data);
77941 }
77942
77943 /**
77944diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
77945index c9c0c53..53f24c3 100644
77946--- a/include/net/netns/conntrack.h
77947+++ b/include/net/netns/conntrack.h
77948@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
77949 struct nf_proto_net {
77950 #ifdef CONFIG_SYSCTL
77951 struct ctl_table_header *ctl_table_header;
77952- struct ctl_table *ctl_table;
77953+ ctl_table_no_const *ctl_table;
77954 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
77955 struct ctl_table_header *ctl_compat_header;
77956- struct ctl_table *ctl_compat_table;
77957+ ctl_table_no_const *ctl_compat_table;
77958 #endif
77959 #endif
77960 unsigned int users;
77961@@ -58,7 +58,7 @@ struct nf_ip_net {
77962 struct nf_icmp_net icmpv6;
77963 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
77964 struct ctl_table_header *ctl_table_header;
77965- struct ctl_table *ctl_table;
77966+ ctl_table_no_const *ctl_table;
77967 #endif
77968 };
77969
77970diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
77971index 2ba9de8..47bd6c7 100644
77972--- a/include/net/netns/ipv4.h
77973+++ b/include/net/netns/ipv4.h
77974@@ -67,7 +67,7 @@ struct netns_ipv4 {
77975 kgid_t sysctl_ping_group_range[2];
77976 long sysctl_tcp_mem[3];
77977
77978- atomic_t dev_addr_genid;
77979+ atomic_unchecked_t dev_addr_genid;
77980
77981 #ifdef CONFIG_IP_MROUTE
77982 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
77983diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
77984index 005e2c2..023d340 100644
77985--- a/include/net/netns/ipv6.h
77986+++ b/include/net/netns/ipv6.h
77987@@ -71,7 +71,7 @@ struct netns_ipv6 {
77988 struct fib_rules_ops *mr6_rules_ops;
77989 #endif
77990 #endif
77991- atomic_t dev_addr_genid;
77992+ atomic_unchecked_t dev_addr_genid;
77993 };
77994
77995 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
77996diff --git a/include/net/protocol.h b/include/net/protocol.h
77997index 047c047..b9dad15 100644
77998--- a/include/net/protocol.h
77999+++ b/include/net/protocol.h
78000@@ -44,7 +44,7 @@ struct net_protocol {
78001 void (*err_handler)(struct sk_buff *skb, u32 info);
78002 unsigned int no_policy:1,
78003 netns_ok:1;
78004-};
78005+} __do_const;
78006
78007 #if IS_ENABLED(CONFIG_IPV6)
78008 struct inet6_protocol {
78009@@ -57,7 +57,7 @@ struct inet6_protocol {
78010 u8 type, u8 code, int offset,
78011 __be32 info);
78012 unsigned int flags; /* INET6_PROTO_xxx */
78013-};
78014+} __do_const;
78015
78016 #define INET6_PROTO_NOPOLICY 0x1
78017 #define INET6_PROTO_FINAL 0x2
78018diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
78019index 7026648..584cc8c 100644
78020--- a/include/net/rtnetlink.h
78021+++ b/include/net/rtnetlink.h
78022@@ -81,7 +81,7 @@ struct rtnl_link_ops {
78023 const struct net_device *dev);
78024 unsigned int (*get_num_tx_queues)(void);
78025 unsigned int (*get_num_rx_queues)(void);
78026-};
78027+} __do_const;
78028
78029 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
78030 extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
78031diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
78032index cd89510..d67810f 100644
78033--- a/include/net/sctp/sctp.h
78034+++ b/include/net/sctp/sctp.h
78035@@ -330,9 +330,9 @@ do { \
78036
78037 #else /* SCTP_DEBUG */
78038
78039-#define SCTP_DEBUG_PRINTK(whatever...)
78040-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
78041-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
78042+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
78043+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
78044+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
78045 #define SCTP_ENABLE_DEBUG
78046 #define SCTP_DISABLE_DEBUG
78047 #define SCTP_ASSERT(expr, str, func)
78048diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
78049index 2a82d13..62a31c2 100644
78050--- a/include/net/sctp/sm.h
78051+++ b/include/net/sctp/sm.h
78052@@ -87,7 +87,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
78053 typedef struct {
78054 sctp_state_fn_t *fn;
78055 const char *name;
78056-} sctp_sm_table_entry_t;
78057+} __do_const sctp_sm_table_entry_t;
78058
78059 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
78060 * currently in use.
78061@@ -299,7 +299,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
78062 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
78063
78064 /* Extern declarations for major data structures. */
78065-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
78066+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
78067
78068
78069 /* Get the size of a DATA chunk payload. */
78070diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
78071index 1bd4c41..9250b5b 100644
78072--- a/include/net/sctp/structs.h
78073+++ b/include/net/sctp/structs.h
78074@@ -516,7 +516,7 @@ struct sctp_pf {
78075 struct sctp_association *asoc);
78076 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
78077 struct sctp_af *af;
78078-};
78079+} __do_const;
78080
78081
78082 /* Structure to track chunk fragments that have been acked, but peer
78083diff --git a/include/net/sock.h b/include/net/sock.h
78084index 66772cf..25bc45b 100644
78085--- a/include/net/sock.h
78086+++ b/include/net/sock.h
78087@@ -325,7 +325,7 @@ struct sock {
78088 #ifdef CONFIG_RPS
78089 __u32 sk_rxhash;
78090 #endif
78091- atomic_t sk_drops;
78092+ atomic_unchecked_t sk_drops;
78093 int sk_rcvbuf;
78094
78095 struct sk_filter __rcu *sk_filter;
78096@@ -1797,7 +1797,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
78097 }
78098
78099 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
78100- char __user *from, char *to,
78101+ char __user *from, unsigned char *to,
78102 int copy, int offset)
78103 {
78104 if (skb->ip_summed == CHECKSUM_NONE) {
78105@@ -2056,7 +2056,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
78106 }
78107 }
78108
78109-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
78110+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
78111
78112 /**
78113 * sk_page_frag - return an appropriate page_frag
78114diff --git a/include/net/tcp.h b/include/net/tcp.h
78115index 5bba80f..8520a82 100644
78116--- a/include/net/tcp.h
78117+++ b/include/net/tcp.h
78118@@ -524,7 +524,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
78119 extern void tcp_xmit_retransmit_queue(struct sock *);
78120 extern void tcp_simple_retransmit(struct sock *);
78121 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
78122-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
78123+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
78124
78125 extern void tcp_send_probe0(struct sock *);
78126 extern void tcp_send_partial(struct sock *);
78127@@ -697,8 +697,8 @@ struct tcp_skb_cb {
78128 struct inet6_skb_parm h6;
78129 #endif
78130 } header; /* For incoming frames */
78131- __u32 seq; /* Starting sequence number */
78132- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
78133+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
78134+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
78135 __u32 when; /* used to compute rtt's */
78136 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
78137
78138@@ -712,7 +712,7 @@ struct tcp_skb_cb {
78139
78140 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
78141 /* 1 byte hole */
78142- __u32 ack_seq; /* Sequence number ACK'd */
78143+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
78144 };
78145
78146 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
78147diff --git a/include/net/xfrm.h b/include/net/xfrm.h
78148index 94ce082..62b278d 100644
78149--- a/include/net/xfrm.h
78150+++ b/include/net/xfrm.h
78151@@ -305,7 +305,7 @@ struct xfrm_policy_afinfo {
78152 struct net_device *dev,
78153 const struct flowi *fl);
78154 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
78155-};
78156+} __do_const;
78157
78158 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
78159 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
78160@@ -341,7 +341,7 @@ struct xfrm_state_afinfo {
78161 struct sk_buff *skb);
78162 int (*transport_finish)(struct sk_buff *skb,
78163 int async);
78164-};
78165+} __do_const;
78166
78167 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
78168 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
78169@@ -424,7 +424,7 @@ struct xfrm_mode {
78170 struct module *owner;
78171 unsigned int encap;
78172 int flags;
78173-};
78174+} __do_const;
78175
78176 /* Flags for xfrm_mode. */
78177 enum {
78178@@ -521,7 +521,7 @@ struct xfrm_policy {
78179 struct timer_list timer;
78180
78181 struct flow_cache_object flo;
78182- atomic_t genid;
78183+ atomic_unchecked_t genid;
78184 u32 priority;
78185 u32 index;
78186 struct xfrm_mark mark;
78187diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
78188index 1a046b1..ee0bef0 100644
78189--- a/include/rdma/iw_cm.h
78190+++ b/include/rdma/iw_cm.h
78191@@ -122,7 +122,7 @@ struct iw_cm_verbs {
78192 int backlog);
78193
78194 int (*destroy_listen)(struct iw_cm_id *cm_id);
78195-};
78196+} __no_const;
78197
78198 /**
78199 * iw_create_cm_id - Create an IW CM identifier.
78200diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
78201index e1379b4..67eafbe 100644
78202--- a/include/scsi/libfc.h
78203+++ b/include/scsi/libfc.h
78204@@ -762,6 +762,7 @@ struct libfc_function_template {
78205 */
78206 void (*disc_stop_final) (struct fc_lport *);
78207 };
78208+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
78209
78210 /**
78211 * struct fc_disc - Discovery context
78212@@ -866,7 +867,7 @@ struct fc_lport {
78213 struct fc_vport *vport;
78214
78215 /* Operational Information */
78216- struct libfc_function_template tt;
78217+ libfc_function_template_no_const tt;
78218 u8 link_up;
78219 u8 qfull;
78220 enum fc_lport_state state;
78221diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
78222index cc64587..608f523 100644
78223--- a/include/scsi/scsi_device.h
78224+++ b/include/scsi/scsi_device.h
78225@@ -171,9 +171,9 @@ struct scsi_device {
78226 unsigned int max_device_blocked; /* what device_blocked counts down from */
78227 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
78228
78229- atomic_t iorequest_cnt;
78230- atomic_t iodone_cnt;
78231- atomic_t ioerr_cnt;
78232+ atomic_unchecked_t iorequest_cnt;
78233+ atomic_unchecked_t iodone_cnt;
78234+ atomic_unchecked_t ioerr_cnt;
78235
78236 struct device sdev_gendev,
78237 sdev_dev;
78238diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
78239index b797e8f..8e2c3aa 100644
78240--- a/include/scsi/scsi_transport_fc.h
78241+++ b/include/scsi/scsi_transport_fc.h
78242@@ -751,7 +751,8 @@ struct fc_function_template {
78243 unsigned long show_host_system_hostname:1;
78244
78245 unsigned long disable_target_scan:1;
78246-};
78247+} __do_const;
78248+typedef struct fc_function_template __no_const fc_function_template_no_const;
78249
78250
78251 /**
78252diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
78253index 9031a26..750d592 100644
78254--- a/include/sound/compress_driver.h
78255+++ b/include/sound/compress_driver.h
78256@@ -128,7 +128,7 @@ struct snd_compr_ops {
78257 struct snd_compr_caps *caps);
78258 int (*get_codec_caps) (struct snd_compr_stream *stream,
78259 struct snd_compr_codec_caps *codec);
78260-};
78261+} __no_const;
78262
78263 /**
78264 * struct snd_compr: Compressed device
78265diff --git a/include/sound/soc.h b/include/sound/soc.h
78266index 85c1522..f44bad1 100644
78267--- a/include/sound/soc.h
78268+++ b/include/sound/soc.h
78269@@ -781,7 +781,7 @@ struct snd_soc_codec_driver {
78270 /* probe ordering - for components with runtime dependencies */
78271 int probe_order;
78272 int remove_order;
78273-};
78274+} __do_const;
78275
78276 /* SoC platform interface */
78277 struct snd_soc_platform_driver {
78278@@ -827,7 +827,7 @@ struct snd_soc_platform_driver {
78279 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
78280 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
78281 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
78282-};
78283+} __do_const;
78284
78285 struct snd_soc_platform {
78286 const char *name;
78287diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
78288index 4ea4f98..a63629b 100644
78289--- a/include/target/target_core_base.h
78290+++ b/include/target/target_core_base.h
78291@@ -653,7 +653,7 @@ struct se_device {
78292 spinlock_t stats_lock;
78293 /* Active commands on this virtual SE device */
78294 atomic_t simple_cmds;
78295- atomic_t dev_ordered_id;
78296+ atomic_unchecked_t dev_ordered_id;
78297 atomic_t dev_ordered_sync;
78298 atomic_t dev_qf_count;
78299 int export_count;
78300diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
78301new file mode 100644
78302index 0000000..fb634b7
78303--- /dev/null
78304+++ b/include/trace/events/fs.h
78305@@ -0,0 +1,53 @@
78306+#undef TRACE_SYSTEM
78307+#define TRACE_SYSTEM fs
78308+
78309+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
78310+#define _TRACE_FS_H
78311+
78312+#include <linux/fs.h>
78313+#include <linux/tracepoint.h>
78314+
78315+TRACE_EVENT(do_sys_open,
78316+
78317+ TP_PROTO(const char *filename, int flags, int mode),
78318+
78319+ TP_ARGS(filename, flags, mode),
78320+
78321+ TP_STRUCT__entry(
78322+ __string( filename, filename )
78323+ __field( int, flags )
78324+ __field( int, mode )
78325+ ),
78326+
78327+ TP_fast_assign(
78328+ __assign_str(filename, filename);
78329+ __entry->flags = flags;
78330+ __entry->mode = mode;
78331+ ),
78332+
78333+ TP_printk("\"%s\" %x %o",
78334+ __get_str(filename), __entry->flags, __entry->mode)
78335+);
78336+
78337+TRACE_EVENT(open_exec,
78338+
78339+ TP_PROTO(const char *filename),
78340+
78341+ TP_ARGS(filename),
78342+
78343+ TP_STRUCT__entry(
78344+ __string( filename, filename )
78345+ ),
78346+
78347+ TP_fast_assign(
78348+ __assign_str(filename, filename);
78349+ ),
78350+
78351+ TP_printk("\"%s\"",
78352+ __get_str(filename))
78353+);
78354+
78355+#endif /* _TRACE_FS_H */
78356+
78357+/* This part must be outside protection */
78358+#include <trace/define_trace.h>
78359diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
78360index 1c09820..7f5ec79 100644
78361--- a/include/trace/events/irq.h
78362+++ b/include/trace/events/irq.h
78363@@ -36,7 +36,7 @@ struct softirq_action;
78364 */
78365 TRACE_EVENT(irq_handler_entry,
78366
78367- TP_PROTO(int irq, struct irqaction *action),
78368+ TP_PROTO(int irq, const struct irqaction *action),
78369
78370 TP_ARGS(irq, action),
78371
78372@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
78373 */
78374 TRACE_EVENT(irq_handler_exit,
78375
78376- TP_PROTO(int irq, struct irqaction *action, int ret),
78377+ TP_PROTO(int irq, const struct irqaction *action, int ret),
78378
78379 TP_ARGS(irq, action, ret),
78380
78381diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
78382index 7caf44c..23c6f27 100644
78383--- a/include/uapi/linux/a.out.h
78384+++ b/include/uapi/linux/a.out.h
78385@@ -39,6 +39,14 @@ enum machine_type {
78386 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
78387 };
78388
78389+/* Constants for the N_FLAGS field */
78390+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
78391+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
78392+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
78393+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
78394+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
78395+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
78396+
78397 #if !defined (N_MAGIC)
78398 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
78399 #endif
78400diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
78401index d876736..ccce5c0 100644
78402--- a/include/uapi/linux/byteorder/little_endian.h
78403+++ b/include/uapi/linux/byteorder/little_endian.h
78404@@ -42,51 +42,51 @@
78405
78406 static inline __le64 __cpu_to_le64p(const __u64 *p)
78407 {
78408- return (__force __le64)*p;
78409+ return (__force const __le64)*p;
78410 }
78411-static inline __u64 __le64_to_cpup(const __le64 *p)
78412+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
78413 {
78414- return (__force __u64)*p;
78415+ return (__force const __u64)*p;
78416 }
78417 static inline __le32 __cpu_to_le32p(const __u32 *p)
78418 {
78419- return (__force __le32)*p;
78420+ return (__force const __le32)*p;
78421 }
78422 static inline __u32 __le32_to_cpup(const __le32 *p)
78423 {
78424- return (__force __u32)*p;
78425+ return (__force const __u32)*p;
78426 }
78427 static inline __le16 __cpu_to_le16p(const __u16 *p)
78428 {
78429- return (__force __le16)*p;
78430+ return (__force const __le16)*p;
78431 }
78432 static inline __u16 __le16_to_cpup(const __le16 *p)
78433 {
78434- return (__force __u16)*p;
78435+ return (__force const __u16)*p;
78436 }
78437 static inline __be64 __cpu_to_be64p(const __u64 *p)
78438 {
78439- return (__force __be64)__swab64p(p);
78440+ return (__force const __be64)__swab64p(p);
78441 }
78442 static inline __u64 __be64_to_cpup(const __be64 *p)
78443 {
78444- return __swab64p((__u64 *)p);
78445+ return __swab64p((const __u64 *)p);
78446 }
78447 static inline __be32 __cpu_to_be32p(const __u32 *p)
78448 {
78449- return (__force __be32)__swab32p(p);
78450+ return (__force const __be32)__swab32p(p);
78451 }
78452-static inline __u32 __be32_to_cpup(const __be32 *p)
78453+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
78454 {
78455- return __swab32p((__u32 *)p);
78456+ return __swab32p((const __u32 *)p);
78457 }
78458 static inline __be16 __cpu_to_be16p(const __u16 *p)
78459 {
78460- return (__force __be16)__swab16p(p);
78461+ return (__force const __be16)__swab16p(p);
78462 }
78463 static inline __u16 __be16_to_cpup(const __be16 *p)
78464 {
78465- return __swab16p((__u16 *)p);
78466+ return __swab16p((const __u16 *)p);
78467 }
78468 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
78469 #define __le64_to_cpus(x) do { (void)(x); } while (0)
78470diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
78471index ef6103b..d4e65dd 100644
78472--- a/include/uapi/linux/elf.h
78473+++ b/include/uapi/linux/elf.h
78474@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
78475 #define PT_GNU_EH_FRAME 0x6474e550
78476
78477 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
78478+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
78479+
78480+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
78481+
78482+/* Constants for the e_flags field */
78483+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
78484+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
78485+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
78486+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
78487+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
78488+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
78489
78490 /*
78491 * Extended Numbering
78492@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
78493 #define DT_DEBUG 21
78494 #define DT_TEXTREL 22
78495 #define DT_JMPREL 23
78496+#define DT_FLAGS 30
78497+ #define DF_TEXTREL 0x00000004
78498 #define DT_ENCODING 32
78499 #define OLD_DT_LOOS 0x60000000
78500 #define DT_LOOS 0x6000000d
78501@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
78502 #define PF_W 0x2
78503 #define PF_X 0x1
78504
78505+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
78506+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
78507+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
78508+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
78509+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
78510+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
78511+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
78512+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
78513+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
78514+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
78515+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
78516+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
78517+
78518 typedef struct elf32_phdr{
78519 Elf32_Word p_type;
78520 Elf32_Off p_offset;
78521@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
78522 #define EI_OSABI 7
78523 #define EI_PAD 8
78524
78525+#define EI_PAX 14
78526+
78527 #define ELFMAG0 0x7f /* EI_MAG */
78528 #define ELFMAG1 'E'
78529 #define ELFMAG2 'L'
78530diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
78531index aa169c4..6a2771d 100644
78532--- a/include/uapi/linux/personality.h
78533+++ b/include/uapi/linux/personality.h
78534@@ -30,6 +30,7 @@ enum {
78535 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
78536 ADDR_NO_RANDOMIZE | \
78537 ADDR_COMPAT_LAYOUT | \
78538+ ADDR_LIMIT_3GB | \
78539 MMAP_PAGE_ZERO)
78540
78541 /*
78542diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
78543index 7530e74..e714828 100644
78544--- a/include/uapi/linux/screen_info.h
78545+++ b/include/uapi/linux/screen_info.h
78546@@ -43,7 +43,8 @@ struct screen_info {
78547 __u16 pages; /* 0x32 */
78548 __u16 vesa_attributes; /* 0x34 */
78549 __u32 capabilities; /* 0x36 */
78550- __u8 _reserved[6]; /* 0x3a */
78551+ __u16 vesapm_size; /* 0x3a */
78552+ __u8 _reserved[4]; /* 0x3c */
78553 } __attribute__((packed));
78554
78555 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
78556diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
78557index 0e011eb..82681b1 100644
78558--- a/include/uapi/linux/swab.h
78559+++ b/include/uapi/linux/swab.h
78560@@ -43,7 +43,7 @@
78561 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
78562 */
78563
78564-static inline __attribute_const__ __u16 __fswab16(__u16 val)
78565+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
78566 {
78567 #ifdef __HAVE_BUILTIN_BSWAP16__
78568 return __builtin_bswap16(val);
78569@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
78570 #endif
78571 }
78572
78573-static inline __attribute_const__ __u32 __fswab32(__u32 val)
78574+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
78575 {
78576 #ifdef __HAVE_BUILTIN_BSWAP32__
78577 return __builtin_bswap32(val);
78578@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
78579 #endif
78580 }
78581
78582-static inline __attribute_const__ __u64 __fswab64(__u64 val)
78583+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
78584 {
78585 #ifdef __HAVE_BUILTIN_BSWAP64__
78586 return __builtin_bswap64(val);
78587diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
78588index 6d67213..8dab561 100644
78589--- a/include/uapi/linux/sysctl.h
78590+++ b/include/uapi/linux/sysctl.h
78591@@ -155,7 +155,11 @@ enum
78592 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
78593 };
78594
78595-
78596+#ifdef CONFIG_PAX_SOFTMODE
78597+enum {
78598+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
78599+};
78600+#endif
78601
78602 /* CTL_VM names: */
78603 enum
78604diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
78605index e4629b9..6958086 100644
78606--- a/include/uapi/linux/xattr.h
78607+++ b/include/uapi/linux/xattr.h
78608@@ -63,5 +63,9 @@
78609 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
78610 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
78611
78612+/* User namespace */
78613+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
78614+#define XATTR_PAX_FLAGS_SUFFIX "flags"
78615+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
78616
78617 #endif /* _UAPI_LINUX_XATTR_H */
78618diff --git a/include/video/udlfb.h b/include/video/udlfb.h
78619index f9466fa..f4e2b81 100644
78620--- a/include/video/udlfb.h
78621+++ b/include/video/udlfb.h
78622@@ -53,10 +53,10 @@ struct dlfb_data {
78623 u32 pseudo_palette[256];
78624 int blank_mode; /*one of FB_BLANK_ */
78625 /* blit-only rendering path metrics, exposed through sysfs */
78626- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
78627- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
78628- atomic_t bytes_sent; /* to usb, after compression including overhead */
78629- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
78630+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
78631+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
78632+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
78633+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
78634 };
78635
78636 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
78637diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
78638index 1a91850..28573f8 100644
78639--- a/include/video/uvesafb.h
78640+++ b/include/video/uvesafb.h
78641@@ -122,6 +122,7 @@ struct uvesafb_par {
78642 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
78643 u8 pmi_setpal; /* PMI for palette changes */
78644 u16 *pmi_base; /* protected mode interface location */
78645+ u8 *pmi_code; /* protected mode code location */
78646 void *pmi_start;
78647 void *pmi_pal;
78648 u8 *vbe_state_orig; /*
78649diff --git a/init/Kconfig b/init/Kconfig
78650index 2d9b831..ae4c8ac 100644
78651--- a/init/Kconfig
78652+++ b/init/Kconfig
78653@@ -1029,6 +1029,7 @@ endif # CGROUPS
78654
78655 config CHECKPOINT_RESTORE
78656 bool "Checkpoint/restore support" if EXPERT
78657+ depends on !GRKERNSEC
78658 default n
78659 help
78660 Enables additional kernel features in a sake of checkpoint/restore.
78661@@ -1516,7 +1517,7 @@ config SLUB_DEBUG
78662
78663 config COMPAT_BRK
78664 bool "Disable heap randomization"
78665- default y
78666+ default n
78667 help
78668 Randomizing heap placement makes heap exploits harder, but it
78669 also breaks ancient binaries (including anything libc5 based).
78670@@ -1779,7 +1780,7 @@ config INIT_ALL_POSSIBLE
78671 config STOP_MACHINE
78672 bool
78673 default y
78674- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
78675+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
78676 help
78677 Need stop_machine() primitive.
78678
78679diff --git a/init/Makefile b/init/Makefile
78680index 7bc47ee..6da2dc7 100644
78681--- a/init/Makefile
78682+++ b/init/Makefile
78683@@ -2,6 +2,9 @@
78684 # Makefile for the linux kernel.
78685 #
78686
78687+ccflags-y := $(GCC_PLUGINS_CFLAGS)
78688+asflags-y := $(GCC_PLUGINS_AFLAGS)
78689+
78690 obj-y := main.o version.o mounts.o
78691 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
78692 obj-y += noinitramfs.o
78693diff --git a/init/do_mounts.c b/init/do_mounts.c
78694index a2b49f2..03a0e17c 100644
78695--- a/init/do_mounts.c
78696+++ b/init/do_mounts.c
78697@@ -355,11 +355,11 @@ static void __init get_fs_names(char *page)
78698 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
78699 {
78700 struct super_block *s;
78701- int err = sys_mount(name, "/root", fs, flags, data);
78702+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
78703 if (err)
78704 return err;
78705
78706- sys_chdir("/root");
78707+ sys_chdir((const char __force_user *)"/root");
78708 s = current->fs->pwd.dentry->d_sb;
78709 ROOT_DEV = s->s_dev;
78710 printk(KERN_INFO
78711@@ -480,18 +480,18 @@ void __init change_floppy(char *fmt, ...)
78712 va_start(args, fmt);
78713 vsprintf(buf, fmt, args);
78714 va_end(args);
78715- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
78716+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
78717 if (fd >= 0) {
78718 sys_ioctl(fd, FDEJECT, 0);
78719 sys_close(fd);
78720 }
78721 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
78722- fd = sys_open("/dev/console", O_RDWR, 0);
78723+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
78724 if (fd >= 0) {
78725 sys_ioctl(fd, TCGETS, (long)&termios);
78726 termios.c_lflag &= ~ICANON;
78727 sys_ioctl(fd, TCSETSF, (long)&termios);
78728- sys_read(fd, &c, 1);
78729+ sys_read(fd, (char __user *)&c, 1);
78730 termios.c_lflag |= ICANON;
78731 sys_ioctl(fd, TCSETSF, (long)&termios);
78732 sys_close(fd);
78733@@ -585,6 +585,6 @@ void __init prepare_namespace(void)
78734 mount_root();
78735 out:
78736 devtmpfs_mount("dev");
78737- sys_mount(".", "/", NULL, MS_MOVE, NULL);
78738- sys_chroot(".");
78739+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
78740+ sys_chroot((const char __force_user *)".");
78741 }
78742diff --git a/init/do_mounts.h b/init/do_mounts.h
78743index f5b978a..69dbfe8 100644
78744--- a/init/do_mounts.h
78745+++ b/init/do_mounts.h
78746@@ -15,15 +15,15 @@ extern int root_mountflags;
78747
78748 static inline int create_dev(char *name, dev_t dev)
78749 {
78750- sys_unlink(name);
78751- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
78752+ sys_unlink((char __force_user *)name);
78753+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
78754 }
78755
78756 #if BITS_PER_LONG == 32
78757 static inline u32 bstat(char *name)
78758 {
78759 struct stat64 stat;
78760- if (sys_stat64(name, &stat) != 0)
78761+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
78762 return 0;
78763 if (!S_ISBLK(stat.st_mode))
78764 return 0;
78765@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
78766 static inline u32 bstat(char *name)
78767 {
78768 struct stat stat;
78769- if (sys_newstat(name, &stat) != 0)
78770+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
78771 return 0;
78772 if (!S_ISBLK(stat.st_mode))
78773 return 0;
78774diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
78775index 3e0878e..8a9d7a0 100644
78776--- a/init/do_mounts_initrd.c
78777+++ b/init/do_mounts_initrd.c
78778@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
78779 {
78780 sys_unshare(CLONE_FS | CLONE_FILES);
78781 /* stdin/stdout/stderr for /linuxrc */
78782- sys_open("/dev/console", O_RDWR, 0);
78783+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
78784 sys_dup(0);
78785 sys_dup(0);
78786 /* move initrd over / and chdir/chroot in initrd root */
78787- sys_chdir("/root");
78788- sys_mount(".", "/", NULL, MS_MOVE, NULL);
78789- sys_chroot(".");
78790+ sys_chdir((const char __force_user *)"/root");
78791+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
78792+ sys_chroot((const char __force_user *)".");
78793 sys_setsid();
78794 return 0;
78795 }
78796@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
78797 create_dev("/dev/root.old", Root_RAM0);
78798 /* mount initrd on rootfs' /root */
78799 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
78800- sys_mkdir("/old", 0700);
78801- sys_chdir("/old");
78802+ sys_mkdir((const char __force_user *)"/old", 0700);
78803+ sys_chdir((const char __force_user *)"/old");
78804
78805 /* try loading default modules from initrd */
78806 load_default_modules();
78807@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
78808 current->flags &= ~PF_FREEZER_SKIP;
78809
78810 /* move initrd to rootfs' /old */
78811- sys_mount("..", ".", NULL, MS_MOVE, NULL);
78812+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
78813 /* switch root and cwd back to / of rootfs */
78814- sys_chroot("..");
78815+ sys_chroot((const char __force_user *)"..");
78816
78817 if (new_decode_dev(real_root_dev) == Root_RAM0) {
78818- sys_chdir("/old");
78819+ sys_chdir((const char __force_user *)"/old");
78820 return;
78821 }
78822
78823- sys_chdir("/");
78824+ sys_chdir((const char __force_user *)"/");
78825 ROOT_DEV = new_decode_dev(real_root_dev);
78826 mount_root();
78827
78828 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
78829- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
78830+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
78831 if (!error)
78832 printk("okay\n");
78833 else {
78834- int fd = sys_open("/dev/root.old", O_RDWR, 0);
78835+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
78836 if (error == -ENOENT)
78837 printk("/initrd does not exist. Ignored.\n");
78838 else
78839 printk("failed\n");
78840 printk(KERN_NOTICE "Unmounting old root\n");
78841- sys_umount("/old", MNT_DETACH);
78842+ sys_umount((char __force_user *)"/old", MNT_DETACH);
78843 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
78844 if (fd < 0) {
78845 error = fd;
78846@@ -127,11 +127,11 @@ int __init initrd_load(void)
78847 * mounted in the normal path.
78848 */
78849 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
78850- sys_unlink("/initrd.image");
78851+ sys_unlink((const char __force_user *)"/initrd.image");
78852 handle_initrd();
78853 return 1;
78854 }
78855 }
78856- sys_unlink("/initrd.image");
78857+ sys_unlink((const char __force_user *)"/initrd.image");
78858 return 0;
78859 }
78860diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
78861index 8cb6db5..d729f50 100644
78862--- a/init/do_mounts_md.c
78863+++ b/init/do_mounts_md.c
78864@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
78865 partitioned ? "_d" : "", minor,
78866 md_setup_args[ent].device_names);
78867
78868- fd = sys_open(name, 0, 0);
78869+ fd = sys_open((char __force_user *)name, 0, 0);
78870 if (fd < 0) {
78871 printk(KERN_ERR "md: open failed - cannot start "
78872 "array %s\n", name);
78873@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
78874 * array without it
78875 */
78876 sys_close(fd);
78877- fd = sys_open(name, 0, 0);
78878+ fd = sys_open((char __force_user *)name, 0, 0);
78879 sys_ioctl(fd, BLKRRPART, 0);
78880 }
78881 sys_close(fd);
78882@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
78883
78884 wait_for_device_probe();
78885
78886- fd = sys_open("/dev/md0", 0, 0);
78887+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
78888 if (fd >= 0) {
78889 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
78890 sys_close(fd);
78891diff --git a/init/init_task.c b/init/init_task.c
78892index ba0a7f36..2bcf1d5 100644
78893--- a/init/init_task.c
78894+++ b/init/init_task.c
78895@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
78896 * Initial thread structure. Alignment of this is handled by a special
78897 * linker map entry.
78898 */
78899+#ifdef CONFIG_X86
78900+union thread_union init_thread_union __init_task_data;
78901+#else
78902 union thread_union init_thread_union __init_task_data =
78903 { INIT_THREAD_INFO(init_task) };
78904+#endif
78905diff --git a/init/initramfs.c b/init/initramfs.c
78906index a67ef9d..2d17ed9 100644
78907--- a/init/initramfs.c
78908+++ b/init/initramfs.c
78909@@ -84,7 +84,7 @@ static void __init free_hash(void)
78910 }
78911 }
78912
78913-static long __init do_utime(char *filename, time_t mtime)
78914+static long __init do_utime(char __force_user *filename, time_t mtime)
78915 {
78916 struct timespec t[2];
78917
78918@@ -119,7 +119,7 @@ static void __init dir_utime(void)
78919 struct dir_entry *de, *tmp;
78920 list_for_each_entry_safe(de, tmp, &dir_list, list) {
78921 list_del(&de->list);
78922- do_utime(de->name, de->mtime);
78923+ do_utime((char __force_user *)de->name, de->mtime);
78924 kfree(de->name);
78925 kfree(de);
78926 }
78927@@ -281,7 +281,7 @@ static int __init maybe_link(void)
78928 if (nlink >= 2) {
78929 char *old = find_link(major, minor, ino, mode, collected);
78930 if (old)
78931- return (sys_link(old, collected) < 0) ? -1 : 1;
78932+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
78933 }
78934 return 0;
78935 }
78936@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
78937 {
78938 struct stat st;
78939
78940- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
78941+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
78942 if (S_ISDIR(st.st_mode))
78943- sys_rmdir(path);
78944+ sys_rmdir((char __force_user *)path);
78945 else
78946- sys_unlink(path);
78947+ sys_unlink((char __force_user *)path);
78948 }
78949 }
78950
78951@@ -315,7 +315,7 @@ static int __init do_name(void)
78952 int openflags = O_WRONLY|O_CREAT;
78953 if (ml != 1)
78954 openflags |= O_TRUNC;
78955- wfd = sys_open(collected, openflags, mode);
78956+ wfd = sys_open((char __force_user *)collected, openflags, mode);
78957
78958 if (wfd >= 0) {
78959 sys_fchown(wfd, uid, gid);
78960@@ -327,17 +327,17 @@ static int __init do_name(void)
78961 }
78962 }
78963 } else if (S_ISDIR(mode)) {
78964- sys_mkdir(collected, mode);
78965- sys_chown(collected, uid, gid);
78966- sys_chmod(collected, mode);
78967+ sys_mkdir((char __force_user *)collected, mode);
78968+ sys_chown((char __force_user *)collected, uid, gid);
78969+ sys_chmod((char __force_user *)collected, mode);
78970 dir_add(collected, mtime);
78971 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
78972 S_ISFIFO(mode) || S_ISSOCK(mode)) {
78973 if (maybe_link() == 0) {
78974- sys_mknod(collected, mode, rdev);
78975- sys_chown(collected, uid, gid);
78976- sys_chmod(collected, mode);
78977- do_utime(collected, mtime);
78978+ sys_mknod((char __force_user *)collected, mode, rdev);
78979+ sys_chown((char __force_user *)collected, uid, gid);
78980+ sys_chmod((char __force_user *)collected, mode);
78981+ do_utime((char __force_user *)collected, mtime);
78982 }
78983 }
78984 return 0;
78985@@ -346,15 +346,15 @@ static int __init do_name(void)
78986 static int __init do_copy(void)
78987 {
78988 if (count >= body_len) {
78989- sys_write(wfd, victim, body_len);
78990+ sys_write(wfd, (char __force_user *)victim, body_len);
78991 sys_close(wfd);
78992- do_utime(vcollected, mtime);
78993+ do_utime((char __force_user *)vcollected, mtime);
78994 kfree(vcollected);
78995 eat(body_len);
78996 state = SkipIt;
78997 return 0;
78998 } else {
78999- sys_write(wfd, victim, count);
79000+ sys_write(wfd, (char __force_user *)victim, count);
79001 body_len -= count;
79002 eat(count);
79003 return 1;
79004@@ -365,9 +365,9 @@ static int __init do_symlink(void)
79005 {
79006 collected[N_ALIGN(name_len) + body_len] = '\0';
79007 clean_path(collected, 0);
79008- sys_symlink(collected + N_ALIGN(name_len), collected);
79009- sys_lchown(collected, uid, gid);
79010- do_utime(collected, mtime);
79011+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
79012+ sys_lchown((char __force_user *)collected, uid, gid);
79013+ do_utime((char __force_user *)collected, mtime);
79014 state = SkipIt;
79015 next_state = Reset;
79016 return 0;
79017@@ -583,7 +583,7 @@ static int __init populate_rootfs(void)
79018 {
79019 char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
79020 if (err)
79021- panic(err); /* Failed to decompress INTERNAL initramfs */
79022+ panic("%s", err); /* Failed to decompress INTERNAL initramfs */
79023 if (initrd_start) {
79024 #ifdef CONFIG_BLK_DEV_RAM
79025 int fd;
79026diff --git a/init/main.c b/init/main.c
79027index 9484f4b..0eac7c3 100644
79028--- a/init/main.c
79029+++ b/init/main.c
79030@@ -100,6 +100,8 @@ static inline void mark_rodata_ro(void) { }
79031 extern void tc_init(void);
79032 #endif
79033
79034+extern void grsecurity_init(void);
79035+
79036 /*
79037 * Debug helper: via this flag we know that we are in 'early bootup code'
79038 * where only the boot processor is running with IRQ disabled. This means
79039@@ -153,6 +155,74 @@ static int __init set_reset_devices(char *str)
79040
79041 __setup("reset_devices", set_reset_devices);
79042
79043+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
79044+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
79045+static int __init setup_grsec_proc_gid(char *str)
79046+{
79047+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
79048+ return 1;
79049+}
79050+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
79051+#endif
79052+
79053+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
79054+unsigned long pax_user_shadow_base __read_only;
79055+EXPORT_SYMBOL(pax_user_shadow_base);
79056+extern char pax_enter_kernel_user[];
79057+extern char pax_exit_kernel_user[];
79058+#endif
79059+
79060+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
79061+static int __init setup_pax_nouderef(char *str)
79062+{
79063+#ifdef CONFIG_X86_32
79064+ unsigned int cpu;
79065+ struct desc_struct *gdt;
79066+
79067+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
79068+ gdt = get_cpu_gdt_table(cpu);
79069+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
79070+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
79071+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
79072+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
79073+ }
79074+ loadsegment(ds, __KERNEL_DS);
79075+ loadsegment(es, __KERNEL_DS);
79076+ loadsegment(ss, __KERNEL_DS);
79077+#else
79078+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
79079+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
79080+ clone_pgd_mask = ~(pgdval_t)0UL;
79081+ pax_user_shadow_base = 0UL;
79082+ setup_clear_cpu_cap(X86_FEATURE_PCID);
79083+#endif
79084+
79085+ return 0;
79086+}
79087+early_param("pax_nouderef", setup_pax_nouderef);
79088+
79089+#ifdef CONFIG_X86_64
79090+static int __init setup_pax_weakuderef(char *str)
79091+{
79092+ if (clone_pgd_mask != ~(pgdval_t)0UL)
79093+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
79094+ return 1;
79095+}
79096+__setup("pax_weakuderef", setup_pax_weakuderef);
79097+#endif
79098+#endif
79099+
79100+#ifdef CONFIG_PAX_SOFTMODE
79101+int pax_softmode;
79102+
79103+static int __init setup_pax_softmode(char *str)
79104+{
79105+ get_option(&str, &pax_softmode);
79106+ return 1;
79107+}
79108+__setup("pax_softmode=", setup_pax_softmode);
79109+#endif
79110+
79111 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
79112 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
79113 static const char *panic_later, *panic_param;
79114@@ -655,8 +725,6 @@ static void __init do_ctors(void)
79115 bool initcall_debug;
79116 core_param(initcall_debug, initcall_debug, bool, 0644);
79117
79118-static char msgbuf[64];
79119-
79120 static int __init_or_module do_one_initcall_debug(initcall_t fn)
79121 {
79122 ktime_t calltime, delta, rettime;
79123@@ -679,23 +747,22 @@ int __init_or_module do_one_initcall(initcall_t fn)
79124 {
79125 int count = preempt_count();
79126 int ret;
79127+ const char *msg1 = "", *msg2 = "";
79128
79129 if (initcall_debug)
79130 ret = do_one_initcall_debug(fn);
79131 else
79132 ret = fn();
79133
79134- msgbuf[0] = 0;
79135-
79136 if (preempt_count() != count) {
79137- sprintf(msgbuf, "preemption imbalance ");
79138+ msg1 = " preemption imbalance";
79139 preempt_count() = count;
79140 }
79141 if (irqs_disabled()) {
79142- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
79143+ msg2 = " disabled interrupts";
79144 local_irq_enable();
79145 }
79146- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
79147+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
79148
79149 return ret;
79150 }
79151@@ -748,8 +815,14 @@ static void __init do_initcall_level(int level)
79152 level, level,
79153 &repair_env_string);
79154
79155- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
79156+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
79157 do_one_initcall(*fn);
79158+
79159+#ifdef LATENT_ENTROPY_PLUGIN
79160+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
79161+#endif
79162+
79163+ }
79164 }
79165
79166 static void __init do_initcalls(void)
79167@@ -783,8 +856,14 @@ static void __init do_pre_smp_initcalls(void)
79168 {
79169 initcall_t *fn;
79170
79171- for (fn = __initcall_start; fn < __initcall0_start; fn++)
79172+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
79173 do_one_initcall(*fn);
79174+
79175+#ifdef LATENT_ENTROPY_PLUGIN
79176+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
79177+#endif
79178+
79179+ }
79180 }
79181
79182 /*
79183@@ -802,8 +881,8 @@ static int run_init_process(const char *init_filename)
79184 {
79185 argv_init[0] = init_filename;
79186 return do_execve(init_filename,
79187- (const char __user *const __user *)argv_init,
79188- (const char __user *const __user *)envp_init);
79189+ (const char __user *const __force_user *)argv_init,
79190+ (const char __user *const __force_user *)envp_init);
79191 }
79192
79193 static noinline void __init kernel_init_freeable(void);
79194@@ -880,7 +959,7 @@ static noinline void __init kernel_init_freeable(void)
79195 do_basic_setup();
79196
79197 /* Open the /dev/console on the rootfs, this should never fail */
79198- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
79199+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
79200 pr_err("Warning: unable to open an initial console.\n");
79201
79202 (void) sys_dup(0);
79203@@ -893,11 +972,13 @@ static noinline void __init kernel_init_freeable(void)
79204 if (!ramdisk_execute_command)
79205 ramdisk_execute_command = "/init";
79206
79207- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
79208+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
79209 ramdisk_execute_command = NULL;
79210 prepare_namespace();
79211 }
79212
79213+ grsecurity_init();
79214+
79215 /*
79216 * Ok, we have completed the initial bootup, and
79217 * we're essentially up and running. Get rid of the
79218diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
79219index 130dfec..cc88451 100644
79220--- a/ipc/ipc_sysctl.c
79221+++ b/ipc/ipc_sysctl.c
79222@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
79223 static int proc_ipc_dointvec(ctl_table *table, int write,
79224 void __user *buffer, size_t *lenp, loff_t *ppos)
79225 {
79226- struct ctl_table ipc_table;
79227+ ctl_table_no_const ipc_table;
79228
79229 memcpy(&ipc_table, table, sizeof(ipc_table));
79230 ipc_table.data = get_ipc(table);
79231@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
79232 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
79233 void __user *buffer, size_t *lenp, loff_t *ppos)
79234 {
79235- struct ctl_table ipc_table;
79236+ ctl_table_no_const ipc_table;
79237
79238 memcpy(&ipc_table, table, sizeof(ipc_table));
79239 ipc_table.data = get_ipc(table);
79240@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
79241 static int proc_ipc_callback_dointvec(ctl_table *table, int write,
79242 void __user *buffer, size_t *lenp, loff_t *ppos)
79243 {
79244- struct ctl_table ipc_table;
79245+ ctl_table_no_const ipc_table;
79246 size_t lenp_bef = *lenp;
79247 int rc;
79248
79249@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
79250 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
79251 void __user *buffer, size_t *lenp, loff_t *ppos)
79252 {
79253- struct ctl_table ipc_table;
79254+ ctl_table_no_const ipc_table;
79255 memcpy(&ipc_table, table, sizeof(ipc_table));
79256 ipc_table.data = get_ipc(table);
79257
79258@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
79259 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
79260 void __user *buffer, size_t *lenp, loff_t *ppos)
79261 {
79262- struct ctl_table ipc_table;
79263+ ctl_table_no_const ipc_table;
79264 size_t lenp_bef = *lenp;
79265 int oldval;
79266 int rc;
79267diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
79268index 383d638..943fdbb 100644
79269--- a/ipc/mq_sysctl.c
79270+++ b/ipc/mq_sysctl.c
79271@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
79272 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
79273 void __user *buffer, size_t *lenp, loff_t *ppos)
79274 {
79275- struct ctl_table mq_table;
79276+ ctl_table_no_const mq_table;
79277 memcpy(&mq_table, table, sizeof(mq_table));
79278 mq_table.data = get_mq(table);
79279
79280diff --git a/ipc/mqueue.c b/ipc/mqueue.c
79281index e4e47f6..a85e0ad 100644
79282--- a/ipc/mqueue.c
79283+++ b/ipc/mqueue.c
79284@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
79285 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
79286 info->attr.mq_msgsize);
79287
79288+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
79289 spin_lock(&mq_lock);
79290 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
79291 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
79292diff --git a/ipc/msg.c b/ipc/msg.c
79293index d0c6d96..69a893c 100644
79294--- a/ipc/msg.c
79295+++ b/ipc/msg.c
79296@@ -296,18 +296,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
79297 return security_msg_queue_associate(msq, msgflg);
79298 }
79299
79300+static struct ipc_ops msg_ops = {
79301+ .getnew = newque,
79302+ .associate = msg_security,
79303+ .more_checks = NULL
79304+};
79305+
79306 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
79307 {
79308 struct ipc_namespace *ns;
79309- struct ipc_ops msg_ops;
79310 struct ipc_params msg_params;
79311
79312 ns = current->nsproxy->ipc_ns;
79313
79314- msg_ops.getnew = newque;
79315- msg_ops.associate = msg_security;
79316- msg_ops.more_checks = NULL;
79317-
79318 msg_params.key = key;
79319 msg_params.flg = msgflg;
79320
79321diff --git a/ipc/sem.c b/ipc/sem.c
79322index 70480a3..f4e8262 100644
79323--- a/ipc/sem.c
79324+++ b/ipc/sem.c
79325@@ -460,10 +460,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
79326 return 0;
79327 }
79328
79329+static struct ipc_ops sem_ops = {
79330+ .getnew = newary,
79331+ .associate = sem_security,
79332+ .more_checks = sem_more_checks
79333+};
79334+
79335 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
79336 {
79337 struct ipc_namespace *ns;
79338- struct ipc_ops sem_ops;
79339 struct ipc_params sem_params;
79340
79341 ns = current->nsproxy->ipc_ns;
79342@@ -471,10 +476,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
79343 if (nsems < 0 || nsems > ns->sc_semmsl)
79344 return -EINVAL;
79345
79346- sem_ops.getnew = newary;
79347- sem_ops.associate = sem_security;
79348- sem_ops.more_checks = sem_more_checks;
79349-
79350 sem_params.key = key;
79351 sem_params.flg = semflg;
79352 sem_params.u.nsems = nsems;
79353diff --git a/ipc/shm.c b/ipc/shm.c
79354index 7e199fa..180a1ca 100644
79355--- a/ipc/shm.c
79356+++ b/ipc/shm.c
79357@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
79358 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
79359 #endif
79360
79361+#ifdef CONFIG_GRKERNSEC
79362+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
79363+ const time_t shm_createtime, const kuid_t cuid,
79364+ const int shmid);
79365+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
79366+ const time_t shm_createtime);
79367+#endif
79368+
79369 void shm_init_ns(struct ipc_namespace *ns)
79370 {
79371 ns->shm_ctlmax = SHMMAX;
79372@@ -531,6 +539,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
79373 shp->shm_lprid = 0;
79374 shp->shm_atim = shp->shm_dtim = 0;
79375 shp->shm_ctim = get_seconds();
79376+#ifdef CONFIG_GRKERNSEC
79377+ {
79378+ struct timespec timeval;
79379+ do_posix_clock_monotonic_gettime(&timeval);
79380+
79381+ shp->shm_createtime = timeval.tv_sec;
79382+ }
79383+#endif
79384 shp->shm_segsz = size;
79385 shp->shm_nattch = 0;
79386 shp->shm_file = file;
79387@@ -582,18 +598,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
79388 return 0;
79389 }
79390
79391+static struct ipc_ops shm_ops = {
79392+ .getnew = newseg,
79393+ .associate = shm_security,
79394+ .more_checks = shm_more_checks
79395+};
79396+
79397 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
79398 {
79399 struct ipc_namespace *ns;
79400- struct ipc_ops shm_ops;
79401 struct ipc_params shm_params;
79402
79403 ns = current->nsproxy->ipc_ns;
79404
79405- shm_ops.getnew = newseg;
79406- shm_ops.associate = shm_security;
79407- shm_ops.more_checks = shm_more_checks;
79408-
79409 shm_params.key = key;
79410 shm_params.flg = shmflg;
79411 shm_params.u.size = size;
79412@@ -1014,6 +1031,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
79413 f_mode = FMODE_READ | FMODE_WRITE;
79414 }
79415 if (shmflg & SHM_EXEC) {
79416+
79417+#ifdef CONFIG_PAX_MPROTECT
79418+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
79419+ goto out;
79420+#endif
79421+
79422 prot |= PROT_EXEC;
79423 acc_mode |= S_IXUGO;
79424 }
79425@@ -1037,9 +1060,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
79426 if (err)
79427 goto out_unlock;
79428
79429+#ifdef CONFIG_GRKERNSEC
79430+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
79431+ shp->shm_perm.cuid, shmid) ||
79432+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
79433+ err = -EACCES;
79434+ goto out_unlock;
79435+ }
79436+#endif
79437+
79438 path = shp->shm_file->f_path;
79439 path_get(&path);
79440 shp->shm_nattch++;
79441+#ifdef CONFIG_GRKERNSEC
79442+ shp->shm_lapid = current->pid;
79443+#endif
79444 size = i_size_read(path.dentry->d_inode);
79445 shm_unlock(shp);
79446
79447diff --git a/kernel/acct.c b/kernel/acct.c
79448index 8d6e145..33e0b1e 100644
79449--- a/kernel/acct.c
79450+++ b/kernel/acct.c
79451@@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
79452 */
79453 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
79454 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
79455- file->f_op->write(file, (char *)&ac,
79456+ file->f_op->write(file, (char __force_user *)&ac,
79457 sizeof(acct_t), &file->f_pos);
79458 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
79459 set_fs(fs);
79460diff --git a/kernel/audit.c b/kernel/audit.c
79461index 91e53d0..d9e3ec4 100644
79462--- a/kernel/audit.c
79463+++ b/kernel/audit.c
79464@@ -118,7 +118,7 @@ u32 audit_sig_sid = 0;
79465 3) suppressed due to audit_rate_limit
79466 4) suppressed due to audit_backlog_limit
79467 */
79468-static atomic_t audit_lost = ATOMIC_INIT(0);
79469+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
79470
79471 /* The netlink socket. */
79472 static struct sock *audit_sock;
79473@@ -240,7 +240,7 @@ void audit_log_lost(const char *message)
79474 unsigned long now;
79475 int print;
79476
79477- atomic_inc(&audit_lost);
79478+ atomic_inc_unchecked(&audit_lost);
79479
79480 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
79481
79482@@ -259,7 +259,7 @@ void audit_log_lost(const char *message)
79483 printk(KERN_WARNING
79484 "audit: audit_lost=%d audit_rate_limit=%d "
79485 "audit_backlog_limit=%d\n",
79486- atomic_read(&audit_lost),
79487+ atomic_read_unchecked(&audit_lost),
79488 audit_rate_limit,
79489 audit_backlog_limit);
79490 audit_panic(message);
79491@@ -664,7 +664,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
79492 status_set.pid = audit_pid;
79493 status_set.rate_limit = audit_rate_limit;
79494 status_set.backlog_limit = audit_backlog_limit;
79495- status_set.lost = atomic_read(&audit_lost);
79496+ status_set.lost = atomic_read_unchecked(&audit_lost);
79497 status_set.backlog = skb_queue_len(&audit_skb_queue);
79498 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
79499 &status_set, sizeof(status_set));
79500diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
79501index 6bd4a90..0ee9eff 100644
79502--- a/kernel/auditfilter.c
79503+++ b/kernel/auditfilter.c
79504@@ -423,7 +423,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
79505 f->lsm_rule = NULL;
79506
79507 /* Support legacy tests for a valid loginuid */
79508- if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295)) {
79509+ if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295U)) {
79510 f->type = AUDIT_LOGINUID_SET;
79511 f->val = 0;
79512 }
79513diff --git a/kernel/auditsc.c b/kernel/auditsc.c
79514index 3c8a601..3a416f6 100644
79515--- a/kernel/auditsc.c
79516+++ b/kernel/auditsc.c
79517@@ -1956,7 +1956,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
79518 }
79519
79520 /* global counter which is incremented every time something logs in */
79521-static atomic_t session_id = ATOMIC_INIT(0);
79522+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
79523
79524 /**
79525 * audit_set_loginuid - set current task's audit_context loginuid
79526@@ -1980,7 +1980,7 @@ int audit_set_loginuid(kuid_t loginuid)
79527 return -EPERM;
79528 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
79529
79530- sessionid = atomic_inc_return(&session_id);
79531+ sessionid = atomic_inc_return_unchecked(&session_id);
79532 if (context && context->in_syscall) {
79533 struct audit_buffer *ab;
79534
79535diff --git a/kernel/capability.c b/kernel/capability.c
79536index f6c2ce5..982c0f9 100644
79537--- a/kernel/capability.c
79538+++ b/kernel/capability.c
79539@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
79540 * before modification is attempted and the application
79541 * fails.
79542 */
79543+ if (tocopy > ARRAY_SIZE(kdata))
79544+ return -EFAULT;
79545+
79546 if (copy_to_user(dataptr, kdata, tocopy
79547 * sizeof(struct __user_cap_data_struct))) {
79548 return -EFAULT;
79549@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
79550 int ret;
79551
79552 rcu_read_lock();
79553- ret = security_capable(__task_cred(t), ns, cap);
79554+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
79555+ gr_task_is_capable(t, __task_cred(t), cap);
79556 rcu_read_unlock();
79557
79558- return (ret == 0);
79559+ return ret;
79560 }
79561
79562 /**
79563@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
79564 int ret;
79565
79566 rcu_read_lock();
79567- ret = security_capable_noaudit(__task_cred(t), ns, cap);
79568+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
79569 rcu_read_unlock();
79570
79571- return (ret == 0);
79572+ return ret;
79573 }
79574
79575 /**
79576@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
79577 BUG();
79578 }
79579
79580- if (security_capable(current_cred(), ns, cap) == 0) {
79581+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
79582 current->flags |= PF_SUPERPRIV;
79583 return true;
79584 }
79585@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
79586 }
79587 EXPORT_SYMBOL(ns_capable);
79588
79589+bool ns_capable_nolog(struct user_namespace *ns, int cap)
79590+{
79591+ if (unlikely(!cap_valid(cap))) {
79592+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
79593+ BUG();
79594+ }
79595+
79596+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
79597+ current->flags |= PF_SUPERPRIV;
79598+ return true;
79599+ }
79600+ return false;
79601+}
79602+EXPORT_SYMBOL(ns_capable_nolog);
79603+
79604 /**
79605 * file_ns_capable - Determine if the file's opener had a capability in effect
79606 * @file: The file we want to check
79607@@ -432,6 +451,12 @@ bool capable(int cap)
79608 }
79609 EXPORT_SYMBOL(capable);
79610
79611+bool capable_nolog(int cap)
79612+{
79613+ return ns_capable_nolog(&init_user_ns, cap);
79614+}
79615+EXPORT_SYMBOL(capable_nolog);
79616+
79617 /**
79618 * nsown_capable - Check superior capability to one's own user_ns
79619 * @cap: The capability in question
79620@@ -464,3 +489,10 @@ bool inode_capable(const struct inode *inode, int cap)
79621
79622 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
79623 }
79624+
79625+bool inode_capable_nolog(const struct inode *inode, int cap)
79626+{
79627+ struct user_namespace *ns = current_user_ns();
79628+
79629+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
79630+}
79631diff --git a/kernel/cgroup.c b/kernel/cgroup.c
79632index 2e9b387..61817b1 100644
79633--- a/kernel/cgroup.c
79634+++ b/kernel/cgroup.c
79635@@ -5398,7 +5398,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
79636 struct css_set *cg = link->cg;
79637 struct task_struct *task;
79638 int count = 0;
79639- seq_printf(seq, "css_set %p\n", cg);
79640+ seq_printf(seq, "css_set %pK\n", cg);
79641 list_for_each_entry(task, &cg->tasks, cg_list) {
79642 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
79643 seq_puts(seq, " ...\n");
79644diff --git a/kernel/compat.c b/kernel/compat.c
79645index 0a09e48..f44f3f0 100644
79646--- a/kernel/compat.c
79647+++ b/kernel/compat.c
79648@@ -13,6 +13,7 @@
79649
79650 #include <linux/linkage.h>
79651 #include <linux/compat.h>
79652+#include <linux/module.h>
79653 #include <linux/errno.h>
79654 #include <linux/time.h>
79655 #include <linux/signal.h>
79656@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
79657 mm_segment_t oldfs;
79658 long ret;
79659
79660- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
79661+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
79662 oldfs = get_fs();
79663 set_fs(KERNEL_DS);
79664 ret = hrtimer_nanosleep_restart(restart);
79665@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
79666 oldfs = get_fs();
79667 set_fs(KERNEL_DS);
79668 ret = hrtimer_nanosleep(&tu,
79669- rmtp ? (struct timespec __user *)&rmt : NULL,
79670+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
79671 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
79672 set_fs(oldfs);
79673
79674@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
79675 mm_segment_t old_fs = get_fs();
79676
79677 set_fs(KERNEL_DS);
79678- ret = sys_sigpending((old_sigset_t __user *) &s);
79679+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
79680 set_fs(old_fs);
79681 if (ret == 0)
79682 ret = put_user(s, set);
79683@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
79684 mm_segment_t old_fs = get_fs();
79685
79686 set_fs(KERNEL_DS);
79687- ret = sys_old_getrlimit(resource, &r);
79688+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
79689 set_fs(old_fs);
79690
79691 if (!ret) {
79692@@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
79693 set_fs (KERNEL_DS);
79694 ret = sys_wait4(pid,
79695 (stat_addr ?
79696- (unsigned int __user *) &status : NULL),
79697- options, (struct rusage __user *) &r);
79698+ (unsigned int __force_user *) &status : NULL),
79699+ options, (struct rusage __force_user *) &r);
79700 set_fs (old_fs);
79701
79702 if (ret > 0) {
79703@@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
79704 memset(&info, 0, sizeof(info));
79705
79706 set_fs(KERNEL_DS);
79707- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
79708- uru ? (struct rusage __user *)&ru : NULL);
79709+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
79710+ uru ? (struct rusage __force_user *)&ru : NULL);
79711 set_fs(old_fs);
79712
79713 if ((ret < 0) || (info.si_signo == 0))
79714@@ -695,8 +696,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
79715 oldfs = get_fs();
79716 set_fs(KERNEL_DS);
79717 err = sys_timer_settime(timer_id, flags,
79718- (struct itimerspec __user *) &newts,
79719- (struct itimerspec __user *) &oldts);
79720+ (struct itimerspec __force_user *) &newts,
79721+ (struct itimerspec __force_user *) &oldts);
79722 set_fs(oldfs);
79723 if (!err && old && put_compat_itimerspec(old, &oldts))
79724 return -EFAULT;
79725@@ -713,7 +714,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
79726 oldfs = get_fs();
79727 set_fs(KERNEL_DS);
79728 err = sys_timer_gettime(timer_id,
79729- (struct itimerspec __user *) &ts);
79730+ (struct itimerspec __force_user *) &ts);
79731 set_fs(oldfs);
79732 if (!err && put_compat_itimerspec(setting, &ts))
79733 return -EFAULT;
79734@@ -732,7 +733,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
79735 oldfs = get_fs();
79736 set_fs(KERNEL_DS);
79737 err = sys_clock_settime(which_clock,
79738- (struct timespec __user *) &ts);
79739+ (struct timespec __force_user *) &ts);
79740 set_fs(oldfs);
79741 return err;
79742 }
79743@@ -747,7 +748,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
79744 oldfs = get_fs();
79745 set_fs(KERNEL_DS);
79746 err = sys_clock_gettime(which_clock,
79747- (struct timespec __user *) &ts);
79748+ (struct timespec __force_user *) &ts);
79749 set_fs(oldfs);
79750 if (!err && put_compat_timespec(&ts, tp))
79751 return -EFAULT;
79752@@ -767,7 +768,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
79753
79754 oldfs = get_fs();
79755 set_fs(KERNEL_DS);
79756- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
79757+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
79758 set_fs(oldfs);
79759
79760 err = compat_put_timex(utp, &txc);
79761@@ -787,7 +788,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
79762 oldfs = get_fs();
79763 set_fs(KERNEL_DS);
79764 err = sys_clock_getres(which_clock,
79765- (struct timespec __user *) &ts);
79766+ (struct timespec __force_user *) &ts);
79767 set_fs(oldfs);
79768 if (!err && tp && put_compat_timespec(&ts, tp))
79769 return -EFAULT;
79770@@ -799,9 +800,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
79771 long err;
79772 mm_segment_t oldfs;
79773 struct timespec tu;
79774- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
79775+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
79776
79777- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
79778+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
79779 oldfs = get_fs();
79780 set_fs(KERNEL_DS);
79781 err = clock_nanosleep_restart(restart);
79782@@ -833,8 +834,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
79783 oldfs = get_fs();
79784 set_fs(KERNEL_DS);
79785 err = sys_clock_nanosleep(which_clock, flags,
79786- (struct timespec __user *) &in,
79787- (struct timespec __user *) &out);
79788+ (struct timespec __force_user *) &in,
79789+ (struct timespec __force_user *) &out);
79790 set_fs(oldfs);
79791
79792 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
79793diff --git a/kernel/configs.c b/kernel/configs.c
79794index c18b1f1..b9a0132 100644
79795--- a/kernel/configs.c
79796+++ b/kernel/configs.c
79797@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
79798 struct proc_dir_entry *entry;
79799
79800 /* create the current config file */
79801+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
79802+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
79803+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
79804+ &ikconfig_file_ops);
79805+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79806+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
79807+ &ikconfig_file_ops);
79808+#endif
79809+#else
79810 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
79811 &ikconfig_file_ops);
79812+#endif
79813+
79814 if (!entry)
79815 return -ENOMEM;
79816
79817diff --git a/kernel/cred.c b/kernel/cred.c
79818index e0573a4..3874e41 100644
79819--- a/kernel/cred.c
79820+++ b/kernel/cred.c
79821@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
79822 validate_creds(cred);
79823 alter_cred_subscribers(cred, -1);
79824 put_cred(cred);
79825+
79826+#ifdef CONFIG_GRKERNSEC_SETXID
79827+ cred = (struct cred *) tsk->delayed_cred;
79828+ if (cred != NULL) {
79829+ tsk->delayed_cred = NULL;
79830+ validate_creds(cred);
79831+ alter_cred_subscribers(cred, -1);
79832+ put_cred(cred);
79833+ }
79834+#endif
79835 }
79836
79837 /**
79838@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
79839 * Always returns 0 thus allowing this function to be tail-called at the end
79840 * of, say, sys_setgid().
79841 */
79842-int commit_creds(struct cred *new)
79843+static int __commit_creds(struct cred *new)
79844 {
79845 struct task_struct *task = current;
79846 const struct cred *old = task->real_cred;
79847@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
79848
79849 get_cred(new); /* we will require a ref for the subj creds too */
79850
79851+ gr_set_role_label(task, new->uid, new->gid);
79852+
79853 /* dumpability changes */
79854 if (!uid_eq(old->euid, new->euid) ||
79855 !gid_eq(old->egid, new->egid) ||
79856@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
79857 put_cred(old);
79858 return 0;
79859 }
79860+#ifdef CONFIG_GRKERNSEC_SETXID
79861+extern int set_user(struct cred *new);
79862+
79863+void gr_delayed_cred_worker(void)
79864+{
79865+ const struct cred *new = current->delayed_cred;
79866+ struct cred *ncred;
79867+
79868+ current->delayed_cred = NULL;
79869+
79870+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
79871+ // from doing get_cred on it when queueing this
79872+ put_cred(new);
79873+ return;
79874+ } else if (new == NULL)
79875+ return;
79876+
79877+ ncred = prepare_creds();
79878+ if (!ncred)
79879+ goto die;
79880+ // uids
79881+ ncred->uid = new->uid;
79882+ ncred->euid = new->euid;
79883+ ncred->suid = new->suid;
79884+ ncred->fsuid = new->fsuid;
79885+ // gids
79886+ ncred->gid = new->gid;
79887+ ncred->egid = new->egid;
79888+ ncred->sgid = new->sgid;
79889+ ncred->fsgid = new->fsgid;
79890+ // groups
79891+ if (set_groups(ncred, new->group_info) < 0) {
79892+ abort_creds(ncred);
79893+ goto die;
79894+ }
79895+ // caps
79896+ ncred->securebits = new->securebits;
79897+ ncred->cap_inheritable = new->cap_inheritable;
79898+ ncred->cap_permitted = new->cap_permitted;
79899+ ncred->cap_effective = new->cap_effective;
79900+ ncred->cap_bset = new->cap_bset;
79901+
79902+ if (set_user(ncred)) {
79903+ abort_creds(ncred);
79904+ goto die;
79905+ }
79906+
79907+ // from doing get_cred on it when queueing this
79908+ put_cred(new);
79909+
79910+ __commit_creds(ncred);
79911+ return;
79912+die:
79913+ // from doing get_cred on it when queueing this
79914+ put_cred(new);
79915+ do_group_exit(SIGKILL);
79916+}
79917+#endif
79918+
79919+int commit_creds(struct cred *new)
79920+{
79921+#ifdef CONFIG_GRKERNSEC_SETXID
79922+ int ret;
79923+ int schedule_it = 0;
79924+ struct task_struct *t;
79925+
79926+ /* we won't get called with tasklist_lock held for writing
79927+ and interrupts disabled as the cred struct in that case is
79928+ init_cred
79929+ */
79930+ if (grsec_enable_setxid && !current_is_single_threaded() &&
79931+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
79932+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
79933+ schedule_it = 1;
79934+ }
79935+ ret = __commit_creds(new);
79936+ if (schedule_it) {
79937+ rcu_read_lock();
79938+ read_lock(&tasklist_lock);
79939+ for (t = next_thread(current); t != current;
79940+ t = next_thread(t)) {
79941+ if (t->delayed_cred == NULL) {
79942+ t->delayed_cred = get_cred(new);
79943+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
79944+ set_tsk_need_resched(t);
79945+ }
79946+ }
79947+ read_unlock(&tasklist_lock);
79948+ rcu_read_unlock();
79949+ }
79950+ return ret;
79951+#else
79952+ return __commit_creds(new);
79953+#endif
79954+}
79955+
79956 EXPORT_SYMBOL(commit_creds);
79957
79958 /**
79959diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
79960index 0506d44..2c20034 100644
79961--- a/kernel/debug/debug_core.c
79962+++ b/kernel/debug/debug_core.c
79963@@ -123,7 +123,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
79964 */
79965 static atomic_t masters_in_kgdb;
79966 static atomic_t slaves_in_kgdb;
79967-static atomic_t kgdb_break_tasklet_var;
79968+static atomic_unchecked_t kgdb_break_tasklet_var;
79969 atomic_t kgdb_setting_breakpoint;
79970
79971 struct task_struct *kgdb_usethread;
79972@@ -133,7 +133,7 @@ int kgdb_single_step;
79973 static pid_t kgdb_sstep_pid;
79974
79975 /* to keep track of the CPU which is doing the single stepping*/
79976-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
79977+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
79978
79979 /*
79980 * If you are debugging a problem where roundup (the collection of
79981@@ -541,7 +541,7 @@ return_normal:
79982 * kernel will only try for the value of sstep_tries before
79983 * giving up and continuing on.
79984 */
79985- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
79986+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
79987 (kgdb_info[cpu].task &&
79988 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
79989 atomic_set(&kgdb_active, -1);
79990@@ -635,8 +635,8 @@ cpu_master_loop:
79991 }
79992
79993 kgdb_restore:
79994- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
79995- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
79996+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
79997+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
79998 if (kgdb_info[sstep_cpu].task)
79999 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
80000 else
80001@@ -888,18 +888,18 @@ static void kgdb_unregister_callbacks(void)
80002 static void kgdb_tasklet_bpt(unsigned long ing)
80003 {
80004 kgdb_breakpoint();
80005- atomic_set(&kgdb_break_tasklet_var, 0);
80006+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
80007 }
80008
80009 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
80010
80011 void kgdb_schedule_breakpoint(void)
80012 {
80013- if (atomic_read(&kgdb_break_tasklet_var) ||
80014+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
80015 atomic_read(&kgdb_active) != -1 ||
80016 atomic_read(&kgdb_setting_breakpoint))
80017 return;
80018- atomic_inc(&kgdb_break_tasklet_var);
80019+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
80020 tasklet_schedule(&kgdb_tasklet_breakpoint);
80021 }
80022 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
80023diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
80024index 00eb8f7..d7e3244 100644
80025--- a/kernel/debug/kdb/kdb_main.c
80026+++ b/kernel/debug/kdb/kdb_main.c
80027@@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
80028 continue;
80029
80030 kdb_printf("%-20s%8u 0x%p ", mod->name,
80031- mod->core_size, (void *)mod);
80032+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
80033 #ifdef CONFIG_MODULE_UNLOAD
80034 kdb_printf("%4ld ", module_refcount(mod));
80035 #endif
80036@@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
80037 kdb_printf(" (Loading)");
80038 else
80039 kdb_printf(" (Live)");
80040- kdb_printf(" 0x%p", mod->module_core);
80041+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
80042
80043 #ifdef CONFIG_MODULE_UNLOAD
80044 {
80045diff --git a/kernel/events/core.c b/kernel/events/core.c
80046index e76e495..cbfe63a 100644
80047--- a/kernel/events/core.c
80048+++ b/kernel/events/core.c
80049@@ -156,8 +156,15 @@ static struct srcu_struct pmus_srcu;
80050 * 0 - disallow raw tracepoint access for unpriv
80051 * 1 - disallow cpu events for unpriv
80052 * 2 - disallow kernel profiling for unpriv
80053+ * 3 - disallow all unpriv perf event use
80054 */
80055-int sysctl_perf_event_paranoid __read_mostly = 1;
80056+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
80057+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
80058+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
80059+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
80060+#else
80061+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
80062+#endif
80063
80064 /* Minimum for 512 kiB + 1 user control page */
80065 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
80066@@ -184,7 +191,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
80067 return 0;
80068 }
80069
80070-static atomic64_t perf_event_id;
80071+static atomic64_unchecked_t perf_event_id;
80072
80073 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
80074 enum event_type_t event_type);
80075@@ -2747,7 +2754,7 @@ static void __perf_event_read(void *info)
80076
80077 static inline u64 perf_event_count(struct perf_event *event)
80078 {
80079- return local64_read(&event->count) + atomic64_read(&event->child_count);
80080+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
80081 }
80082
80083 static u64 perf_event_read(struct perf_event *event)
80084@@ -3093,9 +3100,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
80085 mutex_lock(&event->child_mutex);
80086 total += perf_event_read(event);
80087 *enabled += event->total_time_enabled +
80088- atomic64_read(&event->child_total_time_enabled);
80089+ atomic64_read_unchecked(&event->child_total_time_enabled);
80090 *running += event->total_time_running +
80091- atomic64_read(&event->child_total_time_running);
80092+ atomic64_read_unchecked(&event->child_total_time_running);
80093
80094 list_for_each_entry(child, &event->child_list, child_list) {
80095 total += perf_event_read(child);
80096@@ -3481,10 +3488,10 @@ void perf_event_update_userpage(struct perf_event *event)
80097 userpg->offset -= local64_read(&event->hw.prev_count);
80098
80099 userpg->time_enabled = enabled +
80100- atomic64_read(&event->child_total_time_enabled);
80101+ atomic64_read_unchecked(&event->child_total_time_enabled);
80102
80103 userpg->time_running = running +
80104- atomic64_read(&event->child_total_time_running);
80105+ atomic64_read_unchecked(&event->child_total_time_running);
80106
80107 arch_perf_update_userpage(userpg, now);
80108
80109@@ -4034,7 +4041,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
80110
80111 /* Data. */
80112 sp = perf_user_stack_pointer(regs);
80113- rem = __output_copy_user(handle, (void *) sp, dump_size);
80114+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
80115 dyn_size = dump_size - rem;
80116
80117 perf_output_skip(handle, rem);
80118@@ -4122,11 +4129,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
80119 values[n++] = perf_event_count(event);
80120 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
80121 values[n++] = enabled +
80122- atomic64_read(&event->child_total_time_enabled);
80123+ atomic64_read_unchecked(&event->child_total_time_enabled);
80124 }
80125 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
80126 values[n++] = running +
80127- atomic64_read(&event->child_total_time_running);
80128+ atomic64_read_unchecked(&event->child_total_time_running);
80129 }
80130 if (read_format & PERF_FORMAT_ID)
80131 values[n++] = primary_event_id(event);
80132@@ -4835,12 +4842,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
80133 * need to add enough zero bytes after the string to handle
80134 * the 64bit alignment we do later.
80135 */
80136- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
80137+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
80138 if (!buf) {
80139 name = strncpy(tmp, "//enomem", sizeof(tmp));
80140 goto got_name;
80141 }
80142- name = d_path(&file->f_path, buf, PATH_MAX);
80143+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
80144 if (IS_ERR(name)) {
80145 name = strncpy(tmp, "//toolong", sizeof(tmp));
80146 goto got_name;
80147@@ -6262,7 +6269,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
80148 event->parent = parent_event;
80149
80150 event->ns = get_pid_ns(task_active_pid_ns(current));
80151- event->id = atomic64_inc_return(&perf_event_id);
80152+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
80153
80154 event->state = PERF_EVENT_STATE_INACTIVE;
80155
80156@@ -6572,6 +6579,11 @@ SYSCALL_DEFINE5(perf_event_open,
80157 if (flags & ~PERF_FLAG_ALL)
80158 return -EINVAL;
80159
80160+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
80161+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
80162+ return -EACCES;
80163+#endif
80164+
80165 err = perf_copy_attr(attr_uptr, &attr);
80166 if (err)
80167 return err;
80168@@ -6904,10 +6916,10 @@ static void sync_child_event(struct perf_event *child_event,
80169 /*
80170 * Add back the child's count to the parent's count:
80171 */
80172- atomic64_add(child_val, &parent_event->child_count);
80173- atomic64_add(child_event->total_time_enabled,
80174+ atomic64_add_unchecked(child_val, &parent_event->child_count);
80175+ atomic64_add_unchecked(child_event->total_time_enabled,
80176 &parent_event->child_total_time_enabled);
80177- atomic64_add(child_event->total_time_running,
80178+ atomic64_add_unchecked(child_event->total_time_running,
80179 &parent_event->child_total_time_running);
80180
80181 /*
80182diff --git a/kernel/events/internal.h b/kernel/events/internal.h
80183index ca65997..60df03d 100644
80184--- a/kernel/events/internal.h
80185+++ b/kernel/events/internal.h
80186@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
80187 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
80188 }
80189
80190-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
80191-static inline unsigned int \
80192+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
80193+static inline unsigned long \
80194 func_name(struct perf_output_handle *handle, \
80195- const void *buf, unsigned int len) \
80196+ const void user *buf, unsigned long len) \
80197 { \
80198 unsigned long size, written; \
80199 \
80200@@ -116,17 +116,17 @@ static inline int memcpy_common(void *dst, const void *src, size_t n)
80201 return n;
80202 }
80203
80204-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
80205+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
80206
80207 #define MEMCPY_SKIP(dst, src, n) (n)
80208
80209-DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)
80210+DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP, )
80211
80212 #ifndef arch_perf_out_copy_user
80213 #define arch_perf_out_copy_user __copy_from_user_inatomic
80214 #endif
80215
80216-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
80217+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
80218
80219 /* Callchain handling */
80220 extern struct perf_callchain_entry *
80221diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
80222index f356974..cb8c570 100644
80223--- a/kernel/events/uprobes.c
80224+++ b/kernel/events/uprobes.c
80225@@ -1556,7 +1556,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
80226 {
80227 struct page *page;
80228 uprobe_opcode_t opcode;
80229- int result;
80230+ long result;
80231
80232 pagefault_disable();
80233 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
80234diff --git a/kernel/exit.c b/kernel/exit.c
80235index 7bb73f9..d7978ed 100644
80236--- a/kernel/exit.c
80237+++ b/kernel/exit.c
80238@@ -172,6 +172,10 @@ void release_task(struct task_struct * p)
80239 struct task_struct *leader;
80240 int zap_leader;
80241 repeat:
80242+#ifdef CONFIG_NET
80243+ gr_del_task_from_ip_table(p);
80244+#endif
80245+
80246 /* don't need to get the RCU readlock here - the process is dead and
80247 * can't be modifying its own credentials. But shut RCU-lockdep up */
80248 rcu_read_lock();
80249@@ -340,7 +344,7 @@ int allow_signal(int sig)
80250 * know it'll be handled, so that they don't get converted to
80251 * SIGKILL or just silently dropped.
80252 */
80253- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
80254+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
80255 recalc_sigpending();
80256 spin_unlock_irq(&current->sighand->siglock);
80257 return 0;
80258@@ -709,6 +713,8 @@ void do_exit(long code)
80259 struct task_struct *tsk = current;
80260 int group_dead;
80261
80262+ set_fs(USER_DS);
80263+
80264 profile_task_exit(tsk);
80265
80266 WARN_ON(blk_needs_flush_plug(tsk));
80267@@ -725,7 +731,6 @@ void do_exit(long code)
80268 * mm_release()->clear_child_tid() from writing to a user-controlled
80269 * kernel address.
80270 */
80271- set_fs(USER_DS);
80272
80273 ptrace_event(PTRACE_EVENT_EXIT, code);
80274
80275@@ -784,6 +789,9 @@ void do_exit(long code)
80276 tsk->exit_code = code;
80277 taskstats_exit(tsk, group_dead);
80278
80279+ gr_acl_handle_psacct(tsk, code);
80280+ gr_acl_handle_exit();
80281+
80282 exit_mm(tsk);
80283
80284 if (group_dead)
80285@@ -905,7 +913,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
80286 * Take down every thread in the group. This is called by fatal signals
80287 * as well as by sys_exit_group (below).
80288 */
80289-void
80290+__noreturn void
80291 do_group_exit(int exit_code)
80292 {
80293 struct signal_struct *sig = current->signal;
80294diff --git a/kernel/fork.c b/kernel/fork.c
80295index ffbc090..08ceeee 100644
80296--- a/kernel/fork.c
80297+++ b/kernel/fork.c
80298@@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
80299 *stackend = STACK_END_MAGIC; /* for overflow detection */
80300
80301 #ifdef CONFIG_CC_STACKPROTECTOR
80302- tsk->stack_canary = get_random_int();
80303+ tsk->stack_canary = pax_get_random_long();
80304 #endif
80305
80306 /*
80307@@ -345,13 +345,81 @@ free_tsk:
80308 }
80309
80310 #ifdef CONFIG_MMU
80311+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
80312+{
80313+ struct vm_area_struct *tmp;
80314+ unsigned long charge;
80315+ struct mempolicy *pol;
80316+ struct file *file;
80317+
80318+ charge = 0;
80319+ if (mpnt->vm_flags & VM_ACCOUNT) {
80320+ unsigned long len = vma_pages(mpnt);
80321+
80322+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
80323+ goto fail_nomem;
80324+ charge = len;
80325+ }
80326+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
80327+ if (!tmp)
80328+ goto fail_nomem;
80329+ *tmp = *mpnt;
80330+ tmp->vm_mm = mm;
80331+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
80332+ pol = mpol_dup(vma_policy(mpnt));
80333+ if (IS_ERR(pol))
80334+ goto fail_nomem_policy;
80335+ vma_set_policy(tmp, pol);
80336+ if (anon_vma_fork(tmp, mpnt))
80337+ goto fail_nomem_anon_vma_fork;
80338+ tmp->vm_flags &= ~VM_LOCKED;
80339+ tmp->vm_next = tmp->vm_prev = NULL;
80340+ tmp->vm_mirror = NULL;
80341+ file = tmp->vm_file;
80342+ if (file) {
80343+ struct inode *inode = file_inode(file);
80344+ struct address_space *mapping = file->f_mapping;
80345+
80346+ get_file(file);
80347+ if (tmp->vm_flags & VM_DENYWRITE)
80348+ atomic_dec(&inode->i_writecount);
80349+ mutex_lock(&mapping->i_mmap_mutex);
80350+ if (tmp->vm_flags & VM_SHARED)
80351+ mapping->i_mmap_writable++;
80352+ flush_dcache_mmap_lock(mapping);
80353+ /* insert tmp into the share list, just after mpnt */
80354+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
80355+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
80356+ else
80357+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
80358+ flush_dcache_mmap_unlock(mapping);
80359+ mutex_unlock(&mapping->i_mmap_mutex);
80360+ }
80361+
80362+ /*
80363+ * Clear hugetlb-related page reserves for children. This only
80364+ * affects MAP_PRIVATE mappings. Faults generated by the child
80365+ * are not guaranteed to succeed, even if read-only
80366+ */
80367+ if (is_vm_hugetlb_page(tmp))
80368+ reset_vma_resv_huge_pages(tmp);
80369+
80370+ return tmp;
80371+
80372+fail_nomem_anon_vma_fork:
80373+ mpol_put(pol);
80374+fail_nomem_policy:
80375+ kmem_cache_free(vm_area_cachep, tmp);
80376+fail_nomem:
80377+ vm_unacct_memory(charge);
80378+ return NULL;
80379+}
80380+
80381 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
80382 {
80383 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
80384 struct rb_node **rb_link, *rb_parent;
80385 int retval;
80386- unsigned long charge;
80387- struct mempolicy *pol;
80388
80389 uprobe_start_dup_mmap();
80390 down_write(&oldmm->mmap_sem);
80391@@ -365,8 +433,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
80392 mm->locked_vm = 0;
80393 mm->mmap = NULL;
80394 mm->mmap_cache = NULL;
80395- mm->free_area_cache = oldmm->mmap_base;
80396- mm->cached_hole_size = ~0UL;
80397+ mm->free_area_cache = oldmm->free_area_cache;
80398+ mm->cached_hole_size = oldmm->cached_hole_size;
80399 mm->map_count = 0;
80400 cpumask_clear(mm_cpumask(mm));
80401 mm->mm_rb = RB_ROOT;
80402@@ -382,57 +450,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
80403
80404 prev = NULL;
80405 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
80406- struct file *file;
80407-
80408 if (mpnt->vm_flags & VM_DONTCOPY) {
80409 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
80410 -vma_pages(mpnt));
80411 continue;
80412 }
80413- charge = 0;
80414- if (mpnt->vm_flags & VM_ACCOUNT) {
80415- unsigned long len = vma_pages(mpnt);
80416-
80417- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
80418- goto fail_nomem;
80419- charge = len;
80420- }
80421- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
80422- if (!tmp)
80423- goto fail_nomem;
80424- *tmp = *mpnt;
80425- INIT_LIST_HEAD(&tmp->anon_vma_chain);
80426- pol = mpol_dup(vma_policy(mpnt));
80427- retval = PTR_ERR(pol);
80428- if (IS_ERR(pol))
80429- goto fail_nomem_policy;
80430- vma_set_policy(tmp, pol);
80431- tmp->vm_mm = mm;
80432- if (anon_vma_fork(tmp, mpnt))
80433- goto fail_nomem_anon_vma_fork;
80434- tmp->vm_flags &= ~VM_LOCKED;
80435- tmp->vm_next = tmp->vm_prev = NULL;
80436- file = tmp->vm_file;
80437- if (file) {
80438- struct inode *inode = file_inode(file);
80439- struct address_space *mapping = file->f_mapping;
80440-
80441- get_file(file);
80442- if (tmp->vm_flags & VM_DENYWRITE)
80443- atomic_dec(&inode->i_writecount);
80444- mutex_lock(&mapping->i_mmap_mutex);
80445- if (tmp->vm_flags & VM_SHARED)
80446- mapping->i_mmap_writable++;
80447- flush_dcache_mmap_lock(mapping);
80448- /* insert tmp into the share list, just after mpnt */
80449- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
80450- vma_nonlinear_insert(tmp,
80451- &mapping->i_mmap_nonlinear);
80452- else
80453- vma_interval_tree_insert_after(tmp, mpnt,
80454- &mapping->i_mmap);
80455- flush_dcache_mmap_unlock(mapping);
80456- mutex_unlock(&mapping->i_mmap_mutex);
80457+ tmp = dup_vma(mm, oldmm, mpnt);
80458+ if (!tmp) {
80459+ retval = -ENOMEM;
80460+ goto out;
80461 }
80462
80463 /*
80464@@ -464,6 +490,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
80465 if (retval)
80466 goto out;
80467 }
80468+
80469+#ifdef CONFIG_PAX_SEGMEXEC
80470+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
80471+ struct vm_area_struct *mpnt_m;
80472+
80473+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
80474+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
80475+
80476+ if (!mpnt->vm_mirror)
80477+ continue;
80478+
80479+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
80480+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
80481+ mpnt->vm_mirror = mpnt_m;
80482+ } else {
80483+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
80484+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
80485+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
80486+ mpnt->vm_mirror->vm_mirror = mpnt;
80487+ }
80488+ }
80489+ BUG_ON(mpnt_m);
80490+ }
80491+#endif
80492+
80493 /* a new mm has just been created */
80494 arch_dup_mmap(oldmm, mm);
80495 retval = 0;
80496@@ -473,14 +524,6 @@ out:
80497 up_write(&oldmm->mmap_sem);
80498 uprobe_end_dup_mmap();
80499 return retval;
80500-fail_nomem_anon_vma_fork:
80501- mpol_put(pol);
80502-fail_nomem_policy:
80503- kmem_cache_free(vm_area_cachep, tmp);
80504-fail_nomem:
80505- retval = -ENOMEM;
80506- vm_unacct_memory(charge);
80507- goto out;
80508 }
80509
80510 static inline int mm_alloc_pgd(struct mm_struct *mm)
80511@@ -695,8 +738,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
80512 return ERR_PTR(err);
80513
80514 mm = get_task_mm(task);
80515- if (mm && mm != current->mm &&
80516- !ptrace_may_access(task, mode)) {
80517+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
80518+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
80519 mmput(mm);
80520 mm = ERR_PTR(-EACCES);
80521 }
80522@@ -918,13 +961,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
80523 spin_unlock(&fs->lock);
80524 return -EAGAIN;
80525 }
80526- fs->users++;
80527+ atomic_inc(&fs->users);
80528 spin_unlock(&fs->lock);
80529 return 0;
80530 }
80531 tsk->fs = copy_fs_struct(fs);
80532 if (!tsk->fs)
80533 return -ENOMEM;
80534+ /* Carry through gr_chroot_dentry and is_chrooted instead
80535+ of recomputing it here. Already copied when the task struct
80536+ is duplicated. This allows pivot_root to not be treated as
80537+ a chroot
80538+ */
80539+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
80540+
80541 return 0;
80542 }
80543
80544@@ -1197,10 +1247,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
80545 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
80546 #endif
80547 retval = -EAGAIN;
80548+
80549+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
80550+
80551 if (atomic_read(&p->real_cred->user->processes) >=
80552 task_rlimit(p, RLIMIT_NPROC)) {
80553- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
80554- p->real_cred->user != INIT_USER)
80555+ if (p->real_cred->user != INIT_USER &&
80556+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
80557 goto bad_fork_free;
80558 }
80559 current->flags &= ~PF_NPROC_EXCEEDED;
80560@@ -1446,6 +1499,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
80561 goto bad_fork_free_pid;
80562 }
80563
80564+ /* synchronizes with gr_set_acls()
80565+ we need to call this past the point of no return for fork()
80566+ */
80567+ gr_copy_label(p);
80568+
80569 if (clone_flags & CLONE_THREAD) {
80570 current->signal->nr_threads++;
80571 atomic_inc(&current->signal->live);
80572@@ -1529,6 +1587,8 @@ bad_fork_cleanup_count:
80573 bad_fork_free:
80574 free_task(p);
80575 fork_out:
80576+ gr_log_forkfail(retval);
80577+
80578 return ERR_PTR(retval);
80579 }
80580
80581@@ -1613,6 +1673,8 @@ long do_fork(unsigned long clone_flags,
80582 if (clone_flags & CLONE_PARENT_SETTID)
80583 put_user(nr, parent_tidptr);
80584
80585+ gr_handle_brute_check();
80586+
80587 if (clone_flags & CLONE_VFORK) {
80588 p->vfork_done = &vfork;
80589 init_completion(&vfork);
80590@@ -1729,7 +1791,7 @@ void __init proc_caches_init(void)
80591 mm_cachep = kmem_cache_create("mm_struct",
80592 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
80593 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
80594- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
80595+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
80596 mmap_init();
80597 nsproxy_cache_init();
80598 }
80599@@ -1769,7 +1831,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
80600 return 0;
80601
80602 /* don't need lock here; in the worst case we'll do useless copy */
80603- if (fs->users == 1)
80604+ if (atomic_read(&fs->users) == 1)
80605 return 0;
80606
80607 *new_fsp = copy_fs_struct(fs);
80608@@ -1881,7 +1943,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
80609 fs = current->fs;
80610 spin_lock(&fs->lock);
80611 current->fs = new_fs;
80612- if (--fs->users)
80613+ gr_set_chroot_entries(current, &current->fs->root);
80614+ if (atomic_dec_return(&fs->users))
80615 new_fs = NULL;
80616 else
80617 new_fs = fs;
80618diff --git a/kernel/futex.c b/kernel/futex.c
80619index 49dacfb..2ac4526 100644
80620--- a/kernel/futex.c
80621+++ b/kernel/futex.c
80622@@ -54,6 +54,7 @@
80623 #include <linux/mount.h>
80624 #include <linux/pagemap.h>
80625 #include <linux/syscalls.h>
80626+#include <linux/ptrace.h>
80627 #include <linux/signal.h>
80628 #include <linux/export.h>
80629 #include <linux/magic.h>
80630@@ -242,6 +243,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
80631 struct page *page, *page_head;
80632 int err, ro = 0;
80633
80634+#ifdef CONFIG_PAX_SEGMEXEC
80635+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
80636+ return -EFAULT;
80637+#endif
80638+
80639 /*
80640 * The futex address must be "naturally" aligned.
80641 */
80642@@ -440,7 +446,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
80643
80644 static int get_futex_value_locked(u32 *dest, u32 __user *from)
80645 {
80646- int ret;
80647+ unsigned long ret;
80648
80649 pagefault_disable();
80650 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
80651@@ -2733,6 +2739,7 @@ static int __init futex_init(void)
80652 {
80653 u32 curval;
80654 int i;
80655+ mm_segment_t oldfs;
80656
80657 /*
80658 * This will fail and we want it. Some arch implementations do
80659@@ -2744,8 +2751,11 @@ static int __init futex_init(void)
80660 * implementation, the non-functional ones will return
80661 * -ENOSYS.
80662 */
80663+ oldfs = get_fs();
80664+ set_fs(USER_DS);
80665 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
80666 futex_cmpxchg_enabled = 1;
80667+ set_fs(oldfs);
80668
80669 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
80670 plist_head_init(&futex_queues[i].chain);
80671diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
80672index f9f44fd..29885e4 100644
80673--- a/kernel/futex_compat.c
80674+++ b/kernel/futex_compat.c
80675@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
80676 return 0;
80677 }
80678
80679-static void __user *futex_uaddr(struct robust_list __user *entry,
80680+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
80681 compat_long_t futex_offset)
80682 {
80683 compat_uptr_t base = ptr_to_compat(entry);
80684diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
80685index 9b22d03..6295b62 100644
80686--- a/kernel/gcov/base.c
80687+++ b/kernel/gcov/base.c
80688@@ -102,11 +102,6 @@ void gcov_enable_events(void)
80689 }
80690
80691 #ifdef CONFIG_MODULES
80692-static inline int within(void *addr, void *start, unsigned long size)
80693-{
80694- return ((addr >= start) && (addr < start + size));
80695-}
80696-
80697 /* Update list and generate events when modules are unloaded. */
80698 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
80699 void *data)
80700@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
80701 prev = NULL;
80702 /* Remove entries located in module from linked list. */
80703 for (info = gcov_info_head; info; info = info->next) {
80704- if (within(info, mod->module_core, mod->core_size)) {
80705+ if (within_module_core_rw((unsigned long)info, mod)) {
80706 if (prev)
80707 prev->next = info->next;
80708 else
80709diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
80710index 2288fbd..0f3941f 100644
80711--- a/kernel/hrtimer.c
80712+++ b/kernel/hrtimer.c
80713@@ -1435,7 +1435,7 @@ void hrtimer_peek_ahead_timers(void)
80714 local_irq_restore(flags);
80715 }
80716
80717-static void run_hrtimer_softirq(struct softirq_action *h)
80718+static void run_hrtimer_softirq(void)
80719 {
80720 hrtimer_peek_ahead_timers();
80721 }
80722@@ -1770,7 +1770,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
80723 return NOTIFY_OK;
80724 }
80725
80726-static struct notifier_block __cpuinitdata hrtimers_nb = {
80727+static struct notifier_block hrtimers_nb = {
80728 .notifier_call = hrtimer_cpu_notify,
80729 };
80730
80731diff --git a/kernel/irq_work.c b/kernel/irq_work.c
80732index 55fcce6..0e4cf34 100644
80733--- a/kernel/irq_work.c
80734+++ b/kernel/irq_work.c
80735@@ -189,12 +189,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
80736 return NOTIFY_OK;
80737 }
80738
80739-static struct notifier_block cpu_notify;
80740+static struct notifier_block cpu_notify = {
80741+ .notifier_call = irq_work_cpu_notify,
80742+ .priority = 0,
80743+};
80744
80745 static __init int irq_work_init_cpu_notifier(void)
80746 {
80747- cpu_notify.notifier_call = irq_work_cpu_notify;
80748- cpu_notify.priority = 0;
80749 register_cpu_notifier(&cpu_notify);
80750 return 0;
80751 }
80752diff --git a/kernel/jump_label.c b/kernel/jump_label.c
80753index 60f48fa..7f3a770 100644
80754--- a/kernel/jump_label.c
80755+++ b/kernel/jump_label.c
80756@@ -13,6 +13,7 @@
80757 #include <linux/sort.h>
80758 #include <linux/err.h>
80759 #include <linux/static_key.h>
80760+#include <linux/mm.h>
80761
80762 #ifdef HAVE_JUMP_LABEL
80763
80764@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
80765
80766 size = (((unsigned long)stop - (unsigned long)start)
80767 / sizeof(struct jump_entry));
80768+ pax_open_kernel();
80769 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
80770+ pax_close_kernel();
80771 }
80772
80773 static void jump_label_update(struct static_key *key, int enable);
80774@@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
80775 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
80776 struct jump_entry *iter;
80777
80778+ pax_open_kernel();
80779 for (iter = iter_start; iter < iter_stop; iter++) {
80780 if (within_module_init(iter->code, mod))
80781 iter->code = 0;
80782 }
80783+ pax_close_kernel();
80784 }
80785
80786 static int
80787diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
80788index 3127ad5..159d880 100644
80789--- a/kernel/kallsyms.c
80790+++ b/kernel/kallsyms.c
80791@@ -11,6 +11,9 @@
80792 * Changed the compression method from stem compression to "table lookup"
80793 * compression (see scripts/kallsyms.c for a more complete description)
80794 */
80795+#ifdef CONFIG_GRKERNSEC_HIDESYM
80796+#define __INCLUDED_BY_HIDESYM 1
80797+#endif
80798 #include <linux/kallsyms.h>
80799 #include <linux/module.h>
80800 #include <linux/init.h>
80801@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
80802
80803 static inline int is_kernel_inittext(unsigned long addr)
80804 {
80805+ if (system_state != SYSTEM_BOOTING)
80806+ return 0;
80807+
80808 if (addr >= (unsigned long)_sinittext
80809 && addr <= (unsigned long)_einittext)
80810 return 1;
80811 return 0;
80812 }
80813
80814+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80815+#ifdef CONFIG_MODULES
80816+static inline int is_module_text(unsigned long addr)
80817+{
80818+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
80819+ return 1;
80820+
80821+ addr = ktla_ktva(addr);
80822+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
80823+}
80824+#else
80825+static inline int is_module_text(unsigned long addr)
80826+{
80827+ return 0;
80828+}
80829+#endif
80830+#endif
80831+
80832 static inline int is_kernel_text(unsigned long addr)
80833 {
80834 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
80835@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
80836
80837 static inline int is_kernel(unsigned long addr)
80838 {
80839+
80840+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80841+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
80842+ return 1;
80843+
80844+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
80845+#else
80846 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
80847+#endif
80848+
80849 return 1;
80850 return in_gate_area_no_mm(addr);
80851 }
80852
80853 static int is_ksym_addr(unsigned long addr)
80854 {
80855+
80856+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80857+ if (is_module_text(addr))
80858+ return 0;
80859+#endif
80860+
80861 if (all_var)
80862 return is_kernel(addr);
80863
80864@@ -480,7 +519,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
80865
80866 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
80867 {
80868- iter->name[0] = '\0';
80869 iter->nameoff = get_symbol_offset(new_pos);
80870 iter->pos = new_pos;
80871 }
80872@@ -528,6 +566,11 @@ static int s_show(struct seq_file *m, void *p)
80873 {
80874 struct kallsym_iter *iter = m->private;
80875
80876+#ifdef CONFIG_GRKERNSEC_HIDESYM
80877+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
80878+ return 0;
80879+#endif
80880+
80881 /* Some debugging symbols have no name. Ignore them. */
80882 if (!iter->name[0])
80883 return 0;
80884@@ -541,6 +584,7 @@ static int s_show(struct seq_file *m, void *p)
80885 */
80886 type = iter->exported ? toupper(iter->type) :
80887 tolower(iter->type);
80888+
80889 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
80890 type, iter->name, iter->module_name);
80891 } else
80892@@ -566,7 +610,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
80893 struct kallsym_iter *iter;
80894 int ret;
80895
80896- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
80897+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
80898 if (!iter)
80899 return -ENOMEM;
80900 reset_iter(iter, 0);
80901diff --git a/kernel/kcmp.c b/kernel/kcmp.c
80902index e30ac0f..3528cac 100644
80903--- a/kernel/kcmp.c
80904+++ b/kernel/kcmp.c
80905@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
80906 struct task_struct *task1, *task2;
80907 int ret;
80908
80909+#ifdef CONFIG_GRKERNSEC
80910+ return -ENOSYS;
80911+#endif
80912+
80913 rcu_read_lock();
80914
80915 /*
80916diff --git a/kernel/kexec.c b/kernel/kexec.c
80917index 59f7b55..4022f65 100644
80918--- a/kernel/kexec.c
80919+++ b/kernel/kexec.c
80920@@ -1041,7 +1041,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
80921 unsigned long flags)
80922 {
80923 struct compat_kexec_segment in;
80924- struct kexec_segment out, __user *ksegments;
80925+ struct kexec_segment out;
80926+ struct kexec_segment __user *ksegments;
80927 unsigned long i, result;
80928
80929 /* Don't allow clients that don't understand the native
80930diff --git a/kernel/kmod.c b/kernel/kmod.c
80931index 8241906..d625f2c 100644
80932--- a/kernel/kmod.c
80933+++ b/kernel/kmod.c
80934@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
80935 kfree(info->argv);
80936 }
80937
80938-static int call_modprobe(char *module_name, int wait)
80939+static int call_modprobe(char *module_name, char *module_param, int wait)
80940 {
80941 struct subprocess_info *info;
80942 static char *envp[] = {
80943@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
80944 NULL
80945 };
80946
80947- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
80948+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
80949 if (!argv)
80950 goto out;
80951
80952@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
80953 argv[1] = "-q";
80954 argv[2] = "--";
80955 argv[3] = module_name; /* check free_modprobe_argv() */
80956- argv[4] = NULL;
80957+ argv[4] = module_param;
80958+ argv[5] = NULL;
80959
80960 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
80961 NULL, free_modprobe_argv, NULL);
80962@@ -129,9 +130,8 @@ out:
80963 * If module auto-loading support is disabled then this function
80964 * becomes a no-operation.
80965 */
80966-int __request_module(bool wait, const char *fmt, ...)
80967+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
80968 {
80969- va_list args;
80970 char module_name[MODULE_NAME_LEN];
80971 unsigned int max_modprobes;
80972 int ret;
80973@@ -147,9 +147,7 @@ int __request_module(bool wait, const char *fmt, ...)
80974 */
80975 WARN_ON_ONCE(wait && current_is_async());
80976
80977- va_start(args, fmt);
80978- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
80979- va_end(args);
80980+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
80981 if (ret >= MODULE_NAME_LEN)
80982 return -ENAMETOOLONG;
80983
80984@@ -157,6 +155,20 @@ int __request_module(bool wait, const char *fmt, ...)
80985 if (ret)
80986 return ret;
80987
80988+#ifdef CONFIG_GRKERNSEC_MODHARDEN
80989+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
80990+ /* hack to workaround consolekit/udisks stupidity */
80991+ read_lock(&tasklist_lock);
80992+ if (!strcmp(current->comm, "mount") &&
80993+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
80994+ read_unlock(&tasklist_lock);
80995+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
80996+ return -EPERM;
80997+ }
80998+ read_unlock(&tasklist_lock);
80999+ }
81000+#endif
81001+
81002 /* If modprobe needs a service that is in a module, we get a recursive
81003 * loop. Limit the number of running kmod threads to max_threads/2 or
81004 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
81005@@ -185,11 +197,52 @@ int __request_module(bool wait, const char *fmt, ...)
81006
81007 trace_module_request(module_name, wait, _RET_IP_);
81008
81009- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
81010+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
81011
81012 atomic_dec(&kmod_concurrent);
81013 return ret;
81014 }
81015+
81016+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
81017+{
81018+ va_list args;
81019+ int ret;
81020+
81021+ va_start(args, fmt);
81022+ ret = ____request_module(wait, module_param, fmt, args);
81023+ va_end(args);
81024+
81025+ return ret;
81026+}
81027+
81028+int __request_module(bool wait, const char *fmt, ...)
81029+{
81030+ va_list args;
81031+ int ret;
81032+
81033+#ifdef CONFIG_GRKERNSEC_MODHARDEN
81034+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
81035+ char module_param[MODULE_NAME_LEN];
81036+
81037+ memset(module_param, 0, sizeof(module_param));
81038+
81039+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
81040+
81041+ va_start(args, fmt);
81042+ ret = ____request_module(wait, module_param, fmt, args);
81043+ va_end(args);
81044+
81045+ return ret;
81046+ }
81047+#endif
81048+
81049+ va_start(args, fmt);
81050+ ret = ____request_module(wait, NULL, fmt, args);
81051+ va_end(args);
81052+
81053+ return ret;
81054+}
81055+
81056 EXPORT_SYMBOL(__request_module);
81057 #endif /* CONFIG_MODULES */
81058
81059@@ -300,7 +353,7 @@ static int wait_for_helper(void *data)
81060 *
81061 * Thus the __user pointer cast is valid here.
81062 */
81063- sys_wait4(pid, (int __user *)&ret, 0, NULL);
81064+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
81065
81066 /*
81067 * If ret is 0, either ____call_usermodehelper failed and the
81068@@ -651,7 +704,7 @@ EXPORT_SYMBOL(call_usermodehelper);
81069 static int proc_cap_handler(struct ctl_table *table, int write,
81070 void __user *buffer, size_t *lenp, loff_t *ppos)
81071 {
81072- struct ctl_table t;
81073+ ctl_table_no_const t;
81074 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
81075 kernel_cap_t new_cap;
81076 int err, i;
81077diff --git a/kernel/kprobes.c b/kernel/kprobes.c
81078index bddf3b2..233bf40 100644
81079--- a/kernel/kprobes.c
81080+++ b/kernel/kprobes.c
81081@@ -31,6 +31,9 @@
81082 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
81083 * <prasanna@in.ibm.com> added function-return probes.
81084 */
81085+#ifdef CONFIG_GRKERNSEC_HIDESYM
81086+#define __INCLUDED_BY_HIDESYM 1
81087+#endif
81088 #include <linux/kprobes.h>
81089 #include <linux/hash.h>
81090 #include <linux/init.h>
81091@@ -185,7 +188,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
81092 * kernel image and loaded module images reside. This is required
81093 * so x86_64 can correctly handle the %rip-relative fixups.
81094 */
81095- kip->insns = module_alloc(PAGE_SIZE);
81096+ kip->insns = module_alloc_exec(PAGE_SIZE);
81097 if (!kip->insns) {
81098 kfree(kip);
81099 return NULL;
81100@@ -225,7 +228,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
81101 */
81102 if (!list_is_singular(&kip->list)) {
81103 list_del(&kip->list);
81104- module_free(NULL, kip->insns);
81105+ module_free_exec(NULL, kip->insns);
81106 kfree(kip);
81107 }
81108 return 1;
81109@@ -2083,7 +2086,7 @@ static int __init init_kprobes(void)
81110 {
81111 int i, err = 0;
81112 unsigned long offset = 0, size = 0;
81113- char *modname, namebuf[128];
81114+ char *modname, namebuf[KSYM_NAME_LEN];
81115 const char *symbol_name;
81116 void *addr;
81117 struct kprobe_blackpoint *kb;
81118@@ -2168,11 +2171,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
81119 kprobe_type = "k";
81120
81121 if (sym)
81122- seq_printf(pi, "%p %s %s+0x%x %s ",
81123+ seq_printf(pi, "%pK %s %s+0x%x %s ",
81124 p->addr, kprobe_type, sym, offset,
81125 (modname ? modname : " "));
81126 else
81127- seq_printf(pi, "%p %s %p ",
81128+ seq_printf(pi, "%pK %s %pK ",
81129 p->addr, kprobe_type, p->addr);
81130
81131 if (!pp)
81132@@ -2209,7 +2212,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
81133 const char *sym = NULL;
81134 unsigned int i = *(loff_t *) v;
81135 unsigned long offset = 0;
81136- char *modname, namebuf[128];
81137+ char *modname, namebuf[KSYM_NAME_LEN];
81138
81139 head = &kprobe_table[i];
81140 preempt_disable();
81141diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
81142index 6ada93c..dce7d5d 100644
81143--- a/kernel/ksysfs.c
81144+++ b/kernel/ksysfs.c
81145@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
81146 {
81147 if (count+1 > UEVENT_HELPER_PATH_LEN)
81148 return -ENOENT;
81149+ if (!capable(CAP_SYS_ADMIN))
81150+ return -EPERM;
81151 memcpy(uevent_helper, buf, count);
81152 uevent_helper[count] = '\0';
81153 if (count && uevent_helper[count-1] == '\n')
81154@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
81155 return count;
81156 }
81157
81158-static struct bin_attribute notes_attr = {
81159+static bin_attribute_no_const notes_attr __read_only = {
81160 .attr = {
81161 .name = "notes",
81162 .mode = S_IRUGO,
81163diff --git a/kernel/lockdep.c b/kernel/lockdep.c
81164index 1f3186b..bb7dbc6 100644
81165--- a/kernel/lockdep.c
81166+++ b/kernel/lockdep.c
81167@@ -596,6 +596,10 @@ static int static_obj(void *obj)
81168 end = (unsigned long) &_end,
81169 addr = (unsigned long) obj;
81170
81171+#ifdef CONFIG_PAX_KERNEXEC
81172+ start = ktla_ktva(start);
81173+#endif
81174+
81175 /*
81176 * static variable?
81177 */
81178@@ -736,6 +740,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
81179 if (!static_obj(lock->key)) {
81180 debug_locks_off();
81181 printk("INFO: trying to register non-static key.\n");
81182+ printk("lock:%pS key:%pS.\n", lock, lock->key);
81183 printk("the code is fine but needs lockdep annotation.\n");
81184 printk("turning off the locking correctness validator.\n");
81185 dump_stack();
81186@@ -3080,7 +3085,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
81187 if (!class)
81188 return 0;
81189 }
81190- atomic_inc((atomic_t *)&class->ops);
81191+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
81192 if (very_verbose(class)) {
81193 printk("\nacquire class [%p] %s", class->key, class->name);
81194 if (class->name_version > 1)
81195diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
81196index b2c71c5..7b88d63 100644
81197--- a/kernel/lockdep_proc.c
81198+++ b/kernel/lockdep_proc.c
81199@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
81200 return 0;
81201 }
81202
81203- seq_printf(m, "%p", class->key);
81204+ seq_printf(m, "%pK", class->key);
81205 #ifdef CONFIG_DEBUG_LOCKDEP
81206 seq_printf(m, " OPS:%8ld", class->ops);
81207 #endif
81208@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
81209
81210 list_for_each_entry(entry, &class->locks_after, entry) {
81211 if (entry->distance == 1) {
81212- seq_printf(m, " -> [%p] ", entry->class->key);
81213+ seq_printf(m, " -> [%pK] ", entry->class->key);
81214 print_name(m, entry->class);
81215 seq_puts(m, "\n");
81216 }
81217@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
81218 if (!class->key)
81219 continue;
81220
81221- seq_printf(m, "[%p] ", class->key);
81222+ seq_printf(m, "[%pK] ", class->key);
81223 print_name(m, class);
81224 seq_puts(m, "\n");
81225 }
81226@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
81227 if (!i)
81228 seq_line(m, '-', 40-namelen, namelen);
81229
81230- snprintf(ip, sizeof(ip), "[<%p>]",
81231+ snprintf(ip, sizeof(ip), "[<%pK>]",
81232 (void *)class->contention_point[i]);
81233 seq_printf(m, "%40s %14lu %29s %pS\n",
81234 name, stats->contention_point[i],
81235@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
81236 if (!i)
81237 seq_line(m, '-', 40-namelen, namelen);
81238
81239- snprintf(ip, sizeof(ip), "[<%p>]",
81240+ snprintf(ip, sizeof(ip), "[<%pK>]",
81241 (void *)class->contending_point[i]);
81242 seq_printf(m, "%40s %14lu %29s %pS\n",
81243 name, stats->contending_point[i],
81244diff --git a/kernel/module.c b/kernel/module.c
81245index fa53db8..6f17200 100644
81246--- a/kernel/module.c
81247+++ b/kernel/module.c
81248@@ -61,6 +61,7 @@
81249 #include <linux/pfn.h>
81250 #include <linux/bsearch.h>
81251 #include <linux/fips.h>
81252+#include <linux/grsecurity.h>
81253 #include <uapi/linux/module.h>
81254 #include "module-internal.h"
81255
81256@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
81257
81258 /* Bounds of module allocation, for speeding __module_address.
81259 * Protected by module_mutex. */
81260-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
81261+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
81262+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
81263
81264 int register_module_notifier(struct notifier_block * nb)
81265 {
81266@@ -323,7 +325,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
81267 return true;
81268
81269 list_for_each_entry_rcu(mod, &modules, list) {
81270- struct symsearch arr[] = {
81271+ struct symsearch modarr[] = {
81272 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
81273 NOT_GPL_ONLY, false },
81274 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
81275@@ -348,7 +350,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
81276 if (mod->state == MODULE_STATE_UNFORMED)
81277 continue;
81278
81279- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
81280+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
81281 return true;
81282 }
81283 return false;
81284@@ -485,7 +487,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
81285 static int percpu_modalloc(struct module *mod,
81286 unsigned long size, unsigned long align)
81287 {
81288- if (align > PAGE_SIZE) {
81289+ if (align-1 >= PAGE_SIZE) {
81290 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
81291 mod->name, align, PAGE_SIZE);
81292 align = PAGE_SIZE;
81293@@ -1089,7 +1091,7 @@ struct module_attribute module_uevent =
81294 static ssize_t show_coresize(struct module_attribute *mattr,
81295 struct module_kobject *mk, char *buffer)
81296 {
81297- return sprintf(buffer, "%u\n", mk->mod->core_size);
81298+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
81299 }
81300
81301 static struct module_attribute modinfo_coresize =
81302@@ -1098,7 +1100,7 @@ static struct module_attribute modinfo_coresize =
81303 static ssize_t show_initsize(struct module_attribute *mattr,
81304 struct module_kobject *mk, char *buffer)
81305 {
81306- return sprintf(buffer, "%u\n", mk->mod->init_size);
81307+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
81308 }
81309
81310 static struct module_attribute modinfo_initsize =
81311@@ -1313,7 +1315,7 @@ resolve_symbol_wait(struct module *mod,
81312 */
81313 #ifdef CONFIG_SYSFS
81314
81315-#ifdef CONFIG_KALLSYMS
81316+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
81317 static inline bool sect_empty(const Elf_Shdr *sect)
81318 {
81319 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
81320@@ -1453,7 +1455,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
81321 {
81322 unsigned int notes, loaded, i;
81323 struct module_notes_attrs *notes_attrs;
81324- struct bin_attribute *nattr;
81325+ bin_attribute_no_const *nattr;
81326
81327 /* failed to create section attributes, so can't create notes */
81328 if (!mod->sect_attrs)
81329@@ -1565,7 +1567,7 @@ static void del_usage_links(struct module *mod)
81330 static int module_add_modinfo_attrs(struct module *mod)
81331 {
81332 struct module_attribute *attr;
81333- struct module_attribute *temp_attr;
81334+ module_attribute_no_const *temp_attr;
81335 int error = 0;
81336 int i;
81337
81338@@ -1779,21 +1781,21 @@ static void set_section_ro_nx(void *base,
81339
81340 static void unset_module_core_ro_nx(struct module *mod)
81341 {
81342- set_page_attributes(mod->module_core + mod->core_text_size,
81343- mod->module_core + mod->core_size,
81344+ set_page_attributes(mod->module_core_rw,
81345+ mod->module_core_rw + mod->core_size_rw,
81346 set_memory_x);
81347- set_page_attributes(mod->module_core,
81348- mod->module_core + mod->core_ro_size,
81349+ set_page_attributes(mod->module_core_rx,
81350+ mod->module_core_rx + mod->core_size_rx,
81351 set_memory_rw);
81352 }
81353
81354 static void unset_module_init_ro_nx(struct module *mod)
81355 {
81356- set_page_attributes(mod->module_init + mod->init_text_size,
81357- mod->module_init + mod->init_size,
81358+ set_page_attributes(mod->module_init_rw,
81359+ mod->module_init_rw + mod->init_size_rw,
81360 set_memory_x);
81361- set_page_attributes(mod->module_init,
81362- mod->module_init + mod->init_ro_size,
81363+ set_page_attributes(mod->module_init_rx,
81364+ mod->module_init_rx + mod->init_size_rx,
81365 set_memory_rw);
81366 }
81367
81368@@ -1806,14 +1808,14 @@ void set_all_modules_text_rw(void)
81369 list_for_each_entry_rcu(mod, &modules, list) {
81370 if (mod->state == MODULE_STATE_UNFORMED)
81371 continue;
81372- if ((mod->module_core) && (mod->core_text_size)) {
81373- set_page_attributes(mod->module_core,
81374- mod->module_core + mod->core_text_size,
81375+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
81376+ set_page_attributes(mod->module_core_rx,
81377+ mod->module_core_rx + mod->core_size_rx,
81378 set_memory_rw);
81379 }
81380- if ((mod->module_init) && (mod->init_text_size)) {
81381- set_page_attributes(mod->module_init,
81382- mod->module_init + mod->init_text_size,
81383+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
81384+ set_page_attributes(mod->module_init_rx,
81385+ mod->module_init_rx + mod->init_size_rx,
81386 set_memory_rw);
81387 }
81388 }
81389@@ -1829,14 +1831,14 @@ void set_all_modules_text_ro(void)
81390 list_for_each_entry_rcu(mod, &modules, list) {
81391 if (mod->state == MODULE_STATE_UNFORMED)
81392 continue;
81393- if ((mod->module_core) && (mod->core_text_size)) {
81394- set_page_attributes(mod->module_core,
81395- mod->module_core + mod->core_text_size,
81396+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
81397+ set_page_attributes(mod->module_core_rx,
81398+ mod->module_core_rx + mod->core_size_rx,
81399 set_memory_ro);
81400 }
81401- if ((mod->module_init) && (mod->init_text_size)) {
81402- set_page_attributes(mod->module_init,
81403- mod->module_init + mod->init_text_size,
81404+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
81405+ set_page_attributes(mod->module_init_rx,
81406+ mod->module_init_rx + mod->init_size_rx,
81407 set_memory_ro);
81408 }
81409 }
81410@@ -1887,16 +1889,19 @@ static void free_module(struct module *mod)
81411
81412 /* This may be NULL, but that's OK */
81413 unset_module_init_ro_nx(mod);
81414- module_free(mod, mod->module_init);
81415+ module_free(mod, mod->module_init_rw);
81416+ module_free_exec(mod, mod->module_init_rx);
81417 kfree(mod->args);
81418 percpu_modfree(mod);
81419
81420 /* Free lock-classes: */
81421- lockdep_free_key_range(mod->module_core, mod->core_size);
81422+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
81423+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
81424
81425 /* Finally, free the core (containing the module structure) */
81426 unset_module_core_ro_nx(mod);
81427- module_free(mod, mod->module_core);
81428+ module_free_exec(mod, mod->module_core_rx);
81429+ module_free(mod, mod->module_core_rw);
81430
81431 #ifdef CONFIG_MPU
81432 update_protections(current->mm);
81433@@ -1966,9 +1971,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
81434 int ret = 0;
81435 const struct kernel_symbol *ksym;
81436
81437+#ifdef CONFIG_GRKERNSEC_MODHARDEN
81438+ int is_fs_load = 0;
81439+ int register_filesystem_found = 0;
81440+ char *p;
81441+
81442+ p = strstr(mod->args, "grsec_modharden_fs");
81443+ if (p) {
81444+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
81445+ /* copy \0 as well */
81446+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
81447+ is_fs_load = 1;
81448+ }
81449+#endif
81450+
81451 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
81452 const char *name = info->strtab + sym[i].st_name;
81453
81454+#ifdef CONFIG_GRKERNSEC_MODHARDEN
81455+ /* it's a real shame this will never get ripped and copied
81456+ upstream! ;(
81457+ */
81458+ if (is_fs_load && !strcmp(name, "register_filesystem"))
81459+ register_filesystem_found = 1;
81460+#endif
81461+
81462 switch (sym[i].st_shndx) {
81463 case SHN_COMMON:
81464 /* We compiled with -fno-common. These are not
81465@@ -1989,7 +2016,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
81466 ksym = resolve_symbol_wait(mod, info, name);
81467 /* Ok if resolved. */
81468 if (ksym && !IS_ERR(ksym)) {
81469+ pax_open_kernel();
81470 sym[i].st_value = ksym->value;
81471+ pax_close_kernel();
81472 break;
81473 }
81474
81475@@ -2008,11 +2037,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
81476 secbase = (unsigned long)mod_percpu(mod);
81477 else
81478 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
81479+ pax_open_kernel();
81480 sym[i].st_value += secbase;
81481+ pax_close_kernel();
81482 break;
81483 }
81484 }
81485
81486+#ifdef CONFIG_GRKERNSEC_MODHARDEN
81487+ if (is_fs_load && !register_filesystem_found) {
81488+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
81489+ ret = -EPERM;
81490+ }
81491+#endif
81492+
81493 return ret;
81494 }
81495
81496@@ -2096,22 +2134,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
81497 || s->sh_entsize != ~0UL
81498 || strstarts(sname, ".init"))
81499 continue;
81500- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
81501+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
81502+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
81503+ else
81504+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
81505 pr_debug("\t%s\n", sname);
81506 }
81507- switch (m) {
81508- case 0: /* executable */
81509- mod->core_size = debug_align(mod->core_size);
81510- mod->core_text_size = mod->core_size;
81511- break;
81512- case 1: /* RO: text and ro-data */
81513- mod->core_size = debug_align(mod->core_size);
81514- mod->core_ro_size = mod->core_size;
81515- break;
81516- case 3: /* whole core */
81517- mod->core_size = debug_align(mod->core_size);
81518- break;
81519- }
81520 }
81521
81522 pr_debug("Init section allocation order:\n");
81523@@ -2125,23 +2153,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
81524 || s->sh_entsize != ~0UL
81525 || !strstarts(sname, ".init"))
81526 continue;
81527- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
81528- | INIT_OFFSET_MASK);
81529+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
81530+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
81531+ else
81532+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
81533+ s->sh_entsize |= INIT_OFFSET_MASK;
81534 pr_debug("\t%s\n", sname);
81535 }
81536- switch (m) {
81537- case 0: /* executable */
81538- mod->init_size = debug_align(mod->init_size);
81539- mod->init_text_size = mod->init_size;
81540- break;
81541- case 1: /* RO: text and ro-data */
81542- mod->init_size = debug_align(mod->init_size);
81543- mod->init_ro_size = mod->init_size;
81544- break;
81545- case 3: /* whole init */
81546- mod->init_size = debug_align(mod->init_size);
81547- break;
81548- }
81549 }
81550 }
81551
81552@@ -2314,7 +2332,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
81553
81554 /* Put symbol section at end of init part of module. */
81555 symsect->sh_flags |= SHF_ALLOC;
81556- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
81557+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
81558 info->index.sym) | INIT_OFFSET_MASK;
81559 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
81560
81561@@ -2331,13 +2349,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
81562 }
81563
81564 /* Append room for core symbols at end of core part. */
81565- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
81566- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
81567- mod->core_size += strtab_size;
81568+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
81569+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
81570+ mod->core_size_rx += strtab_size;
81571
81572 /* Put string table section at end of init part of module. */
81573 strsect->sh_flags |= SHF_ALLOC;
81574- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
81575+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
81576 info->index.str) | INIT_OFFSET_MASK;
81577 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
81578 }
81579@@ -2355,12 +2373,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
81580 /* Make sure we get permanent strtab: don't use info->strtab. */
81581 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
81582
81583+ pax_open_kernel();
81584+
81585 /* Set types up while we still have access to sections. */
81586 for (i = 0; i < mod->num_symtab; i++)
81587 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
81588
81589- mod->core_symtab = dst = mod->module_core + info->symoffs;
81590- mod->core_strtab = s = mod->module_core + info->stroffs;
81591+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
81592+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
81593 src = mod->symtab;
81594 for (ndst = i = 0; i < mod->num_symtab; i++) {
81595 if (i == 0 ||
81596@@ -2372,6 +2392,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
81597 }
81598 }
81599 mod->core_num_syms = ndst;
81600+
81601+ pax_close_kernel();
81602 }
81603 #else
81604 static inline void layout_symtab(struct module *mod, struct load_info *info)
81605@@ -2405,17 +2427,33 @@ void * __weak module_alloc(unsigned long size)
81606 return vmalloc_exec(size);
81607 }
81608
81609-static void *module_alloc_update_bounds(unsigned long size)
81610+static void *module_alloc_update_bounds_rw(unsigned long size)
81611 {
81612 void *ret = module_alloc(size);
81613
81614 if (ret) {
81615 mutex_lock(&module_mutex);
81616 /* Update module bounds. */
81617- if ((unsigned long)ret < module_addr_min)
81618- module_addr_min = (unsigned long)ret;
81619- if ((unsigned long)ret + size > module_addr_max)
81620- module_addr_max = (unsigned long)ret + size;
81621+ if ((unsigned long)ret < module_addr_min_rw)
81622+ module_addr_min_rw = (unsigned long)ret;
81623+ if ((unsigned long)ret + size > module_addr_max_rw)
81624+ module_addr_max_rw = (unsigned long)ret + size;
81625+ mutex_unlock(&module_mutex);
81626+ }
81627+ return ret;
81628+}
81629+
81630+static void *module_alloc_update_bounds_rx(unsigned long size)
81631+{
81632+ void *ret = module_alloc_exec(size);
81633+
81634+ if (ret) {
81635+ mutex_lock(&module_mutex);
81636+ /* Update module bounds. */
81637+ if ((unsigned long)ret < module_addr_min_rx)
81638+ module_addr_min_rx = (unsigned long)ret;
81639+ if ((unsigned long)ret + size > module_addr_max_rx)
81640+ module_addr_max_rx = (unsigned long)ret + size;
81641 mutex_unlock(&module_mutex);
81642 }
81643 return ret;
81644@@ -2691,8 +2729,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
81645 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
81646 {
81647 const char *modmagic = get_modinfo(info, "vermagic");
81648+ const char *license = get_modinfo(info, "license");
81649 int err;
81650
81651+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
81652+ if (!license || !license_is_gpl_compatible(license))
81653+ return -ENOEXEC;
81654+#endif
81655+
81656 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
81657 modmagic = NULL;
81658
81659@@ -2718,7 +2762,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
81660 }
81661
81662 /* Set up license info based on the info section */
81663- set_license(mod, get_modinfo(info, "license"));
81664+ set_license(mod, license);
81665
81666 return 0;
81667 }
81668@@ -2799,7 +2843,7 @@ static int move_module(struct module *mod, struct load_info *info)
81669 void *ptr;
81670
81671 /* Do the allocs. */
81672- ptr = module_alloc_update_bounds(mod->core_size);
81673+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
81674 /*
81675 * The pointer to this block is stored in the module structure
81676 * which is inside the block. Just mark it as not being a
81677@@ -2809,11 +2853,11 @@ static int move_module(struct module *mod, struct load_info *info)
81678 if (!ptr)
81679 return -ENOMEM;
81680
81681- memset(ptr, 0, mod->core_size);
81682- mod->module_core = ptr;
81683+ memset(ptr, 0, mod->core_size_rw);
81684+ mod->module_core_rw = ptr;
81685
81686- if (mod->init_size) {
81687- ptr = module_alloc_update_bounds(mod->init_size);
81688+ if (mod->init_size_rw) {
81689+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
81690 /*
81691 * The pointer to this block is stored in the module structure
81692 * which is inside the block. This block doesn't need to be
81693@@ -2822,13 +2866,45 @@ static int move_module(struct module *mod, struct load_info *info)
81694 */
81695 kmemleak_ignore(ptr);
81696 if (!ptr) {
81697- module_free(mod, mod->module_core);
81698+ module_free(mod, mod->module_core_rw);
81699 return -ENOMEM;
81700 }
81701- memset(ptr, 0, mod->init_size);
81702- mod->module_init = ptr;
81703+ memset(ptr, 0, mod->init_size_rw);
81704+ mod->module_init_rw = ptr;
81705 } else
81706- mod->module_init = NULL;
81707+ mod->module_init_rw = NULL;
81708+
81709+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
81710+ kmemleak_not_leak(ptr);
81711+ if (!ptr) {
81712+ if (mod->module_init_rw)
81713+ module_free(mod, mod->module_init_rw);
81714+ module_free(mod, mod->module_core_rw);
81715+ return -ENOMEM;
81716+ }
81717+
81718+ pax_open_kernel();
81719+ memset(ptr, 0, mod->core_size_rx);
81720+ pax_close_kernel();
81721+ mod->module_core_rx = ptr;
81722+
81723+ if (mod->init_size_rx) {
81724+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
81725+ kmemleak_ignore(ptr);
81726+ if (!ptr && mod->init_size_rx) {
81727+ module_free_exec(mod, mod->module_core_rx);
81728+ if (mod->module_init_rw)
81729+ module_free(mod, mod->module_init_rw);
81730+ module_free(mod, mod->module_core_rw);
81731+ return -ENOMEM;
81732+ }
81733+
81734+ pax_open_kernel();
81735+ memset(ptr, 0, mod->init_size_rx);
81736+ pax_close_kernel();
81737+ mod->module_init_rx = ptr;
81738+ } else
81739+ mod->module_init_rx = NULL;
81740
81741 /* Transfer each section which specifies SHF_ALLOC */
81742 pr_debug("final section addresses:\n");
81743@@ -2839,16 +2915,45 @@ static int move_module(struct module *mod, struct load_info *info)
81744 if (!(shdr->sh_flags & SHF_ALLOC))
81745 continue;
81746
81747- if (shdr->sh_entsize & INIT_OFFSET_MASK)
81748- dest = mod->module_init
81749- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
81750- else
81751- dest = mod->module_core + shdr->sh_entsize;
81752+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
81753+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
81754+ dest = mod->module_init_rw
81755+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
81756+ else
81757+ dest = mod->module_init_rx
81758+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
81759+ } else {
81760+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
81761+ dest = mod->module_core_rw + shdr->sh_entsize;
81762+ else
81763+ dest = mod->module_core_rx + shdr->sh_entsize;
81764+ }
81765+
81766+ if (shdr->sh_type != SHT_NOBITS) {
81767+
81768+#ifdef CONFIG_PAX_KERNEXEC
81769+#ifdef CONFIG_X86_64
81770+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
81771+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
81772+#endif
81773+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
81774+ pax_open_kernel();
81775+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
81776+ pax_close_kernel();
81777+ } else
81778+#endif
81779
81780- if (shdr->sh_type != SHT_NOBITS)
81781 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
81782+ }
81783 /* Update sh_addr to point to copy in image. */
81784- shdr->sh_addr = (unsigned long)dest;
81785+
81786+#ifdef CONFIG_PAX_KERNEXEC
81787+ if (shdr->sh_flags & SHF_EXECINSTR)
81788+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
81789+ else
81790+#endif
81791+
81792+ shdr->sh_addr = (unsigned long)dest;
81793 pr_debug("\t0x%lx %s\n",
81794 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
81795 }
81796@@ -2905,12 +3010,12 @@ static void flush_module_icache(const struct module *mod)
81797 * Do it before processing of module parameters, so the module
81798 * can provide parameter accessor functions of its own.
81799 */
81800- if (mod->module_init)
81801- flush_icache_range((unsigned long)mod->module_init,
81802- (unsigned long)mod->module_init
81803- + mod->init_size);
81804- flush_icache_range((unsigned long)mod->module_core,
81805- (unsigned long)mod->module_core + mod->core_size);
81806+ if (mod->module_init_rx)
81807+ flush_icache_range((unsigned long)mod->module_init_rx,
81808+ (unsigned long)mod->module_init_rx
81809+ + mod->init_size_rx);
81810+ flush_icache_range((unsigned long)mod->module_core_rx,
81811+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
81812
81813 set_fs(old_fs);
81814 }
81815@@ -2977,8 +3082,10 @@ static int alloc_module_percpu(struct module *mod, struct load_info *info)
81816 static void module_deallocate(struct module *mod, struct load_info *info)
81817 {
81818 percpu_modfree(mod);
81819- module_free(mod, mod->module_init);
81820- module_free(mod, mod->module_core);
81821+ module_free_exec(mod, mod->module_init_rx);
81822+ module_free_exec(mod, mod->module_core_rx);
81823+ module_free(mod, mod->module_init_rw);
81824+ module_free(mod, mod->module_core_rw);
81825 }
81826
81827 int __weak module_finalize(const Elf_Ehdr *hdr,
81828@@ -2991,7 +3098,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
81829 static int post_relocation(struct module *mod, const struct load_info *info)
81830 {
81831 /* Sort exception table now relocations are done. */
81832+ pax_open_kernel();
81833 sort_extable(mod->extable, mod->extable + mod->num_exentries);
81834+ pax_close_kernel();
81835
81836 /* Copy relocated percpu area over. */
81837 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
81838@@ -3045,16 +3154,16 @@ static int do_init_module(struct module *mod)
81839 MODULE_STATE_COMING, mod);
81840
81841 /* Set RO and NX regions for core */
81842- set_section_ro_nx(mod->module_core,
81843- mod->core_text_size,
81844- mod->core_ro_size,
81845- mod->core_size);
81846+ set_section_ro_nx(mod->module_core_rx,
81847+ mod->core_size_rx,
81848+ mod->core_size_rx,
81849+ mod->core_size_rx);
81850
81851 /* Set RO and NX regions for init */
81852- set_section_ro_nx(mod->module_init,
81853- mod->init_text_size,
81854- mod->init_ro_size,
81855- mod->init_size);
81856+ set_section_ro_nx(mod->module_init_rx,
81857+ mod->init_size_rx,
81858+ mod->init_size_rx,
81859+ mod->init_size_rx);
81860
81861 do_mod_ctors(mod);
81862 /* Start the module */
81863@@ -3116,11 +3225,12 @@ static int do_init_module(struct module *mod)
81864 mod->strtab = mod->core_strtab;
81865 #endif
81866 unset_module_init_ro_nx(mod);
81867- module_free(mod, mod->module_init);
81868- mod->module_init = NULL;
81869- mod->init_size = 0;
81870- mod->init_ro_size = 0;
81871- mod->init_text_size = 0;
81872+ module_free(mod, mod->module_init_rw);
81873+ module_free_exec(mod, mod->module_init_rx);
81874+ mod->module_init_rw = NULL;
81875+ mod->module_init_rx = NULL;
81876+ mod->init_size_rw = 0;
81877+ mod->init_size_rx = 0;
81878 mutex_unlock(&module_mutex);
81879 wake_up_all(&module_wq);
81880
81881@@ -3252,9 +3362,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
81882 if (err)
81883 goto free_unload;
81884
81885+ /* Now copy in args */
81886+ mod->args = strndup_user(uargs, ~0UL >> 1);
81887+ if (IS_ERR(mod->args)) {
81888+ err = PTR_ERR(mod->args);
81889+ goto free_unload;
81890+ }
81891+
81892 /* Set up MODINFO_ATTR fields */
81893 setup_modinfo(mod, info);
81894
81895+#ifdef CONFIG_GRKERNSEC_MODHARDEN
81896+ {
81897+ char *p, *p2;
81898+
81899+ if (strstr(mod->args, "grsec_modharden_netdev")) {
81900+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
81901+ err = -EPERM;
81902+ goto free_modinfo;
81903+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
81904+ p += sizeof("grsec_modharden_normal") - 1;
81905+ p2 = strstr(p, "_");
81906+ if (p2) {
81907+ *p2 = '\0';
81908+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
81909+ *p2 = '_';
81910+ }
81911+ err = -EPERM;
81912+ goto free_modinfo;
81913+ }
81914+ }
81915+#endif
81916+
81917 /* Fix up syms, so that st_value is a pointer to location. */
81918 err = simplify_symbols(mod, info);
81919 if (err < 0)
81920@@ -3270,13 +3409,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
81921
81922 flush_module_icache(mod);
81923
81924- /* Now copy in args */
81925- mod->args = strndup_user(uargs, ~0UL >> 1);
81926- if (IS_ERR(mod->args)) {
81927- err = PTR_ERR(mod->args);
81928- goto free_arch_cleanup;
81929- }
81930-
81931 dynamic_debug_setup(info->debug, info->num_debug);
81932
81933 /* Finally it's fully formed, ready to start executing. */
81934@@ -3311,11 +3443,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
81935 ddebug_cleanup:
81936 dynamic_debug_remove(info->debug);
81937 synchronize_sched();
81938- kfree(mod->args);
81939- free_arch_cleanup:
81940 module_arch_cleanup(mod);
81941 free_modinfo:
81942 free_modinfo(mod);
81943+ kfree(mod->args);
81944 free_unload:
81945 module_unload_free(mod);
81946 unlink_mod:
81947@@ -3398,10 +3529,16 @@ static const char *get_ksymbol(struct module *mod,
81948 unsigned long nextval;
81949
81950 /* At worse, next value is at end of module */
81951- if (within_module_init(addr, mod))
81952- nextval = (unsigned long)mod->module_init+mod->init_text_size;
81953+ if (within_module_init_rx(addr, mod))
81954+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
81955+ else if (within_module_init_rw(addr, mod))
81956+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
81957+ else if (within_module_core_rx(addr, mod))
81958+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
81959+ else if (within_module_core_rw(addr, mod))
81960+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
81961 else
81962- nextval = (unsigned long)mod->module_core+mod->core_text_size;
81963+ return NULL;
81964
81965 /* Scan for closest preceding symbol, and next symbol. (ELF
81966 starts real symbols at 1). */
81967@@ -3654,7 +3791,7 @@ static int m_show(struct seq_file *m, void *p)
81968 return 0;
81969
81970 seq_printf(m, "%s %u",
81971- mod->name, mod->init_size + mod->core_size);
81972+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
81973 print_unload_info(m, mod);
81974
81975 /* Informative for users. */
81976@@ -3663,7 +3800,7 @@ static int m_show(struct seq_file *m, void *p)
81977 mod->state == MODULE_STATE_COMING ? "Loading":
81978 "Live");
81979 /* Used by oprofile and other similar tools. */
81980- seq_printf(m, " 0x%pK", mod->module_core);
81981+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
81982
81983 /* Taints info */
81984 if (mod->taints)
81985@@ -3699,7 +3836,17 @@ static const struct file_operations proc_modules_operations = {
81986
81987 static int __init proc_modules_init(void)
81988 {
81989+#ifndef CONFIG_GRKERNSEC_HIDESYM
81990+#ifdef CONFIG_GRKERNSEC_PROC_USER
81991+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
81992+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
81993+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
81994+#else
81995 proc_create("modules", 0, NULL, &proc_modules_operations);
81996+#endif
81997+#else
81998+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
81999+#endif
82000 return 0;
82001 }
82002 module_init(proc_modules_init);
82003@@ -3760,14 +3907,14 @@ struct module *__module_address(unsigned long addr)
82004 {
82005 struct module *mod;
82006
82007- if (addr < module_addr_min || addr > module_addr_max)
82008+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
82009+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
82010 return NULL;
82011
82012 list_for_each_entry_rcu(mod, &modules, list) {
82013 if (mod->state == MODULE_STATE_UNFORMED)
82014 continue;
82015- if (within_module_core(addr, mod)
82016- || within_module_init(addr, mod))
82017+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
82018 return mod;
82019 }
82020 return NULL;
82021@@ -3802,11 +3949,20 @@ bool is_module_text_address(unsigned long addr)
82022 */
82023 struct module *__module_text_address(unsigned long addr)
82024 {
82025- struct module *mod = __module_address(addr);
82026+ struct module *mod;
82027+
82028+#ifdef CONFIG_X86_32
82029+ addr = ktla_ktva(addr);
82030+#endif
82031+
82032+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
82033+ return NULL;
82034+
82035+ mod = __module_address(addr);
82036+
82037 if (mod) {
82038 /* Make sure it's within the text section. */
82039- if (!within(addr, mod->module_init, mod->init_text_size)
82040- && !within(addr, mod->module_core, mod->core_text_size))
82041+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
82042 mod = NULL;
82043 }
82044 return mod;
82045diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
82046index 7e3443f..b2a1e6b 100644
82047--- a/kernel/mutex-debug.c
82048+++ b/kernel/mutex-debug.c
82049@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
82050 }
82051
82052 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
82053- struct thread_info *ti)
82054+ struct task_struct *task)
82055 {
82056 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
82057
82058 /* Mark the current thread as blocked on the lock: */
82059- ti->task->blocked_on = waiter;
82060+ task->blocked_on = waiter;
82061 }
82062
82063 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
82064- struct thread_info *ti)
82065+ struct task_struct *task)
82066 {
82067 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
82068- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
82069- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
82070- ti->task->blocked_on = NULL;
82071+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
82072+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
82073+ task->blocked_on = NULL;
82074
82075 list_del_init(&waiter->list);
82076 waiter->task = NULL;
82077diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
82078index 0799fd3..d06ae3b 100644
82079--- a/kernel/mutex-debug.h
82080+++ b/kernel/mutex-debug.h
82081@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
82082 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
82083 extern void debug_mutex_add_waiter(struct mutex *lock,
82084 struct mutex_waiter *waiter,
82085- struct thread_info *ti);
82086+ struct task_struct *task);
82087 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
82088- struct thread_info *ti);
82089+ struct task_struct *task);
82090 extern void debug_mutex_unlock(struct mutex *lock);
82091 extern void debug_mutex_init(struct mutex *lock, const char *name,
82092 struct lock_class_key *key);
82093diff --git a/kernel/mutex.c b/kernel/mutex.c
82094index ad53a66..f1bf8bc 100644
82095--- a/kernel/mutex.c
82096+++ b/kernel/mutex.c
82097@@ -134,7 +134,7 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
82098 node->locked = 1;
82099 return;
82100 }
82101- ACCESS_ONCE(prev->next) = node;
82102+ ACCESS_ONCE_RW(prev->next) = node;
82103 smp_wmb();
82104 /* Wait until the lock holder passes the lock down */
82105 while (!ACCESS_ONCE(node->locked))
82106@@ -155,7 +155,7 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
82107 while (!(next = ACCESS_ONCE(node->next)))
82108 arch_mutex_cpu_relax();
82109 }
82110- ACCESS_ONCE(next->locked) = 1;
82111+ ACCESS_ONCE_RW(next->locked) = 1;
82112 smp_wmb();
82113 }
82114
82115@@ -341,7 +341,7 @@ slowpath:
82116 spin_lock_mutex(&lock->wait_lock, flags);
82117
82118 debug_mutex_lock_common(lock, &waiter);
82119- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
82120+ debug_mutex_add_waiter(lock, &waiter, task);
82121
82122 /* add waiting tasks to the end of the waitqueue (FIFO): */
82123 list_add_tail(&waiter.list, &lock->wait_list);
82124@@ -371,8 +371,7 @@ slowpath:
82125 * TASK_UNINTERRUPTIBLE case.)
82126 */
82127 if (unlikely(signal_pending_state(state, task))) {
82128- mutex_remove_waiter(lock, &waiter,
82129- task_thread_info(task));
82130+ mutex_remove_waiter(lock, &waiter, task);
82131 mutex_release(&lock->dep_map, 1, ip);
82132 spin_unlock_mutex(&lock->wait_lock, flags);
82133
82134@@ -391,7 +390,7 @@ slowpath:
82135 done:
82136 lock_acquired(&lock->dep_map, ip);
82137 /* got the lock - rejoice! */
82138- mutex_remove_waiter(lock, &waiter, current_thread_info());
82139+ mutex_remove_waiter(lock, &waiter, task);
82140 mutex_set_owner(lock);
82141
82142 /* set it to 0 if there are no waiters left: */
82143diff --git a/kernel/notifier.c b/kernel/notifier.c
82144index 2d5cc4c..d9ea600 100644
82145--- a/kernel/notifier.c
82146+++ b/kernel/notifier.c
82147@@ -5,6 +5,7 @@
82148 #include <linux/rcupdate.h>
82149 #include <linux/vmalloc.h>
82150 #include <linux/reboot.h>
82151+#include <linux/mm.h>
82152
82153 /*
82154 * Notifier list for kernel code which wants to be called
82155@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
82156 while ((*nl) != NULL) {
82157 if (n->priority > (*nl)->priority)
82158 break;
82159- nl = &((*nl)->next);
82160+ nl = (struct notifier_block **)&((*nl)->next);
82161 }
82162- n->next = *nl;
82163+ pax_open_kernel();
82164+ *(const void **)&n->next = *nl;
82165 rcu_assign_pointer(*nl, n);
82166+ pax_close_kernel();
82167 return 0;
82168 }
82169
82170@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
82171 return 0;
82172 if (n->priority > (*nl)->priority)
82173 break;
82174- nl = &((*nl)->next);
82175+ nl = (struct notifier_block **)&((*nl)->next);
82176 }
82177- n->next = *nl;
82178+ pax_open_kernel();
82179+ *(const void **)&n->next = *nl;
82180 rcu_assign_pointer(*nl, n);
82181+ pax_close_kernel();
82182 return 0;
82183 }
82184
82185@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
82186 {
82187 while ((*nl) != NULL) {
82188 if ((*nl) == n) {
82189+ pax_open_kernel();
82190 rcu_assign_pointer(*nl, n->next);
82191+ pax_close_kernel();
82192 return 0;
82193 }
82194- nl = &((*nl)->next);
82195+ nl = (struct notifier_block **)&((*nl)->next);
82196 }
82197 return -ENOENT;
82198 }
82199diff --git a/kernel/panic.c b/kernel/panic.c
82200index 167ec09..0dda5f9 100644
82201--- a/kernel/panic.c
82202+++ b/kernel/panic.c
82203@@ -400,7 +400,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
82204 unsigned taint, struct slowpath_args *args)
82205 {
82206 printk(KERN_WARNING "------------[ cut here ]------------\n");
82207- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
82208+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
82209
82210 if (args)
82211 vprintk(args->fmt, args->args);
82212@@ -453,7 +453,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
82213 */
82214 void __stack_chk_fail(void)
82215 {
82216- panic("stack-protector: Kernel stack is corrupted in: %p\n",
82217+ dump_stack();
82218+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
82219 __builtin_return_address(0));
82220 }
82221 EXPORT_SYMBOL(__stack_chk_fail);
82222diff --git a/kernel/pid.c b/kernel/pid.c
82223index 0db3e79..95b9dc2 100644
82224--- a/kernel/pid.c
82225+++ b/kernel/pid.c
82226@@ -33,6 +33,7 @@
82227 #include <linux/rculist.h>
82228 #include <linux/bootmem.h>
82229 #include <linux/hash.h>
82230+#include <linux/security.h>
82231 #include <linux/pid_namespace.h>
82232 #include <linux/init_task.h>
82233 #include <linux/syscalls.h>
82234@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
82235
82236 int pid_max = PID_MAX_DEFAULT;
82237
82238-#define RESERVED_PIDS 300
82239+#define RESERVED_PIDS 500
82240
82241 int pid_max_min = RESERVED_PIDS + 1;
82242 int pid_max_max = PID_MAX_LIMIT;
82243@@ -442,10 +443,18 @@ EXPORT_SYMBOL(pid_task);
82244 */
82245 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
82246 {
82247+ struct task_struct *task;
82248+
82249 rcu_lockdep_assert(rcu_read_lock_held(),
82250 "find_task_by_pid_ns() needs rcu_read_lock()"
82251 " protection");
82252- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
82253+
82254+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
82255+
82256+ if (gr_pid_is_chrooted(task))
82257+ return NULL;
82258+
82259+ return task;
82260 }
82261
82262 struct task_struct *find_task_by_vpid(pid_t vnr)
82263@@ -453,6 +462,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
82264 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
82265 }
82266
82267+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
82268+{
82269+ rcu_lockdep_assert(rcu_read_lock_held(),
82270+ "find_task_by_pid_ns() needs rcu_read_lock()"
82271+ " protection");
82272+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
82273+}
82274+
82275 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
82276 {
82277 struct pid *pid;
82278diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
82279index 6917e8e..9909aeb 100644
82280--- a/kernel/pid_namespace.c
82281+++ b/kernel/pid_namespace.c
82282@@ -247,7 +247,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
82283 void __user *buffer, size_t *lenp, loff_t *ppos)
82284 {
82285 struct pid_namespace *pid_ns = task_active_pid_ns(current);
82286- struct ctl_table tmp = *table;
82287+ ctl_table_no_const tmp = *table;
82288
82289 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
82290 return -EPERM;
82291diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
82292index 42670e9..8719c2f 100644
82293--- a/kernel/posix-cpu-timers.c
82294+++ b/kernel/posix-cpu-timers.c
82295@@ -1636,14 +1636,14 @@ struct k_clock clock_posix_cpu = {
82296
82297 static __init int init_posix_cpu_timers(void)
82298 {
82299- struct k_clock process = {
82300+ static struct k_clock process = {
82301 .clock_getres = process_cpu_clock_getres,
82302 .clock_get = process_cpu_clock_get,
82303 .timer_create = process_cpu_timer_create,
82304 .nsleep = process_cpu_nsleep,
82305 .nsleep_restart = process_cpu_nsleep_restart,
82306 };
82307- struct k_clock thread = {
82308+ static struct k_clock thread = {
82309 .clock_getres = thread_cpu_clock_getres,
82310 .clock_get = thread_cpu_clock_get,
82311 .timer_create = thread_cpu_timer_create,
82312diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
82313index 424c2d4..679242f 100644
82314--- a/kernel/posix-timers.c
82315+++ b/kernel/posix-timers.c
82316@@ -43,6 +43,7 @@
82317 #include <linux/hash.h>
82318 #include <linux/posix-clock.h>
82319 #include <linux/posix-timers.h>
82320+#include <linux/grsecurity.h>
82321 #include <linux/syscalls.h>
82322 #include <linux/wait.h>
82323 #include <linux/workqueue.h>
82324@@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock);
82325 * which we beg off on and pass to do_sys_settimeofday().
82326 */
82327
82328-static struct k_clock posix_clocks[MAX_CLOCKS];
82329+static struct k_clock *posix_clocks[MAX_CLOCKS];
82330
82331 /*
82332 * These ones are defined below.
82333@@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
82334 */
82335 static __init int init_posix_timers(void)
82336 {
82337- struct k_clock clock_realtime = {
82338+ static struct k_clock clock_realtime = {
82339 .clock_getres = hrtimer_get_res,
82340 .clock_get = posix_clock_realtime_get,
82341 .clock_set = posix_clock_realtime_set,
82342@@ -287,7 +288,7 @@ static __init int init_posix_timers(void)
82343 .timer_get = common_timer_get,
82344 .timer_del = common_timer_del,
82345 };
82346- struct k_clock clock_monotonic = {
82347+ static struct k_clock clock_monotonic = {
82348 .clock_getres = hrtimer_get_res,
82349 .clock_get = posix_ktime_get_ts,
82350 .nsleep = common_nsleep,
82351@@ -297,19 +298,19 @@ static __init int init_posix_timers(void)
82352 .timer_get = common_timer_get,
82353 .timer_del = common_timer_del,
82354 };
82355- struct k_clock clock_monotonic_raw = {
82356+ static struct k_clock clock_monotonic_raw = {
82357 .clock_getres = hrtimer_get_res,
82358 .clock_get = posix_get_monotonic_raw,
82359 };
82360- struct k_clock clock_realtime_coarse = {
82361+ static struct k_clock clock_realtime_coarse = {
82362 .clock_getres = posix_get_coarse_res,
82363 .clock_get = posix_get_realtime_coarse,
82364 };
82365- struct k_clock clock_monotonic_coarse = {
82366+ static struct k_clock clock_monotonic_coarse = {
82367 .clock_getres = posix_get_coarse_res,
82368 .clock_get = posix_get_monotonic_coarse,
82369 };
82370- struct k_clock clock_tai = {
82371+ static struct k_clock clock_tai = {
82372 .clock_getres = hrtimer_get_res,
82373 .clock_get = posix_get_tai,
82374 .nsleep = common_nsleep,
82375@@ -319,7 +320,7 @@ static __init int init_posix_timers(void)
82376 .timer_get = common_timer_get,
82377 .timer_del = common_timer_del,
82378 };
82379- struct k_clock clock_boottime = {
82380+ static struct k_clock clock_boottime = {
82381 .clock_getres = hrtimer_get_res,
82382 .clock_get = posix_get_boottime,
82383 .nsleep = common_nsleep,
82384@@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
82385 return;
82386 }
82387
82388- posix_clocks[clock_id] = *new_clock;
82389+ posix_clocks[clock_id] = new_clock;
82390 }
82391 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
82392
82393@@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
82394 return (id & CLOCKFD_MASK) == CLOCKFD ?
82395 &clock_posix_dynamic : &clock_posix_cpu;
82396
82397- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
82398+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
82399 return NULL;
82400- return &posix_clocks[id];
82401+ return posix_clocks[id];
82402 }
82403
82404 static int common_timer_create(struct k_itimer *new_timer)
82405@@ -597,7 +598,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
82406 struct k_clock *kc = clockid_to_kclock(which_clock);
82407 struct k_itimer *new_timer;
82408 int error, new_timer_id;
82409- sigevent_t event;
82410+ sigevent_t event = { };
82411 int it_id_set = IT_ID_NOT_SET;
82412
82413 if (!kc)
82414@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
82415 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
82416 return -EFAULT;
82417
82418+ /* only the CLOCK_REALTIME clock can be set, all other clocks
82419+ have their clock_set fptr set to a nosettime dummy function
82420+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
82421+ call common_clock_set, which calls do_sys_settimeofday, which
82422+ we hook
82423+ */
82424+
82425 return kc->clock_set(which_clock, &new_tp);
82426 }
82427
82428diff --git a/kernel/power/process.c b/kernel/power/process.c
82429index 98088e0..aaf95c0 100644
82430--- a/kernel/power/process.c
82431+++ b/kernel/power/process.c
82432@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
82433 u64 elapsed_csecs64;
82434 unsigned int elapsed_csecs;
82435 bool wakeup = false;
82436+ bool timedout = false;
82437
82438 do_gettimeofday(&start);
82439
82440@@ -43,13 +44,20 @@ static int try_to_freeze_tasks(bool user_only)
82441
82442 while (true) {
82443 todo = 0;
82444+ if (time_after(jiffies, end_time))
82445+ timedout = true;
82446 read_lock(&tasklist_lock);
82447 do_each_thread(g, p) {
82448 if (p == current || !freeze_task(p))
82449 continue;
82450
82451- if (!freezer_should_skip(p))
82452+ if (!freezer_should_skip(p)) {
82453 todo++;
82454+ if (timedout) {
82455+ printk(KERN_ERR "Task refusing to freeze:\n");
82456+ sched_show_task(p);
82457+ }
82458+ }
82459 } while_each_thread(g, p);
82460 read_unlock(&tasklist_lock);
82461
82462@@ -58,7 +66,7 @@ static int try_to_freeze_tasks(bool user_only)
82463 todo += wq_busy;
82464 }
82465
82466- if (!todo || time_after(jiffies, end_time))
82467+ if (!todo || timedout)
82468 break;
82469
82470 if (pm_wakeup_pending()) {
82471diff --git a/kernel/printk.c b/kernel/printk.c
82472index d37d45c..ab918b3 100644
82473--- a/kernel/printk.c
82474+++ b/kernel/printk.c
82475@@ -390,6 +390,11 @@ static int check_syslog_permissions(int type, bool from_file)
82476 if (from_file && type != SYSLOG_ACTION_OPEN)
82477 return 0;
82478
82479+#ifdef CONFIG_GRKERNSEC_DMESG
82480+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
82481+ return -EPERM;
82482+#endif
82483+
82484 if (syslog_action_restricted(type)) {
82485 if (capable(CAP_SYSLOG))
82486 return 0;
82487diff --git a/kernel/profile.c b/kernel/profile.c
82488index 0bf4007..6234708 100644
82489--- a/kernel/profile.c
82490+++ b/kernel/profile.c
82491@@ -37,7 +37,7 @@ struct profile_hit {
82492 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
82493 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
82494
82495-static atomic_t *prof_buffer;
82496+static atomic_unchecked_t *prof_buffer;
82497 static unsigned long prof_len, prof_shift;
82498
82499 int prof_on __read_mostly;
82500@@ -260,7 +260,7 @@ static void profile_flip_buffers(void)
82501 hits[i].pc = 0;
82502 continue;
82503 }
82504- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
82505+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
82506 hits[i].hits = hits[i].pc = 0;
82507 }
82508 }
82509@@ -321,9 +321,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
82510 * Add the current hit(s) and flush the write-queue out
82511 * to the global buffer:
82512 */
82513- atomic_add(nr_hits, &prof_buffer[pc]);
82514+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
82515 for (i = 0; i < NR_PROFILE_HIT; ++i) {
82516- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
82517+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
82518 hits[i].pc = hits[i].hits = 0;
82519 }
82520 out:
82521@@ -398,7 +398,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
82522 {
82523 unsigned long pc;
82524 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
82525- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
82526+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
82527 }
82528 #endif /* !CONFIG_SMP */
82529
82530@@ -494,7 +494,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
82531 return -EFAULT;
82532 buf++; p++; count--; read++;
82533 }
82534- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
82535+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
82536 if (copy_to_user(buf, (void *)pnt, count))
82537 return -EFAULT;
82538 read += count;
82539@@ -525,7 +525,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
82540 }
82541 #endif
82542 profile_discard_flip_buffers();
82543- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
82544+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
82545 return count;
82546 }
82547
82548diff --git a/kernel/ptrace.c b/kernel/ptrace.c
82549index 335a7ae..3bbbceb 100644
82550--- a/kernel/ptrace.c
82551+++ b/kernel/ptrace.c
82552@@ -326,7 +326,7 @@ static int ptrace_attach(struct task_struct *task, long request,
82553 if (seize)
82554 flags |= PT_SEIZED;
82555 rcu_read_lock();
82556- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
82557+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
82558 flags |= PT_PTRACE_CAP;
82559 rcu_read_unlock();
82560 task->ptrace = flags;
82561@@ -537,7 +537,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
82562 break;
82563 return -EIO;
82564 }
82565- if (copy_to_user(dst, buf, retval))
82566+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
82567 return -EFAULT;
82568 copied += retval;
82569 src += retval;
82570@@ -805,7 +805,7 @@ int ptrace_request(struct task_struct *child, long request,
82571 bool seized = child->ptrace & PT_SEIZED;
82572 int ret = -EIO;
82573 siginfo_t siginfo, *si;
82574- void __user *datavp = (void __user *) data;
82575+ void __user *datavp = (__force void __user *) data;
82576 unsigned long __user *datalp = datavp;
82577 unsigned long flags;
82578
82579@@ -1011,14 +1011,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
82580 goto out;
82581 }
82582
82583+ if (gr_handle_ptrace(child, request)) {
82584+ ret = -EPERM;
82585+ goto out_put_task_struct;
82586+ }
82587+
82588 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
82589 ret = ptrace_attach(child, request, addr, data);
82590 /*
82591 * Some architectures need to do book-keeping after
82592 * a ptrace attach.
82593 */
82594- if (!ret)
82595+ if (!ret) {
82596 arch_ptrace_attach(child);
82597+ gr_audit_ptrace(child);
82598+ }
82599 goto out_put_task_struct;
82600 }
82601
82602@@ -1046,7 +1053,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
82603 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
82604 if (copied != sizeof(tmp))
82605 return -EIO;
82606- return put_user(tmp, (unsigned long __user *)data);
82607+ return put_user(tmp, (__force unsigned long __user *)data);
82608 }
82609
82610 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
82611@@ -1140,7 +1147,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
82612 }
82613
82614 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
82615- compat_long_t addr, compat_long_t data)
82616+ compat_ulong_t addr, compat_ulong_t data)
82617 {
82618 struct task_struct *child;
82619 long ret;
82620@@ -1156,14 +1163,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
82621 goto out;
82622 }
82623
82624+ if (gr_handle_ptrace(child, request)) {
82625+ ret = -EPERM;
82626+ goto out_put_task_struct;
82627+ }
82628+
82629 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
82630 ret = ptrace_attach(child, request, addr, data);
82631 /*
82632 * Some architectures need to do book-keeping after
82633 * a ptrace attach.
82634 */
82635- if (!ret)
82636+ if (!ret) {
82637 arch_ptrace_attach(child);
82638+ gr_audit_ptrace(child);
82639+ }
82640 goto out_put_task_struct;
82641 }
82642
82643diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
82644index 48ab703..07561d4 100644
82645--- a/kernel/rcupdate.c
82646+++ b/kernel/rcupdate.c
82647@@ -439,10 +439,10 @@ int rcu_jiffies_till_stall_check(void)
82648 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
82649 */
82650 if (till_stall_check < 3) {
82651- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
82652+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
82653 till_stall_check = 3;
82654 } else if (till_stall_check > 300) {
82655- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
82656+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
82657 till_stall_check = 300;
82658 }
82659 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
82660diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
82661index a0714a5..2ab5e34 100644
82662--- a/kernel/rcutiny.c
82663+++ b/kernel/rcutiny.c
82664@@ -46,7 +46,7 @@
82665 struct rcu_ctrlblk;
82666 static void invoke_rcu_callbacks(void);
82667 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
82668-static void rcu_process_callbacks(struct softirq_action *unused);
82669+static void rcu_process_callbacks(void);
82670 static void __call_rcu(struct rcu_head *head,
82671 void (*func)(struct rcu_head *rcu),
82672 struct rcu_ctrlblk *rcp);
82673@@ -312,7 +312,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
82674 rcu_is_callbacks_kthread()));
82675 }
82676
82677-static void rcu_process_callbacks(struct softirq_action *unused)
82678+static void rcu_process_callbacks(void)
82679 {
82680 __rcu_process_callbacks(&rcu_sched_ctrlblk);
82681 __rcu_process_callbacks(&rcu_bh_ctrlblk);
82682diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
82683index 8a23300..4255818 100644
82684--- a/kernel/rcutiny_plugin.h
82685+++ b/kernel/rcutiny_plugin.h
82686@@ -945,7 +945,7 @@ static int rcu_kthread(void *arg)
82687 have_rcu_kthread_work = morework;
82688 local_irq_restore(flags);
82689 if (work)
82690- rcu_process_callbacks(NULL);
82691+ rcu_process_callbacks();
82692 schedule_timeout_interruptible(1); /* Leave CPU for others. */
82693 }
82694
82695diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
82696index e1f3a8c..42c94a2 100644
82697--- a/kernel/rcutorture.c
82698+++ b/kernel/rcutorture.c
82699@@ -164,12 +164,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
82700 { 0 };
82701 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
82702 { 0 };
82703-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
82704-static atomic_t n_rcu_torture_alloc;
82705-static atomic_t n_rcu_torture_alloc_fail;
82706-static atomic_t n_rcu_torture_free;
82707-static atomic_t n_rcu_torture_mberror;
82708-static atomic_t n_rcu_torture_error;
82709+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
82710+static atomic_unchecked_t n_rcu_torture_alloc;
82711+static atomic_unchecked_t n_rcu_torture_alloc_fail;
82712+static atomic_unchecked_t n_rcu_torture_free;
82713+static atomic_unchecked_t n_rcu_torture_mberror;
82714+static atomic_unchecked_t n_rcu_torture_error;
82715 static long n_rcu_torture_barrier_error;
82716 static long n_rcu_torture_boost_ktrerror;
82717 static long n_rcu_torture_boost_rterror;
82718@@ -287,11 +287,11 @@ rcu_torture_alloc(void)
82719
82720 spin_lock_bh(&rcu_torture_lock);
82721 if (list_empty(&rcu_torture_freelist)) {
82722- atomic_inc(&n_rcu_torture_alloc_fail);
82723+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
82724 spin_unlock_bh(&rcu_torture_lock);
82725 return NULL;
82726 }
82727- atomic_inc(&n_rcu_torture_alloc);
82728+ atomic_inc_unchecked(&n_rcu_torture_alloc);
82729 p = rcu_torture_freelist.next;
82730 list_del_init(p);
82731 spin_unlock_bh(&rcu_torture_lock);
82732@@ -304,7 +304,7 @@ rcu_torture_alloc(void)
82733 static void
82734 rcu_torture_free(struct rcu_torture *p)
82735 {
82736- atomic_inc(&n_rcu_torture_free);
82737+ atomic_inc_unchecked(&n_rcu_torture_free);
82738 spin_lock_bh(&rcu_torture_lock);
82739 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
82740 spin_unlock_bh(&rcu_torture_lock);
82741@@ -424,7 +424,7 @@ rcu_torture_cb(struct rcu_head *p)
82742 i = rp->rtort_pipe_count;
82743 if (i > RCU_TORTURE_PIPE_LEN)
82744 i = RCU_TORTURE_PIPE_LEN;
82745- atomic_inc(&rcu_torture_wcount[i]);
82746+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
82747 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
82748 rp->rtort_mbtest = 0;
82749 rcu_torture_free(rp);
82750@@ -472,7 +472,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
82751 i = rp->rtort_pipe_count;
82752 if (i > RCU_TORTURE_PIPE_LEN)
82753 i = RCU_TORTURE_PIPE_LEN;
82754- atomic_inc(&rcu_torture_wcount[i]);
82755+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
82756 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
82757 rp->rtort_mbtest = 0;
82758 list_del(&rp->rtort_free);
82759@@ -990,7 +990,7 @@ rcu_torture_writer(void *arg)
82760 i = old_rp->rtort_pipe_count;
82761 if (i > RCU_TORTURE_PIPE_LEN)
82762 i = RCU_TORTURE_PIPE_LEN;
82763- atomic_inc(&rcu_torture_wcount[i]);
82764+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
82765 old_rp->rtort_pipe_count++;
82766 cur_ops->deferred_free(old_rp);
82767 }
82768@@ -1076,7 +1076,7 @@ static void rcu_torture_timer(unsigned long unused)
82769 return;
82770 }
82771 if (p->rtort_mbtest == 0)
82772- atomic_inc(&n_rcu_torture_mberror);
82773+ atomic_inc_unchecked(&n_rcu_torture_mberror);
82774 spin_lock(&rand_lock);
82775 cur_ops->read_delay(&rand);
82776 n_rcu_torture_timers++;
82777@@ -1146,7 +1146,7 @@ rcu_torture_reader(void *arg)
82778 continue;
82779 }
82780 if (p->rtort_mbtest == 0)
82781- atomic_inc(&n_rcu_torture_mberror);
82782+ atomic_inc_unchecked(&n_rcu_torture_mberror);
82783 cur_ops->read_delay(&rand);
82784 preempt_disable();
82785 pipe_count = p->rtort_pipe_count;
82786@@ -1209,11 +1209,11 @@ rcu_torture_printk(char *page)
82787 rcu_torture_current,
82788 rcu_torture_current_version,
82789 list_empty(&rcu_torture_freelist),
82790- atomic_read(&n_rcu_torture_alloc),
82791- atomic_read(&n_rcu_torture_alloc_fail),
82792- atomic_read(&n_rcu_torture_free));
82793+ atomic_read_unchecked(&n_rcu_torture_alloc),
82794+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
82795+ atomic_read_unchecked(&n_rcu_torture_free));
82796 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
82797- atomic_read(&n_rcu_torture_mberror),
82798+ atomic_read_unchecked(&n_rcu_torture_mberror),
82799 n_rcu_torture_boost_ktrerror,
82800 n_rcu_torture_boost_rterror);
82801 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
82802@@ -1232,14 +1232,14 @@ rcu_torture_printk(char *page)
82803 n_barrier_attempts,
82804 n_rcu_torture_barrier_error);
82805 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
82806- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
82807+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
82808 n_rcu_torture_barrier_error != 0 ||
82809 n_rcu_torture_boost_ktrerror != 0 ||
82810 n_rcu_torture_boost_rterror != 0 ||
82811 n_rcu_torture_boost_failure != 0 ||
82812 i > 1) {
82813 cnt += sprintf(&page[cnt], "!!! ");
82814- atomic_inc(&n_rcu_torture_error);
82815+ atomic_inc_unchecked(&n_rcu_torture_error);
82816 WARN_ON_ONCE(1);
82817 }
82818 cnt += sprintf(&page[cnt], "Reader Pipe: ");
82819@@ -1253,7 +1253,7 @@ rcu_torture_printk(char *page)
82820 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
82821 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
82822 cnt += sprintf(&page[cnt], " %d",
82823- atomic_read(&rcu_torture_wcount[i]));
82824+ atomic_read_unchecked(&rcu_torture_wcount[i]));
82825 }
82826 cnt += sprintf(&page[cnt], "\n");
82827 if (cur_ops->stats)
82828@@ -1962,7 +1962,7 @@ rcu_torture_cleanup(void)
82829
82830 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
82831
82832- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
82833+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
82834 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
82835 else if (n_online_successes != n_online_attempts ||
82836 n_offline_successes != n_offline_attempts)
82837@@ -2031,18 +2031,18 @@ rcu_torture_init(void)
82838
82839 rcu_torture_current = NULL;
82840 rcu_torture_current_version = 0;
82841- atomic_set(&n_rcu_torture_alloc, 0);
82842- atomic_set(&n_rcu_torture_alloc_fail, 0);
82843- atomic_set(&n_rcu_torture_free, 0);
82844- atomic_set(&n_rcu_torture_mberror, 0);
82845- atomic_set(&n_rcu_torture_error, 0);
82846+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
82847+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
82848+ atomic_set_unchecked(&n_rcu_torture_free, 0);
82849+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
82850+ atomic_set_unchecked(&n_rcu_torture_error, 0);
82851 n_rcu_torture_barrier_error = 0;
82852 n_rcu_torture_boost_ktrerror = 0;
82853 n_rcu_torture_boost_rterror = 0;
82854 n_rcu_torture_boost_failure = 0;
82855 n_rcu_torture_boosts = 0;
82856 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
82857- atomic_set(&rcu_torture_wcount[i], 0);
82858+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
82859 for_each_possible_cpu(cpu) {
82860 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
82861 per_cpu(rcu_torture_count, cpu)[i] = 0;
82862diff --git a/kernel/rcutree.c b/kernel/rcutree.c
82863index 3538001..e379e0b 100644
82864--- a/kernel/rcutree.c
82865+++ b/kernel/rcutree.c
82866@@ -358,9 +358,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
82867 rcu_prepare_for_idle(smp_processor_id());
82868 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
82869 smp_mb__before_atomic_inc(); /* See above. */
82870- atomic_inc(&rdtp->dynticks);
82871+ atomic_inc_unchecked(&rdtp->dynticks);
82872 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
82873- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
82874+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
82875
82876 /*
82877 * It is illegal to enter an extended quiescent state while
82878@@ -496,10 +496,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
82879 int user)
82880 {
82881 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
82882- atomic_inc(&rdtp->dynticks);
82883+ atomic_inc_unchecked(&rdtp->dynticks);
82884 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
82885 smp_mb__after_atomic_inc(); /* See above. */
82886- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
82887+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
82888 rcu_cleanup_after_idle(smp_processor_id());
82889 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
82890 if (!user && !is_idle_task(current)) {
82891@@ -638,14 +638,14 @@ void rcu_nmi_enter(void)
82892 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
82893
82894 if (rdtp->dynticks_nmi_nesting == 0 &&
82895- (atomic_read(&rdtp->dynticks) & 0x1))
82896+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
82897 return;
82898 rdtp->dynticks_nmi_nesting++;
82899 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
82900- atomic_inc(&rdtp->dynticks);
82901+ atomic_inc_unchecked(&rdtp->dynticks);
82902 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
82903 smp_mb__after_atomic_inc(); /* See above. */
82904- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
82905+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
82906 }
82907
82908 /**
82909@@ -664,9 +664,9 @@ void rcu_nmi_exit(void)
82910 return;
82911 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
82912 smp_mb__before_atomic_inc(); /* See above. */
82913- atomic_inc(&rdtp->dynticks);
82914+ atomic_inc_unchecked(&rdtp->dynticks);
82915 smp_mb__after_atomic_inc(); /* Force delay to next write. */
82916- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
82917+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
82918 }
82919
82920 /**
82921@@ -680,7 +680,7 @@ int rcu_is_cpu_idle(void)
82922 int ret;
82923
82924 preempt_disable();
82925- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
82926+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
82927 preempt_enable();
82928 return ret;
82929 }
82930@@ -748,7 +748,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
82931 */
82932 static int dyntick_save_progress_counter(struct rcu_data *rdp)
82933 {
82934- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
82935+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
82936 return (rdp->dynticks_snap & 0x1) == 0;
82937 }
82938
82939@@ -763,7 +763,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
82940 unsigned int curr;
82941 unsigned int snap;
82942
82943- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
82944+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
82945 snap = (unsigned int)rdp->dynticks_snap;
82946
82947 /*
82948@@ -1440,9 +1440,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
82949 rdp = this_cpu_ptr(rsp->rda);
82950 rcu_preempt_check_blocked_tasks(rnp);
82951 rnp->qsmask = rnp->qsmaskinit;
82952- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
82953+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
82954 WARN_ON_ONCE(rnp->completed != rsp->completed);
82955- ACCESS_ONCE(rnp->completed) = rsp->completed;
82956+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
82957 if (rnp == rdp->mynode)
82958 rcu_start_gp_per_cpu(rsp, rnp, rdp);
82959 rcu_preempt_boost_start_gp(rnp);
82960@@ -1524,7 +1524,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
82961 */
82962 rcu_for_each_node_breadth_first(rsp, rnp) {
82963 raw_spin_lock_irq(&rnp->lock);
82964- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
82965+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
82966 rdp = this_cpu_ptr(rsp->rda);
82967 if (rnp == rdp->mynode)
82968 __rcu_process_gp_end(rsp, rnp, rdp);
82969@@ -1855,7 +1855,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
82970 rsp->qlen += rdp->qlen;
82971 rdp->n_cbs_orphaned += rdp->qlen;
82972 rdp->qlen_lazy = 0;
82973- ACCESS_ONCE(rdp->qlen) = 0;
82974+ ACCESS_ONCE_RW(rdp->qlen) = 0;
82975 }
82976
82977 /*
82978@@ -2101,7 +2101,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
82979 }
82980 smp_mb(); /* List handling before counting for rcu_barrier(). */
82981 rdp->qlen_lazy -= count_lazy;
82982- ACCESS_ONCE(rdp->qlen) -= count;
82983+ ACCESS_ONCE_RW(rdp->qlen) -= count;
82984 rdp->n_cbs_invoked += count;
82985
82986 /* Reinstate batch limit if we have worked down the excess. */
82987@@ -2295,7 +2295,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
82988 /*
82989 * Do RCU core processing for the current CPU.
82990 */
82991-static void rcu_process_callbacks(struct softirq_action *unused)
82992+static void rcu_process_callbacks(void)
82993 {
82994 struct rcu_state *rsp;
82995
82996@@ -2419,7 +2419,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
82997 local_irq_restore(flags);
82998 return;
82999 }
83000- ACCESS_ONCE(rdp->qlen)++;
83001+ ACCESS_ONCE_RW(rdp->qlen)++;
83002 if (lazy)
83003 rdp->qlen_lazy++;
83004 else
83005@@ -2628,11 +2628,11 @@ void synchronize_sched_expedited(void)
83006 * counter wrap on a 32-bit system. Quite a few more CPUs would of
83007 * course be required on a 64-bit system.
83008 */
83009- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
83010+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
83011 (ulong)atomic_long_read(&rsp->expedited_done) +
83012 ULONG_MAX / 8)) {
83013 synchronize_sched();
83014- atomic_long_inc(&rsp->expedited_wrap);
83015+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
83016 return;
83017 }
83018
83019@@ -2640,7 +2640,7 @@ void synchronize_sched_expedited(void)
83020 * Take a ticket. Note that atomic_inc_return() implies a
83021 * full memory barrier.
83022 */
83023- snap = atomic_long_inc_return(&rsp->expedited_start);
83024+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
83025 firstsnap = snap;
83026 get_online_cpus();
83027 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
83028@@ -2653,14 +2653,14 @@ void synchronize_sched_expedited(void)
83029 synchronize_sched_expedited_cpu_stop,
83030 NULL) == -EAGAIN) {
83031 put_online_cpus();
83032- atomic_long_inc(&rsp->expedited_tryfail);
83033+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
83034
83035 /* Check to see if someone else did our work for us. */
83036 s = atomic_long_read(&rsp->expedited_done);
83037 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
83038 /* ensure test happens before caller kfree */
83039 smp_mb__before_atomic_inc(); /* ^^^ */
83040- atomic_long_inc(&rsp->expedited_workdone1);
83041+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
83042 return;
83043 }
83044
83045@@ -2669,7 +2669,7 @@ void synchronize_sched_expedited(void)
83046 udelay(trycount * num_online_cpus());
83047 } else {
83048 wait_rcu_gp(call_rcu_sched);
83049- atomic_long_inc(&rsp->expedited_normal);
83050+ atomic_long_inc_unchecked(&rsp->expedited_normal);
83051 return;
83052 }
83053
83054@@ -2678,7 +2678,7 @@ void synchronize_sched_expedited(void)
83055 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
83056 /* ensure test happens before caller kfree */
83057 smp_mb__before_atomic_inc(); /* ^^^ */
83058- atomic_long_inc(&rsp->expedited_workdone2);
83059+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
83060 return;
83061 }
83062
83063@@ -2690,10 +2690,10 @@ void synchronize_sched_expedited(void)
83064 * period works for us.
83065 */
83066 get_online_cpus();
83067- snap = atomic_long_read(&rsp->expedited_start);
83068+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
83069 smp_mb(); /* ensure read is before try_stop_cpus(). */
83070 }
83071- atomic_long_inc(&rsp->expedited_stoppedcpus);
83072+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
83073
83074 /*
83075 * Everyone up to our most recent fetch is covered by our grace
83076@@ -2702,16 +2702,16 @@ void synchronize_sched_expedited(void)
83077 * than we did already did their update.
83078 */
83079 do {
83080- atomic_long_inc(&rsp->expedited_done_tries);
83081+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
83082 s = atomic_long_read(&rsp->expedited_done);
83083 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
83084 /* ensure test happens before caller kfree */
83085 smp_mb__before_atomic_inc(); /* ^^^ */
83086- atomic_long_inc(&rsp->expedited_done_lost);
83087+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
83088 break;
83089 }
83090 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
83091- atomic_long_inc(&rsp->expedited_done_exit);
83092+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
83093
83094 put_online_cpus();
83095 }
83096@@ -2893,7 +2893,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
83097 * ACCESS_ONCE() to prevent the compiler from speculating
83098 * the increment to precede the early-exit check.
83099 */
83100- ACCESS_ONCE(rsp->n_barrier_done)++;
83101+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
83102 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
83103 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
83104 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
83105@@ -2943,7 +2943,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
83106
83107 /* Increment ->n_barrier_done to prevent duplicate work. */
83108 smp_mb(); /* Keep increment after above mechanism. */
83109- ACCESS_ONCE(rsp->n_barrier_done)++;
83110+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
83111 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
83112 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
83113 smp_mb(); /* Keep increment before caller's subsequent code. */
83114@@ -2988,10 +2988,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
83115 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
83116 init_callback_list(rdp);
83117 rdp->qlen_lazy = 0;
83118- ACCESS_ONCE(rdp->qlen) = 0;
83119+ ACCESS_ONCE_RW(rdp->qlen) = 0;
83120 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
83121 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
83122- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
83123+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
83124 rdp->cpu = cpu;
83125 rdp->rsp = rsp;
83126 rcu_boot_init_nocb_percpu_data(rdp);
83127@@ -3024,8 +3024,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
83128 rdp->blimit = blimit;
83129 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
83130 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
83131- atomic_set(&rdp->dynticks->dynticks,
83132- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
83133+ atomic_set_unchecked(&rdp->dynticks->dynticks,
83134+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
83135 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
83136
83137 /* Add CPU to rcu_node bitmasks. */
83138@@ -3120,7 +3120,7 @@ static int __init rcu_spawn_gp_kthread(void)
83139 struct task_struct *t;
83140
83141 for_each_rcu_flavor(rsp) {
83142- t = kthread_run(rcu_gp_kthread, rsp, rsp->name);
83143+ t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
83144 BUG_ON(IS_ERR(t));
83145 rnp = rcu_get_root(rsp);
83146 raw_spin_lock_irqsave(&rnp->lock, flags);
83147diff --git a/kernel/rcutree.h b/kernel/rcutree.h
83148index 4df5034..5ee93f2 100644
83149--- a/kernel/rcutree.h
83150+++ b/kernel/rcutree.h
83151@@ -87,7 +87,7 @@ struct rcu_dynticks {
83152 long long dynticks_nesting; /* Track irq/process nesting level. */
83153 /* Process level is worth LLONG_MAX/2. */
83154 int dynticks_nmi_nesting; /* Track NMI nesting level. */
83155- atomic_t dynticks; /* Even value for idle, else odd. */
83156+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
83157 #ifdef CONFIG_RCU_FAST_NO_HZ
83158 bool all_lazy; /* Are all CPU's CBs lazy? */
83159 unsigned long nonlazy_posted;
83160@@ -414,17 +414,17 @@ struct rcu_state {
83161 /* _rcu_barrier(). */
83162 /* End of fields guarded by barrier_mutex. */
83163
83164- atomic_long_t expedited_start; /* Starting ticket. */
83165- atomic_long_t expedited_done; /* Done ticket. */
83166- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
83167- atomic_long_t expedited_tryfail; /* # acquisition failures. */
83168- atomic_long_t expedited_workdone1; /* # done by others #1. */
83169- atomic_long_t expedited_workdone2; /* # done by others #2. */
83170- atomic_long_t expedited_normal; /* # fallbacks to normal. */
83171- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
83172- atomic_long_t expedited_done_tries; /* # tries to update _done. */
83173- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
83174- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
83175+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
83176+ atomic_long_t expedited_done; /* Done ticket. */
83177+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
83178+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
83179+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
83180+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
83181+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
83182+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
83183+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
83184+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
83185+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
83186
83187 unsigned long jiffies_force_qs; /* Time at which to invoke */
83188 /* force_quiescent_state(). */
83189diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
83190index 3db5a37..b395fb35 100644
83191--- a/kernel/rcutree_plugin.h
83192+++ b/kernel/rcutree_plugin.h
83193@@ -903,7 +903,7 @@ void synchronize_rcu_expedited(void)
83194
83195 /* Clean up and exit. */
83196 smp_mb(); /* ensure expedited GP seen before counter increment. */
83197- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
83198+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
83199 unlock_mb_ret:
83200 mutex_unlock(&sync_rcu_preempt_exp_mutex);
83201 mb_ret:
83202@@ -1451,7 +1451,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
83203 free_cpumask_var(cm);
83204 }
83205
83206-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
83207+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
83208 .store = &rcu_cpu_kthread_task,
83209 .thread_should_run = rcu_cpu_kthread_should_run,
83210 .thread_fn = rcu_cpu_kthread,
83211@@ -1916,7 +1916,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
83212 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
83213 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
83214 cpu, ticks_value, ticks_title,
83215- atomic_read(&rdtp->dynticks) & 0xfff,
83216+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
83217 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
83218 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
83219 fast_no_hz);
83220@@ -2079,7 +2079,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
83221
83222 /* Enqueue the callback on the nocb list and update counts. */
83223 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
83224- ACCESS_ONCE(*old_rhpp) = rhp;
83225+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
83226 atomic_long_add(rhcount, &rdp->nocb_q_count);
83227 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
83228
83229@@ -2219,12 +2219,12 @@ static int rcu_nocb_kthread(void *arg)
83230 * Extract queued callbacks, update counts, and wait
83231 * for a grace period to elapse.
83232 */
83233- ACCESS_ONCE(rdp->nocb_head) = NULL;
83234+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
83235 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
83236 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
83237 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
83238- ACCESS_ONCE(rdp->nocb_p_count) += c;
83239- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
83240+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
83241+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
83242 rcu_nocb_wait_gp(rdp);
83243
83244 /* Each pass through the following loop invokes a callback. */
83245@@ -2246,8 +2246,8 @@ static int rcu_nocb_kthread(void *arg)
83246 list = next;
83247 }
83248 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
83249- ACCESS_ONCE(rdp->nocb_p_count) -= c;
83250- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
83251+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
83252+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
83253 rdp->n_nocbs_invoked += c;
83254 }
83255 return 0;
83256@@ -2274,7 +2274,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
83257 t = kthread_run(rcu_nocb_kthread, rdp,
83258 "rcuo%c/%d", rsp->abbr, cpu);
83259 BUG_ON(IS_ERR(t));
83260- ACCESS_ONCE(rdp->nocb_kthread) = t;
83261+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
83262 }
83263 }
83264
83265diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
83266index cf6c174..a8f4b50 100644
83267--- a/kernel/rcutree_trace.c
83268+++ b/kernel/rcutree_trace.c
83269@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
83270 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
83271 rdp->passed_quiesce, rdp->qs_pending);
83272 seq_printf(m, " dt=%d/%llx/%d df=%lu",
83273- atomic_read(&rdp->dynticks->dynticks),
83274+ atomic_read_unchecked(&rdp->dynticks->dynticks),
83275 rdp->dynticks->dynticks_nesting,
83276 rdp->dynticks->dynticks_nmi_nesting,
83277 rdp->dynticks_fqs);
83278@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
83279 struct rcu_state *rsp = (struct rcu_state *)m->private;
83280
83281 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
83282- atomic_long_read(&rsp->expedited_start),
83283+ atomic_long_read_unchecked(&rsp->expedited_start),
83284 atomic_long_read(&rsp->expedited_done),
83285- atomic_long_read(&rsp->expedited_wrap),
83286- atomic_long_read(&rsp->expedited_tryfail),
83287- atomic_long_read(&rsp->expedited_workdone1),
83288- atomic_long_read(&rsp->expedited_workdone2),
83289- atomic_long_read(&rsp->expedited_normal),
83290- atomic_long_read(&rsp->expedited_stoppedcpus),
83291- atomic_long_read(&rsp->expedited_done_tries),
83292- atomic_long_read(&rsp->expedited_done_lost),
83293- atomic_long_read(&rsp->expedited_done_exit));
83294+ atomic_long_read_unchecked(&rsp->expedited_wrap),
83295+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
83296+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
83297+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
83298+ atomic_long_read_unchecked(&rsp->expedited_normal),
83299+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
83300+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
83301+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
83302+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
83303 return 0;
83304 }
83305
83306diff --git a/kernel/resource.c b/kernel/resource.c
83307index d738698..5f8e60a 100644
83308--- a/kernel/resource.c
83309+++ b/kernel/resource.c
83310@@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = {
83311
83312 static int __init ioresources_init(void)
83313 {
83314+#ifdef CONFIG_GRKERNSEC_PROC_ADD
83315+#ifdef CONFIG_GRKERNSEC_PROC_USER
83316+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
83317+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
83318+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
83319+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
83320+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
83321+#endif
83322+#else
83323 proc_create("ioports", 0, NULL, &proc_ioports_operations);
83324 proc_create("iomem", 0, NULL, &proc_iomem_operations);
83325+#endif
83326 return 0;
83327 }
83328 __initcall(ioresources_init);
83329diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
83330index 1d96dd0..994ff19 100644
83331--- a/kernel/rtmutex-tester.c
83332+++ b/kernel/rtmutex-tester.c
83333@@ -22,7 +22,7 @@
83334 #define MAX_RT_TEST_MUTEXES 8
83335
83336 static spinlock_t rttest_lock;
83337-static atomic_t rttest_event;
83338+static atomic_unchecked_t rttest_event;
83339
83340 struct test_thread_data {
83341 int opcode;
83342@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
83343
83344 case RTTEST_LOCKCONT:
83345 td->mutexes[td->opdata] = 1;
83346- td->event = atomic_add_return(1, &rttest_event);
83347+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83348 return 0;
83349
83350 case RTTEST_RESET:
83351@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
83352 return 0;
83353
83354 case RTTEST_RESETEVENT:
83355- atomic_set(&rttest_event, 0);
83356+ atomic_set_unchecked(&rttest_event, 0);
83357 return 0;
83358
83359 default:
83360@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
83361 return ret;
83362
83363 td->mutexes[id] = 1;
83364- td->event = atomic_add_return(1, &rttest_event);
83365+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83366 rt_mutex_lock(&mutexes[id]);
83367- td->event = atomic_add_return(1, &rttest_event);
83368+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83369 td->mutexes[id] = 4;
83370 return 0;
83371
83372@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
83373 return ret;
83374
83375 td->mutexes[id] = 1;
83376- td->event = atomic_add_return(1, &rttest_event);
83377+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83378 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
83379- td->event = atomic_add_return(1, &rttest_event);
83380+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83381 td->mutexes[id] = ret ? 0 : 4;
83382 return ret ? -EINTR : 0;
83383
83384@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
83385 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
83386 return ret;
83387
83388- td->event = atomic_add_return(1, &rttest_event);
83389+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83390 rt_mutex_unlock(&mutexes[id]);
83391- td->event = atomic_add_return(1, &rttest_event);
83392+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83393 td->mutexes[id] = 0;
83394 return 0;
83395
83396@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
83397 break;
83398
83399 td->mutexes[dat] = 2;
83400- td->event = atomic_add_return(1, &rttest_event);
83401+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83402 break;
83403
83404 default:
83405@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
83406 return;
83407
83408 td->mutexes[dat] = 3;
83409- td->event = atomic_add_return(1, &rttest_event);
83410+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83411 break;
83412
83413 case RTTEST_LOCKNOWAIT:
83414@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
83415 return;
83416
83417 td->mutexes[dat] = 1;
83418- td->event = atomic_add_return(1, &rttest_event);
83419+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83420 return;
83421
83422 default:
83423diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
83424index 64de5f8..7735e12 100644
83425--- a/kernel/sched/auto_group.c
83426+++ b/kernel/sched/auto_group.c
83427@@ -11,7 +11,7 @@
83428
83429 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
83430 static struct autogroup autogroup_default;
83431-static atomic_t autogroup_seq_nr;
83432+static atomic_unchecked_t autogroup_seq_nr;
83433
83434 void __init autogroup_init(struct task_struct *init_task)
83435 {
83436@@ -81,7 +81,7 @@ static inline struct autogroup *autogroup_create(void)
83437
83438 kref_init(&ag->kref);
83439 init_rwsem(&ag->lock);
83440- ag->id = atomic_inc_return(&autogroup_seq_nr);
83441+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
83442 ag->tg = tg;
83443 #ifdef CONFIG_RT_GROUP_SCHED
83444 /*
83445diff --git a/kernel/sched/core.c b/kernel/sched/core.c
83446index e8b3350..d83d44e 100644
83447--- a/kernel/sched/core.c
83448+++ b/kernel/sched/core.c
83449@@ -3440,7 +3440,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
83450 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
83451 * positive (at least 1, or number of jiffies left till timeout) if completed.
83452 */
83453-long __sched
83454+long __sched __intentional_overflow(-1)
83455 wait_for_completion_interruptible_timeout(struct completion *x,
83456 unsigned long timeout)
83457 {
83458@@ -3457,7 +3457,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
83459 *
83460 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
83461 */
83462-int __sched wait_for_completion_killable(struct completion *x)
83463+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
83464 {
83465 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
83466 if (t == -ERESTARTSYS)
83467@@ -3478,7 +3478,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
83468 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
83469 * positive (at least 1, or number of jiffies left till timeout) if completed.
83470 */
83471-long __sched
83472+long __sched __intentional_overflow(-1)
83473 wait_for_completion_killable_timeout(struct completion *x,
83474 unsigned long timeout)
83475 {
83476@@ -3704,6 +3704,8 @@ int can_nice(const struct task_struct *p, const int nice)
83477 /* convert nice value [19,-20] to rlimit style value [1,40] */
83478 int nice_rlim = 20 - nice;
83479
83480+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
83481+
83482 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
83483 capable(CAP_SYS_NICE));
83484 }
83485@@ -3737,7 +3739,8 @@ SYSCALL_DEFINE1(nice, int, increment)
83486 if (nice > 19)
83487 nice = 19;
83488
83489- if (increment < 0 && !can_nice(current, nice))
83490+ if (increment < 0 && (!can_nice(current, nice) ||
83491+ gr_handle_chroot_nice()))
83492 return -EPERM;
83493
83494 retval = security_task_setnice(current, nice);
83495@@ -3891,6 +3894,7 @@ recheck:
83496 unsigned long rlim_rtprio =
83497 task_rlimit(p, RLIMIT_RTPRIO);
83498
83499+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
83500 /* can't set/change the rt policy */
83501 if (policy != p->policy && !rlim_rtprio)
83502 return -EPERM;
83503@@ -4988,7 +4992,7 @@ static void migrate_tasks(unsigned int dead_cpu)
83504
83505 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
83506
83507-static struct ctl_table sd_ctl_dir[] = {
83508+static ctl_table_no_const sd_ctl_dir[] __read_only = {
83509 {
83510 .procname = "sched_domain",
83511 .mode = 0555,
83512@@ -5005,17 +5009,17 @@ static struct ctl_table sd_ctl_root[] = {
83513 {}
83514 };
83515
83516-static struct ctl_table *sd_alloc_ctl_entry(int n)
83517+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
83518 {
83519- struct ctl_table *entry =
83520+ ctl_table_no_const *entry =
83521 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
83522
83523 return entry;
83524 }
83525
83526-static void sd_free_ctl_entry(struct ctl_table **tablep)
83527+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
83528 {
83529- struct ctl_table *entry;
83530+ ctl_table_no_const *entry;
83531
83532 /*
83533 * In the intermediate directories, both the child directory and
83534@@ -5023,22 +5027,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
83535 * will always be set. In the lowest directory the names are
83536 * static strings and all have proc handlers.
83537 */
83538- for (entry = *tablep; entry->mode; entry++) {
83539- if (entry->child)
83540- sd_free_ctl_entry(&entry->child);
83541+ for (entry = tablep; entry->mode; entry++) {
83542+ if (entry->child) {
83543+ sd_free_ctl_entry(entry->child);
83544+ pax_open_kernel();
83545+ entry->child = NULL;
83546+ pax_close_kernel();
83547+ }
83548 if (entry->proc_handler == NULL)
83549 kfree(entry->procname);
83550 }
83551
83552- kfree(*tablep);
83553- *tablep = NULL;
83554+ kfree(tablep);
83555 }
83556
83557 static int min_load_idx = 0;
83558 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
83559
83560 static void
83561-set_table_entry(struct ctl_table *entry,
83562+set_table_entry(ctl_table_no_const *entry,
83563 const char *procname, void *data, int maxlen,
83564 umode_t mode, proc_handler *proc_handler,
83565 bool load_idx)
83566@@ -5058,7 +5065,7 @@ set_table_entry(struct ctl_table *entry,
83567 static struct ctl_table *
83568 sd_alloc_ctl_domain_table(struct sched_domain *sd)
83569 {
83570- struct ctl_table *table = sd_alloc_ctl_entry(13);
83571+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
83572
83573 if (table == NULL)
83574 return NULL;
83575@@ -5093,9 +5100,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
83576 return table;
83577 }
83578
83579-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
83580+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
83581 {
83582- struct ctl_table *entry, *table;
83583+ ctl_table_no_const *entry, *table;
83584 struct sched_domain *sd;
83585 int domain_num = 0, i;
83586 char buf[32];
83587@@ -5122,11 +5129,13 @@ static struct ctl_table_header *sd_sysctl_header;
83588 static void register_sched_domain_sysctl(void)
83589 {
83590 int i, cpu_num = num_possible_cpus();
83591- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
83592+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
83593 char buf[32];
83594
83595 WARN_ON(sd_ctl_dir[0].child);
83596+ pax_open_kernel();
83597 sd_ctl_dir[0].child = entry;
83598+ pax_close_kernel();
83599
83600 if (entry == NULL)
83601 return;
83602@@ -5149,8 +5158,12 @@ static void unregister_sched_domain_sysctl(void)
83603 if (sd_sysctl_header)
83604 unregister_sysctl_table(sd_sysctl_header);
83605 sd_sysctl_header = NULL;
83606- if (sd_ctl_dir[0].child)
83607- sd_free_ctl_entry(&sd_ctl_dir[0].child);
83608+ if (sd_ctl_dir[0].child) {
83609+ sd_free_ctl_entry(sd_ctl_dir[0].child);
83610+ pax_open_kernel();
83611+ sd_ctl_dir[0].child = NULL;
83612+ pax_close_kernel();
83613+ }
83614 }
83615 #else
83616 static void register_sched_domain_sysctl(void)
83617@@ -5249,7 +5262,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
83618 * happens before everything else. This has to be lower priority than
83619 * the notifier in the perf_event subsystem, though.
83620 */
83621-static struct notifier_block __cpuinitdata migration_notifier = {
83622+static struct notifier_block migration_notifier = {
83623 .notifier_call = migration_call,
83624 .priority = CPU_PRI_MIGRATION,
83625 };
83626diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
83627index 03b73be..9422b9f 100644
83628--- a/kernel/sched/fair.c
83629+++ b/kernel/sched/fair.c
83630@@ -831,7 +831,7 @@ void task_numa_fault(int node, int pages, bool migrated)
83631
83632 static void reset_ptenuma_scan(struct task_struct *p)
83633 {
83634- ACCESS_ONCE(p->mm->numa_scan_seq)++;
83635+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
83636 p->mm->numa_scan_offset = 0;
83637 }
83638
83639@@ -5687,7 +5687,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
83640 * run_rebalance_domains is triggered when needed from the scheduler tick.
83641 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
83642 */
83643-static void run_rebalance_domains(struct softirq_action *h)
83644+static void run_rebalance_domains(void)
83645 {
83646 int this_cpu = smp_processor_id();
83647 struct rq *this_rq = cpu_rq(this_cpu);
83648diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
83649index ce39224d..0e09343 100644
83650--- a/kernel/sched/sched.h
83651+++ b/kernel/sched/sched.h
83652@@ -1009,7 +1009,7 @@ struct sched_class {
83653 #ifdef CONFIG_FAIR_GROUP_SCHED
83654 void (*task_move_group) (struct task_struct *p, int on_rq);
83655 #endif
83656-};
83657+} __do_const;
83658
83659 #define sched_class_highest (&stop_sched_class)
83660 #define for_each_class(class) \
83661diff --git a/kernel/signal.c b/kernel/signal.c
83662index 113411b..20d0a99 100644
83663--- a/kernel/signal.c
83664+++ b/kernel/signal.c
83665@@ -51,12 +51,12 @@ static struct kmem_cache *sigqueue_cachep;
83666
83667 int print_fatal_signals __read_mostly;
83668
83669-static void __user *sig_handler(struct task_struct *t, int sig)
83670+static __sighandler_t sig_handler(struct task_struct *t, int sig)
83671 {
83672 return t->sighand->action[sig - 1].sa.sa_handler;
83673 }
83674
83675-static int sig_handler_ignored(void __user *handler, int sig)
83676+static int sig_handler_ignored(__sighandler_t handler, int sig)
83677 {
83678 /* Is it explicitly or implicitly ignored? */
83679 return handler == SIG_IGN ||
83680@@ -65,7 +65,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
83681
83682 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
83683 {
83684- void __user *handler;
83685+ __sighandler_t handler;
83686
83687 handler = sig_handler(t, sig);
83688
83689@@ -369,6 +369,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
83690 atomic_inc(&user->sigpending);
83691 rcu_read_unlock();
83692
83693+ if (!override_rlimit)
83694+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
83695+
83696 if (override_rlimit ||
83697 atomic_read(&user->sigpending) <=
83698 task_rlimit(t, RLIMIT_SIGPENDING)) {
83699@@ -496,7 +499,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
83700
83701 int unhandled_signal(struct task_struct *tsk, int sig)
83702 {
83703- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
83704+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
83705 if (is_global_init(tsk))
83706 return 1;
83707 if (handler != SIG_IGN && handler != SIG_DFL)
83708@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
83709 }
83710 }
83711
83712+ /* allow glibc communication via tgkill to other threads in our
83713+ thread group */
83714+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
83715+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
83716+ && gr_handle_signal(t, sig))
83717+ return -EPERM;
83718+
83719 return security_task_kill(t, info, sig, 0);
83720 }
83721
83722@@ -1199,7 +1209,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
83723 return send_signal(sig, info, p, 1);
83724 }
83725
83726-static int
83727+int
83728 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
83729 {
83730 return send_signal(sig, info, t, 0);
83731@@ -1236,6 +1246,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
83732 unsigned long int flags;
83733 int ret, blocked, ignored;
83734 struct k_sigaction *action;
83735+ int is_unhandled = 0;
83736
83737 spin_lock_irqsave(&t->sighand->siglock, flags);
83738 action = &t->sighand->action[sig-1];
83739@@ -1250,9 +1261,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
83740 }
83741 if (action->sa.sa_handler == SIG_DFL)
83742 t->signal->flags &= ~SIGNAL_UNKILLABLE;
83743+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
83744+ is_unhandled = 1;
83745 ret = specific_send_sig_info(sig, info, t);
83746 spin_unlock_irqrestore(&t->sighand->siglock, flags);
83747
83748+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
83749+ normal operation */
83750+ if (is_unhandled) {
83751+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
83752+ gr_handle_crash(t, sig);
83753+ }
83754+
83755 return ret;
83756 }
83757
83758@@ -1319,8 +1339,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
83759 ret = check_kill_permission(sig, info, p);
83760 rcu_read_unlock();
83761
83762- if (!ret && sig)
83763+ if (!ret && sig) {
83764 ret = do_send_sig_info(sig, info, p, true);
83765+ if (!ret)
83766+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
83767+ }
83768
83769 return ret;
83770 }
83771@@ -2926,7 +2949,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
83772 int error = -ESRCH;
83773
83774 rcu_read_lock();
83775- p = find_task_by_vpid(pid);
83776+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
83777+ /* allow glibc communication via tgkill to other threads in our
83778+ thread group */
83779+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
83780+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
83781+ p = find_task_by_vpid_unrestricted(pid);
83782+ else
83783+#endif
83784+ p = find_task_by_vpid(pid);
83785 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
83786 error = check_kill_permission(sig, info, p);
83787 /*
83788@@ -3219,6 +3250,16 @@ int __save_altstack(stack_t __user *uss, unsigned long sp)
83789 __put_user(t->sas_ss_size, &uss->ss_size);
83790 }
83791
83792+#ifdef CONFIG_X86
83793+void __save_altstack_ex(stack_t __user *uss, unsigned long sp)
83794+{
83795+ struct task_struct *t = current;
83796+ put_user_ex((void __user *)t->sas_ss_sp, &uss->ss_sp);
83797+ put_user_ex(sas_ss_flags(sp), &uss->ss_flags);
83798+ put_user_ex(t->sas_ss_size, &uss->ss_size);
83799+}
83800+#endif
83801+
83802 #ifdef CONFIG_COMPAT
83803 COMPAT_SYSCALL_DEFINE2(sigaltstack,
83804 const compat_stack_t __user *, uss_ptr,
83805@@ -3240,8 +3281,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
83806 }
83807 seg = get_fs();
83808 set_fs(KERNEL_DS);
83809- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
83810- (stack_t __force __user *) &uoss,
83811+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
83812+ (stack_t __force_user *) &uoss,
83813 compat_user_stack_pointer());
83814 set_fs(seg);
83815 if (ret >= 0 && uoss_ptr) {
83816@@ -3268,6 +3309,16 @@ int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
83817 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
83818 __put_user(t->sas_ss_size, &uss->ss_size);
83819 }
83820+
83821+#ifdef CONFIG_X86
83822+void __compat_save_altstack_ex(compat_stack_t __user *uss, unsigned long sp)
83823+{
83824+ struct task_struct *t = current;
83825+ put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp);
83826+ put_user_ex(sas_ss_flags(sp), &uss->ss_flags);
83827+ put_user_ex(t->sas_ss_size, &uss->ss_size);
83828+}
83829+#endif
83830 #endif
83831
83832 #ifdef __ARCH_WANT_SYS_SIGPENDING
83833diff --git a/kernel/smp.c b/kernel/smp.c
83834index 4dba0f7..fe9f773 100644
83835--- a/kernel/smp.c
83836+++ b/kernel/smp.c
83837@@ -73,7 +73,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
83838 return NOTIFY_OK;
83839 }
83840
83841-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
83842+static struct notifier_block hotplug_cfd_notifier = {
83843 .notifier_call = hotplug_cfd,
83844 };
83845
83846diff --git a/kernel/smpboot.c b/kernel/smpboot.c
83847index 02fc5c9..e54c335 100644
83848--- a/kernel/smpboot.c
83849+++ b/kernel/smpboot.c
83850@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
83851 }
83852 smpboot_unpark_thread(plug_thread, cpu);
83853 }
83854- list_add(&plug_thread->list, &hotplug_threads);
83855+ pax_list_add(&plug_thread->list, &hotplug_threads);
83856 out:
83857 mutex_unlock(&smpboot_threads_lock);
83858 return ret;
83859@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
83860 {
83861 get_online_cpus();
83862 mutex_lock(&smpboot_threads_lock);
83863- list_del(&plug_thread->list);
83864+ pax_list_del(&plug_thread->list);
83865 smpboot_destroy_threads(plug_thread);
83866 mutex_unlock(&smpboot_threads_lock);
83867 put_online_cpus();
83868diff --git a/kernel/softirq.c b/kernel/softirq.c
83869index 3d6833f..da6d93d 100644
83870--- a/kernel/softirq.c
83871+++ b/kernel/softirq.c
83872@@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
83873 EXPORT_SYMBOL(irq_stat);
83874 #endif
83875
83876-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
83877+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
83878
83879 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
83880
83881-char *softirq_to_name[NR_SOFTIRQS] = {
83882+const char * const softirq_to_name[NR_SOFTIRQS] = {
83883 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
83884 "TASKLET", "SCHED", "HRTIMER", "RCU"
83885 };
83886@@ -250,7 +250,7 @@ restart:
83887 kstat_incr_softirqs_this_cpu(vec_nr);
83888
83889 trace_softirq_entry(vec_nr);
83890- h->action(h);
83891+ h->action();
83892 trace_softirq_exit(vec_nr);
83893 if (unlikely(prev_count != preempt_count())) {
83894 printk(KERN_ERR "huh, entered softirq %u %s %p"
83895@@ -405,7 +405,7 @@ void __raise_softirq_irqoff(unsigned int nr)
83896 or_softirq_pending(1UL << nr);
83897 }
83898
83899-void open_softirq(int nr, void (*action)(struct softirq_action *))
83900+void __init open_softirq(int nr, void (*action)(void))
83901 {
83902 softirq_vec[nr].action = action;
83903 }
83904@@ -461,7 +461,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
83905
83906 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
83907
83908-static void tasklet_action(struct softirq_action *a)
83909+static void tasklet_action(void)
83910 {
83911 struct tasklet_struct *list;
83912
83913@@ -496,7 +496,7 @@ static void tasklet_action(struct softirq_action *a)
83914 }
83915 }
83916
83917-static void tasklet_hi_action(struct softirq_action *a)
83918+static void tasklet_hi_action(void)
83919 {
83920 struct tasklet_struct *list;
83921
83922@@ -730,7 +730,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
83923 return NOTIFY_OK;
83924 }
83925
83926-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
83927+static struct notifier_block remote_softirq_cpu_notifier = {
83928 .notifier_call = remote_softirq_cpu_notify,
83929 };
83930
83931@@ -847,11 +847,11 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
83932 return NOTIFY_OK;
83933 }
83934
83935-static struct notifier_block __cpuinitdata cpu_nfb = {
83936+static struct notifier_block cpu_nfb = {
83937 .notifier_call = cpu_callback
83938 };
83939
83940-static struct smp_hotplug_thread softirq_threads = {
83941+static struct smp_hotplug_thread softirq_threads __read_only = {
83942 .store = &ksoftirqd,
83943 .thread_should_run = ksoftirqd_should_run,
83944 .thread_fn = run_ksoftirqd,
83945diff --git a/kernel/srcu.c b/kernel/srcu.c
83946index 01d5ccb..cdcbee6 100644
83947--- a/kernel/srcu.c
83948+++ b/kernel/srcu.c
83949@@ -300,9 +300,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
83950
83951 idx = ACCESS_ONCE(sp->completed) & 0x1;
83952 preempt_disable();
83953- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
83954+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
83955 smp_mb(); /* B */ /* Avoid leaking the critical section. */
83956- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
83957+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
83958 preempt_enable();
83959 return idx;
83960 }
83961diff --git a/kernel/sys.c b/kernel/sys.c
83962index 2bbd9a7..0875671 100644
83963--- a/kernel/sys.c
83964+++ b/kernel/sys.c
83965@@ -163,6 +163,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
83966 error = -EACCES;
83967 goto out;
83968 }
83969+
83970+ if (gr_handle_chroot_setpriority(p, niceval)) {
83971+ error = -EACCES;
83972+ goto out;
83973+ }
83974+
83975 no_nice = security_task_setnice(p, niceval);
83976 if (no_nice) {
83977 error = no_nice;
83978@@ -626,6 +632,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
83979 goto error;
83980 }
83981
83982+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
83983+ goto error;
83984+
83985 if (rgid != (gid_t) -1 ||
83986 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
83987 new->sgid = new->egid;
83988@@ -661,6 +670,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
83989 old = current_cred();
83990
83991 retval = -EPERM;
83992+
83993+ if (gr_check_group_change(kgid, kgid, kgid))
83994+ goto error;
83995+
83996 if (nsown_capable(CAP_SETGID))
83997 new->gid = new->egid = new->sgid = new->fsgid = kgid;
83998 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
83999@@ -678,7 +691,7 @@ error:
84000 /*
84001 * change the user struct in a credentials set to match the new UID
84002 */
84003-static int set_user(struct cred *new)
84004+int set_user(struct cred *new)
84005 {
84006 struct user_struct *new_user;
84007
84008@@ -758,6 +771,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
84009 goto error;
84010 }
84011
84012+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
84013+ goto error;
84014+
84015 if (!uid_eq(new->uid, old->uid)) {
84016 retval = set_user(new);
84017 if (retval < 0)
84018@@ -808,6 +824,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
84019 old = current_cred();
84020
84021 retval = -EPERM;
84022+
84023+ if (gr_check_crash_uid(kuid))
84024+ goto error;
84025+ if (gr_check_user_change(kuid, kuid, kuid))
84026+ goto error;
84027+
84028 if (nsown_capable(CAP_SETUID)) {
84029 new->suid = new->uid = kuid;
84030 if (!uid_eq(kuid, old->uid)) {
84031@@ -877,6 +899,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
84032 goto error;
84033 }
84034
84035+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
84036+ goto error;
84037+
84038 if (ruid != (uid_t) -1) {
84039 new->uid = kruid;
84040 if (!uid_eq(kruid, old->uid)) {
84041@@ -959,6 +984,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
84042 goto error;
84043 }
84044
84045+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
84046+ goto error;
84047+
84048 if (rgid != (gid_t) -1)
84049 new->gid = krgid;
84050 if (egid != (gid_t) -1)
84051@@ -1020,12 +1048,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
84052 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
84053 nsown_capable(CAP_SETUID)) {
84054 if (!uid_eq(kuid, old->fsuid)) {
84055+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
84056+ goto error;
84057+
84058 new->fsuid = kuid;
84059 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
84060 goto change_okay;
84061 }
84062 }
84063
84064+error:
84065 abort_creds(new);
84066 return old_fsuid;
84067
84068@@ -1058,12 +1090,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
84069 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
84070 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
84071 nsown_capable(CAP_SETGID)) {
84072+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
84073+ goto error;
84074+
84075 if (!gid_eq(kgid, old->fsgid)) {
84076 new->fsgid = kgid;
84077 goto change_okay;
84078 }
84079 }
84080
84081+error:
84082 abort_creds(new);
84083 return old_fsgid;
84084
84085@@ -1432,19 +1468,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
84086 return -EFAULT;
84087
84088 down_read(&uts_sem);
84089- error = __copy_to_user(&name->sysname, &utsname()->sysname,
84090+ error = __copy_to_user(name->sysname, &utsname()->sysname,
84091 __OLD_UTS_LEN);
84092 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
84093- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
84094+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
84095 __OLD_UTS_LEN);
84096 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
84097- error |= __copy_to_user(&name->release, &utsname()->release,
84098+ error |= __copy_to_user(name->release, &utsname()->release,
84099 __OLD_UTS_LEN);
84100 error |= __put_user(0, name->release + __OLD_UTS_LEN);
84101- error |= __copy_to_user(&name->version, &utsname()->version,
84102+ error |= __copy_to_user(name->version, &utsname()->version,
84103 __OLD_UTS_LEN);
84104 error |= __put_user(0, name->version + __OLD_UTS_LEN);
84105- error |= __copy_to_user(&name->machine, &utsname()->machine,
84106+ error |= __copy_to_user(name->machine, &utsname()->machine,
84107 __OLD_UTS_LEN);
84108 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
84109 up_read(&uts_sem);
84110@@ -1646,6 +1682,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
84111 */
84112 new_rlim->rlim_cur = 1;
84113 }
84114+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
84115+ is changed to a lower value. Since tasks can be created by the same
84116+ user in between this limit change and an execve by this task, force
84117+ a recheck only for this task by setting PF_NPROC_EXCEEDED
84118+ */
84119+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
84120+ tsk->flags |= PF_NPROC_EXCEEDED;
84121 }
84122 if (!retval) {
84123 if (old_rlim)
84124diff --git a/kernel/sysctl.c b/kernel/sysctl.c
84125index 9edcf45..713c960 100644
84126--- a/kernel/sysctl.c
84127+++ b/kernel/sysctl.c
84128@@ -93,7 +93,6 @@
84129
84130
84131 #if defined(CONFIG_SYSCTL)
84132-
84133 /* External variables not in a header file. */
84134 extern int sysctl_overcommit_memory;
84135 extern int sysctl_overcommit_ratio;
84136@@ -119,18 +118,18 @@ extern int blk_iopoll_enabled;
84137
84138 /* Constants used for minimum and maximum */
84139 #ifdef CONFIG_LOCKUP_DETECTOR
84140-static int sixty = 60;
84141-static int neg_one = -1;
84142+static int sixty __read_only = 60;
84143 #endif
84144
84145-static int zero;
84146-static int __maybe_unused one = 1;
84147-static int __maybe_unused two = 2;
84148-static int __maybe_unused three = 3;
84149-static unsigned long one_ul = 1;
84150-static int one_hundred = 100;
84151+static int neg_one __read_only = -1;
84152+static int zero __read_only = 0;
84153+static int __maybe_unused one __read_only = 1;
84154+static int __maybe_unused two __read_only = 2;
84155+static int __maybe_unused three __read_only = 3;
84156+static unsigned long one_ul __read_only = 1;
84157+static int one_hundred __read_only = 100;
84158 #ifdef CONFIG_PRINTK
84159-static int ten_thousand = 10000;
84160+static int ten_thousand __read_only = 10000;
84161 #endif
84162
84163 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
84164@@ -177,10 +176,8 @@ static int proc_taint(struct ctl_table *table, int write,
84165 void __user *buffer, size_t *lenp, loff_t *ppos);
84166 #endif
84167
84168-#ifdef CONFIG_PRINTK
84169 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
84170 void __user *buffer, size_t *lenp, loff_t *ppos);
84171-#endif
84172
84173 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
84174 void __user *buffer, size_t *lenp, loff_t *ppos);
84175@@ -211,6 +208,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
84176
84177 #endif
84178
84179+extern struct ctl_table grsecurity_table[];
84180+
84181 static struct ctl_table kern_table[];
84182 static struct ctl_table vm_table[];
84183 static struct ctl_table fs_table[];
84184@@ -225,6 +224,20 @@ extern struct ctl_table epoll_table[];
84185 int sysctl_legacy_va_layout;
84186 #endif
84187
84188+#ifdef CONFIG_PAX_SOFTMODE
84189+static ctl_table pax_table[] = {
84190+ {
84191+ .procname = "softmode",
84192+ .data = &pax_softmode,
84193+ .maxlen = sizeof(unsigned int),
84194+ .mode = 0600,
84195+ .proc_handler = &proc_dointvec,
84196+ },
84197+
84198+ { }
84199+};
84200+#endif
84201+
84202 /* The default sysctl tables: */
84203
84204 static struct ctl_table sysctl_base_table[] = {
84205@@ -273,6 +286,22 @@ static int max_extfrag_threshold = 1000;
84206 #endif
84207
84208 static struct ctl_table kern_table[] = {
84209+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
84210+ {
84211+ .procname = "grsecurity",
84212+ .mode = 0500,
84213+ .child = grsecurity_table,
84214+ },
84215+#endif
84216+
84217+#ifdef CONFIG_PAX_SOFTMODE
84218+ {
84219+ .procname = "pax",
84220+ .mode = 0500,
84221+ .child = pax_table,
84222+ },
84223+#endif
84224+
84225 {
84226 .procname = "sched_child_runs_first",
84227 .data = &sysctl_sched_child_runs_first,
84228@@ -607,7 +636,7 @@ static struct ctl_table kern_table[] = {
84229 .data = &modprobe_path,
84230 .maxlen = KMOD_PATH_LEN,
84231 .mode = 0644,
84232- .proc_handler = proc_dostring,
84233+ .proc_handler = proc_dostring_modpriv,
84234 },
84235 {
84236 .procname = "modules_disabled",
84237@@ -774,16 +803,20 @@ static struct ctl_table kern_table[] = {
84238 .extra1 = &zero,
84239 .extra2 = &one,
84240 },
84241+#endif
84242 {
84243 .procname = "kptr_restrict",
84244 .data = &kptr_restrict,
84245 .maxlen = sizeof(int),
84246 .mode = 0644,
84247 .proc_handler = proc_dointvec_minmax_sysadmin,
84248+#ifdef CONFIG_GRKERNSEC_HIDESYM
84249+ .extra1 = &two,
84250+#else
84251 .extra1 = &zero,
84252+#endif
84253 .extra2 = &two,
84254 },
84255-#endif
84256 {
84257 .procname = "ngroups_max",
84258 .data = &ngroups_max,
84259@@ -1025,10 +1058,17 @@ static struct ctl_table kern_table[] = {
84260 */
84261 {
84262 .procname = "perf_event_paranoid",
84263- .data = &sysctl_perf_event_paranoid,
84264- .maxlen = sizeof(sysctl_perf_event_paranoid),
84265+ .data = &sysctl_perf_event_legitimately_concerned,
84266+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
84267 .mode = 0644,
84268- .proc_handler = proc_dointvec,
84269+ /* go ahead, be a hero */
84270+ .proc_handler = proc_dointvec_minmax_sysadmin,
84271+ .extra1 = &neg_one,
84272+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
84273+ .extra2 = &three,
84274+#else
84275+ .extra2 = &two,
84276+#endif
84277 },
84278 {
84279 .procname = "perf_event_mlock_kb",
84280@@ -1282,6 +1322,13 @@ static struct ctl_table vm_table[] = {
84281 .proc_handler = proc_dointvec_minmax,
84282 .extra1 = &zero,
84283 },
84284+ {
84285+ .procname = "heap_stack_gap",
84286+ .data = &sysctl_heap_stack_gap,
84287+ .maxlen = sizeof(sysctl_heap_stack_gap),
84288+ .mode = 0644,
84289+ .proc_handler = proc_doulongvec_minmax,
84290+ },
84291 #else
84292 {
84293 .procname = "nr_trim_pages",
84294@@ -1746,6 +1793,16 @@ int proc_dostring(struct ctl_table *table, int write,
84295 buffer, lenp, ppos);
84296 }
84297
84298+int proc_dostring_modpriv(struct ctl_table *table, int write,
84299+ void __user *buffer, size_t *lenp, loff_t *ppos)
84300+{
84301+ if (write && !capable(CAP_SYS_MODULE))
84302+ return -EPERM;
84303+
84304+ return _proc_do_string(table->data, table->maxlen, write,
84305+ buffer, lenp, ppos);
84306+}
84307+
84308 static size_t proc_skip_spaces(char **buf)
84309 {
84310 size_t ret;
84311@@ -1851,6 +1908,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
84312 len = strlen(tmp);
84313 if (len > *size)
84314 len = *size;
84315+ if (len > sizeof(tmp))
84316+ len = sizeof(tmp);
84317 if (copy_to_user(*buf, tmp, len))
84318 return -EFAULT;
84319 *size -= len;
84320@@ -2015,7 +2074,7 @@ int proc_dointvec(struct ctl_table *table, int write,
84321 static int proc_taint(struct ctl_table *table, int write,
84322 void __user *buffer, size_t *lenp, loff_t *ppos)
84323 {
84324- struct ctl_table t;
84325+ ctl_table_no_const t;
84326 unsigned long tmptaint = get_taint();
84327 int err;
84328
84329@@ -2043,7 +2102,6 @@ static int proc_taint(struct ctl_table *table, int write,
84330 return err;
84331 }
84332
84333-#ifdef CONFIG_PRINTK
84334 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
84335 void __user *buffer, size_t *lenp, loff_t *ppos)
84336 {
84337@@ -2052,7 +2110,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
84338
84339 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
84340 }
84341-#endif
84342
84343 struct do_proc_dointvec_minmax_conv_param {
84344 int *min;
84345@@ -2199,8 +2256,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
84346 *i = val;
84347 } else {
84348 val = convdiv * (*i) / convmul;
84349- if (!first)
84350+ if (!first) {
84351 err = proc_put_char(&buffer, &left, '\t');
84352+ if (err)
84353+ break;
84354+ }
84355 err = proc_put_long(&buffer, &left, val, false);
84356 if (err)
84357 break;
84358@@ -2592,6 +2652,12 @@ int proc_dostring(struct ctl_table *table, int write,
84359 return -ENOSYS;
84360 }
84361
84362+int proc_dostring_modpriv(struct ctl_table *table, int write,
84363+ void __user *buffer, size_t *lenp, loff_t *ppos)
84364+{
84365+ return -ENOSYS;
84366+}
84367+
84368 int proc_dointvec(struct ctl_table *table, int write,
84369 void __user *buffer, size_t *lenp, loff_t *ppos)
84370 {
84371@@ -2648,5 +2714,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
84372 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
84373 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
84374 EXPORT_SYMBOL(proc_dostring);
84375+EXPORT_SYMBOL(proc_dostring_modpriv);
84376 EXPORT_SYMBOL(proc_doulongvec_minmax);
84377 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
84378diff --git a/kernel/taskstats.c b/kernel/taskstats.c
84379index 145bb4d..b2aa969 100644
84380--- a/kernel/taskstats.c
84381+++ b/kernel/taskstats.c
84382@@ -28,9 +28,12 @@
84383 #include <linux/fs.h>
84384 #include <linux/file.h>
84385 #include <linux/pid_namespace.h>
84386+#include <linux/grsecurity.h>
84387 #include <net/genetlink.h>
84388 #include <linux/atomic.h>
84389
84390+extern int gr_is_taskstats_denied(int pid);
84391+
84392 /*
84393 * Maximum length of a cpumask that can be specified in
84394 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
84395@@ -570,6 +573,9 @@ err:
84396
84397 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
84398 {
84399+ if (gr_is_taskstats_denied(current->pid))
84400+ return -EACCES;
84401+
84402 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
84403 return cmd_attr_register_cpumask(info);
84404 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
84405diff --git a/kernel/time.c b/kernel/time.c
84406index d3617db..c98bbe9 100644
84407--- a/kernel/time.c
84408+++ b/kernel/time.c
84409@@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
84410 return error;
84411
84412 if (tz) {
84413+ /* we log in do_settimeofday called below, so don't log twice
84414+ */
84415+ if (!tv)
84416+ gr_log_timechange();
84417+
84418 sys_tz = *tz;
84419 update_vsyscall_tz();
84420 if (firsttime) {
84421@@ -502,7 +507,7 @@ EXPORT_SYMBOL(usecs_to_jiffies);
84422 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
84423 * value to a scaled second value.
84424 */
84425-unsigned long
84426+unsigned long __intentional_overflow(-1)
84427 timespec_to_jiffies(const struct timespec *value)
84428 {
84429 unsigned long sec = value->tv_sec;
84430diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
84431index f11d83b..d016d91 100644
84432--- a/kernel/time/alarmtimer.c
84433+++ b/kernel/time/alarmtimer.c
84434@@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
84435 struct platform_device *pdev;
84436 int error = 0;
84437 int i;
84438- struct k_clock alarm_clock = {
84439+ static struct k_clock alarm_clock = {
84440 .clock_getres = alarm_clock_getres,
84441 .clock_get = alarm_clock_get,
84442 .timer_create = alarm_timer_create,
84443diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
84444index baeeb5c..c22704a 100644
84445--- a/kernel/time/timekeeping.c
84446+++ b/kernel/time/timekeeping.c
84447@@ -15,6 +15,7 @@
84448 #include <linux/init.h>
84449 #include <linux/mm.h>
84450 #include <linux/sched.h>
84451+#include <linux/grsecurity.h>
84452 #include <linux/syscore_ops.h>
84453 #include <linux/clocksource.h>
84454 #include <linux/jiffies.h>
84455@@ -495,6 +496,8 @@ int do_settimeofday(const struct timespec *tv)
84456 if (!timespec_valid_strict(tv))
84457 return -EINVAL;
84458
84459+ gr_log_timechange();
84460+
84461 raw_spin_lock_irqsave(&timekeeper_lock, flags);
84462 write_seqcount_begin(&timekeeper_seq);
84463
84464diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
84465index 3bdf283..cc68d83 100644
84466--- a/kernel/time/timer_list.c
84467+++ b/kernel/time/timer_list.c
84468@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
84469
84470 static void print_name_offset(struct seq_file *m, void *sym)
84471 {
84472+#ifdef CONFIG_GRKERNSEC_HIDESYM
84473+ SEQ_printf(m, "<%p>", NULL);
84474+#else
84475 char symname[KSYM_NAME_LEN];
84476
84477 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
84478 SEQ_printf(m, "<%pK>", sym);
84479 else
84480 SEQ_printf(m, "%s", symname);
84481+#endif
84482 }
84483
84484 static void
84485@@ -119,7 +123,11 @@ next_one:
84486 static void
84487 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
84488 {
84489+#ifdef CONFIG_GRKERNSEC_HIDESYM
84490+ SEQ_printf(m, " .base: %p\n", NULL);
84491+#else
84492 SEQ_printf(m, " .base: %pK\n", base);
84493+#endif
84494 SEQ_printf(m, " .index: %d\n",
84495 base->index);
84496 SEQ_printf(m, " .resolution: %Lu nsecs\n",
84497@@ -355,7 +363,11 @@ static int __init init_timer_list_procfs(void)
84498 {
84499 struct proc_dir_entry *pe;
84500
84501+#ifdef CONFIG_GRKERNSEC_PROC_ADD
84502+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
84503+#else
84504 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
84505+#endif
84506 if (!pe)
84507 return -ENOMEM;
84508 return 0;
84509diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
84510index 0b537f2..40d6c20 100644
84511--- a/kernel/time/timer_stats.c
84512+++ b/kernel/time/timer_stats.c
84513@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
84514 static unsigned long nr_entries;
84515 static struct entry entries[MAX_ENTRIES];
84516
84517-static atomic_t overflow_count;
84518+static atomic_unchecked_t overflow_count;
84519
84520 /*
84521 * The entries are in a hash-table, for fast lookup:
84522@@ -140,7 +140,7 @@ static void reset_entries(void)
84523 nr_entries = 0;
84524 memset(entries, 0, sizeof(entries));
84525 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
84526- atomic_set(&overflow_count, 0);
84527+ atomic_set_unchecked(&overflow_count, 0);
84528 }
84529
84530 static struct entry *alloc_entry(void)
84531@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
84532 if (likely(entry))
84533 entry->count++;
84534 else
84535- atomic_inc(&overflow_count);
84536+ atomic_inc_unchecked(&overflow_count);
84537
84538 out_unlock:
84539 raw_spin_unlock_irqrestore(lock, flags);
84540@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
84541
84542 static void print_name_offset(struct seq_file *m, unsigned long addr)
84543 {
84544+#ifdef CONFIG_GRKERNSEC_HIDESYM
84545+ seq_printf(m, "<%p>", NULL);
84546+#else
84547 char symname[KSYM_NAME_LEN];
84548
84549 if (lookup_symbol_name(addr, symname) < 0)
84550- seq_printf(m, "<%p>", (void *)addr);
84551+ seq_printf(m, "<%pK>", (void *)addr);
84552 else
84553 seq_printf(m, "%s", symname);
84554+#endif
84555 }
84556
84557 static int tstats_show(struct seq_file *m, void *v)
84558@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
84559
84560 seq_puts(m, "Timer Stats Version: v0.2\n");
84561 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
84562- if (atomic_read(&overflow_count))
84563+ if (atomic_read_unchecked(&overflow_count))
84564 seq_printf(m, "Overflow: %d entries\n",
84565- atomic_read(&overflow_count));
84566+ atomic_read_unchecked(&overflow_count));
84567
84568 for (i = 0; i < nr_entries; i++) {
84569 entry = entries + i;
84570@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
84571 {
84572 struct proc_dir_entry *pe;
84573
84574+#ifdef CONFIG_GRKERNSEC_PROC_ADD
84575+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
84576+#else
84577 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
84578+#endif
84579 if (!pe)
84580 return -ENOMEM;
84581 return 0;
84582diff --git a/kernel/timer.c b/kernel/timer.c
84583index 15bc1b4..32da49c 100644
84584--- a/kernel/timer.c
84585+++ b/kernel/timer.c
84586@@ -1366,7 +1366,7 @@ void update_process_times(int user_tick)
84587 /*
84588 * This function runs timers and the timer-tq in bottom half context.
84589 */
84590-static void run_timer_softirq(struct softirq_action *h)
84591+static void run_timer_softirq(void)
84592 {
84593 struct tvec_base *base = __this_cpu_read(tvec_bases);
84594
84595@@ -1429,7 +1429,7 @@ static void process_timeout(unsigned long __data)
84596 *
84597 * In all cases the return value is guaranteed to be non-negative.
84598 */
84599-signed long __sched schedule_timeout(signed long timeout)
84600+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
84601 {
84602 struct timer_list timer;
84603 unsigned long expire;
84604@@ -1635,7 +1635,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
84605 return NOTIFY_OK;
84606 }
84607
84608-static struct notifier_block __cpuinitdata timers_nb = {
84609+static struct notifier_block timers_nb = {
84610 .notifier_call = timer_cpu_notify,
84611 };
84612
84613diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
84614index b8b8560..75b1a09 100644
84615--- a/kernel/trace/blktrace.c
84616+++ b/kernel/trace/blktrace.c
84617@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
84618 struct blk_trace *bt = filp->private_data;
84619 char buf[16];
84620
84621- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
84622+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
84623
84624 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
84625 }
84626@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
84627 return 1;
84628
84629 bt = buf->chan->private_data;
84630- atomic_inc(&bt->dropped);
84631+ atomic_inc_unchecked(&bt->dropped);
84632 return 0;
84633 }
84634
84635@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
84636
84637 bt->dir = dir;
84638 bt->dev = dev;
84639- atomic_set(&bt->dropped, 0);
84640+ atomic_set_unchecked(&bt->dropped, 0);
84641
84642 ret = -EIO;
84643 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
84644diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
84645index f23449d..b8cc3a1 100644
84646--- a/kernel/trace/ftrace.c
84647+++ b/kernel/trace/ftrace.c
84648@@ -1925,12 +1925,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
84649 if (unlikely(ftrace_disabled))
84650 return 0;
84651
84652+ ret = ftrace_arch_code_modify_prepare();
84653+ FTRACE_WARN_ON(ret);
84654+ if (ret)
84655+ return 0;
84656+
84657 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
84658+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
84659 if (ret) {
84660 ftrace_bug(ret, ip);
84661- return 0;
84662 }
84663- return 1;
84664+ return ret ? 0 : 1;
84665 }
84666
84667 /*
84668@@ -3994,8 +3999,10 @@ static int ftrace_process_locs(struct module *mod,
84669 if (!count)
84670 return 0;
84671
84672+ pax_open_kernel();
84673 sort(start, count, sizeof(*start),
84674 ftrace_cmp_ips, ftrace_swap_ips);
84675+ pax_close_kernel();
84676
84677 start_pg = ftrace_allocate_pages(count);
84678 if (!start_pg)
84679@@ -4718,8 +4725,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
84680 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
84681
84682 static int ftrace_graph_active;
84683-static struct notifier_block ftrace_suspend_notifier;
84684-
84685 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
84686 {
84687 return 0;
84688@@ -4863,6 +4868,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
84689 return NOTIFY_DONE;
84690 }
84691
84692+static struct notifier_block ftrace_suspend_notifier = {
84693+ .notifier_call = ftrace_suspend_notifier_call
84694+};
84695+
84696 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
84697 trace_func_graph_ent_t entryfunc)
84698 {
84699@@ -4876,7 +4885,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
84700 goto out;
84701 }
84702
84703- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
84704 register_pm_notifier(&ftrace_suspend_notifier);
84705
84706 ftrace_graph_active++;
84707diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
84708index e444ff8..438b8f4 100644
84709--- a/kernel/trace/ring_buffer.c
84710+++ b/kernel/trace/ring_buffer.c
84711@@ -352,9 +352,9 @@ struct buffer_data_page {
84712 */
84713 struct buffer_page {
84714 struct list_head list; /* list of buffer pages */
84715- local_t write; /* index for next write */
84716+ local_unchecked_t write; /* index for next write */
84717 unsigned read; /* index for next read */
84718- local_t entries; /* entries on this page */
84719+ local_unchecked_t entries; /* entries on this page */
84720 unsigned long real_end; /* real end of data */
84721 struct buffer_data_page *page; /* Actual data page */
84722 };
84723@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
84724 unsigned long last_overrun;
84725 local_t entries_bytes;
84726 local_t entries;
84727- local_t overrun;
84728- local_t commit_overrun;
84729+ local_unchecked_t overrun;
84730+ local_unchecked_t commit_overrun;
84731 local_t dropped_events;
84732 local_t committing;
84733 local_t commits;
84734@@ -992,8 +992,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
84735 *
84736 * We add a counter to the write field to denote this.
84737 */
84738- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
84739- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
84740+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
84741+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
84742
84743 /*
84744 * Just make sure we have seen our old_write and synchronize
84745@@ -1021,8 +1021,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
84746 * cmpxchg to only update if an interrupt did not already
84747 * do it for us. If the cmpxchg fails, we don't care.
84748 */
84749- (void)local_cmpxchg(&next_page->write, old_write, val);
84750- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
84751+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
84752+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
84753
84754 /*
84755 * No need to worry about races with clearing out the commit.
84756@@ -1386,12 +1386,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
84757
84758 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
84759 {
84760- return local_read(&bpage->entries) & RB_WRITE_MASK;
84761+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
84762 }
84763
84764 static inline unsigned long rb_page_write(struct buffer_page *bpage)
84765 {
84766- return local_read(&bpage->write) & RB_WRITE_MASK;
84767+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
84768 }
84769
84770 static int
84771@@ -1486,7 +1486,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
84772 * bytes consumed in ring buffer from here.
84773 * Increment overrun to account for the lost events.
84774 */
84775- local_add(page_entries, &cpu_buffer->overrun);
84776+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
84777 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
84778 }
84779
84780@@ -2063,7 +2063,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
84781 * it is our responsibility to update
84782 * the counters.
84783 */
84784- local_add(entries, &cpu_buffer->overrun);
84785+ local_add_unchecked(entries, &cpu_buffer->overrun);
84786 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
84787
84788 /*
84789@@ -2213,7 +2213,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
84790 if (tail == BUF_PAGE_SIZE)
84791 tail_page->real_end = 0;
84792
84793- local_sub(length, &tail_page->write);
84794+ local_sub_unchecked(length, &tail_page->write);
84795 return;
84796 }
84797
84798@@ -2248,7 +2248,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
84799 rb_event_set_padding(event);
84800
84801 /* Set the write back to the previous setting */
84802- local_sub(length, &tail_page->write);
84803+ local_sub_unchecked(length, &tail_page->write);
84804 return;
84805 }
84806
84807@@ -2260,7 +2260,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
84808
84809 /* Set write to end of buffer */
84810 length = (tail + length) - BUF_PAGE_SIZE;
84811- local_sub(length, &tail_page->write);
84812+ local_sub_unchecked(length, &tail_page->write);
84813 }
84814
84815 /*
84816@@ -2286,7 +2286,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
84817 * about it.
84818 */
84819 if (unlikely(next_page == commit_page)) {
84820- local_inc(&cpu_buffer->commit_overrun);
84821+ local_inc_unchecked(&cpu_buffer->commit_overrun);
84822 goto out_reset;
84823 }
84824
84825@@ -2342,7 +2342,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
84826 cpu_buffer->tail_page) &&
84827 (cpu_buffer->commit_page ==
84828 cpu_buffer->reader_page))) {
84829- local_inc(&cpu_buffer->commit_overrun);
84830+ local_inc_unchecked(&cpu_buffer->commit_overrun);
84831 goto out_reset;
84832 }
84833 }
84834@@ -2390,7 +2390,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
84835 length += RB_LEN_TIME_EXTEND;
84836
84837 tail_page = cpu_buffer->tail_page;
84838- write = local_add_return(length, &tail_page->write);
84839+ write = local_add_return_unchecked(length, &tail_page->write);
84840
84841 /* set write to only the index of the write */
84842 write &= RB_WRITE_MASK;
84843@@ -2407,7 +2407,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
84844 kmemcheck_annotate_bitfield(event, bitfield);
84845 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
84846
84847- local_inc(&tail_page->entries);
84848+ local_inc_unchecked(&tail_page->entries);
84849
84850 /*
84851 * If this is the first commit on the page, then update
84852@@ -2440,7 +2440,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
84853
84854 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
84855 unsigned long write_mask =
84856- local_read(&bpage->write) & ~RB_WRITE_MASK;
84857+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
84858 unsigned long event_length = rb_event_length(event);
84859 /*
84860 * This is on the tail page. It is possible that
84861@@ -2450,7 +2450,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
84862 */
84863 old_index += write_mask;
84864 new_index += write_mask;
84865- index = local_cmpxchg(&bpage->write, old_index, new_index);
84866+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
84867 if (index == old_index) {
84868 /* update counters */
84869 local_sub(event_length, &cpu_buffer->entries_bytes);
84870@@ -2842,7 +2842,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
84871
84872 /* Do the likely case first */
84873 if (likely(bpage->page == (void *)addr)) {
84874- local_dec(&bpage->entries);
84875+ local_dec_unchecked(&bpage->entries);
84876 return;
84877 }
84878
84879@@ -2854,7 +2854,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
84880 start = bpage;
84881 do {
84882 if (bpage->page == (void *)addr) {
84883- local_dec(&bpage->entries);
84884+ local_dec_unchecked(&bpage->entries);
84885 return;
84886 }
84887 rb_inc_page(cpu_buffer, &bpage);
84888@@ -3138,7 +3138,7 @@ static inline unsigned long
84889 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
84890 {
84891 return local_read(&cpu_buffer->entries) -
84892- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
84893+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
84894 }
84895
84896 /**
84897@@ -3227,7 +3227,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
84898 return 0;
84899
84900 cpu_buffer = buffer->buffers[cpu];
84901- ret = local_read(&cpu_buffer->overrun);
84902+ ret = local_read_unchecked(&cpu_buffer->overrun);
84903
84904 return ret;
84905 }
84906@@ -3250,7 +3250,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
84907 return 0;
84908
84909 cpu_buffer = buffer->buffers[cpu];
84910- ret = local_read(&cpu_buffer->commit_overrun);
84911+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
84912
84913 return ret;
84914 }
84915@@ -3335,7 +3335,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
84916 /* if you care about this being correct, lock the buffer */
84917 for_each_buffer_cpu(buffer, cpu) {
84918 cpu_buffer = buffer->buffers[cpu];
84919- overruns += local_read(&cpu_buffer->overrun);
84920+ overruns += local_read_unchecked(&cpu_buffer->overrun);
84921 }
84922
84923 return overruns;
84924@@ -3511,8 +3511,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
84925 /*
84926 * Reset the reader page to size zero.
84927 */
84928- local_set(&cpu_buffer->reader_page->write, 0);
84929- local_set(&cpu_buffer->reader_page->entries, 0);
84930+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
84931+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
84932 local_set(&cpu_buffer->reader_page->page->commit, 0);
84933 cpu_buffer->reader_page->real_end = 0;
84934
84935@@ -3546,7 +3546,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
84936 * want to compare with the last_overrun.
84937 */
84938 smp_mb();
84939- overwrite = local_read(&(cpu_buffer->overrun));
84940+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
84941
84942 /*
84943 * Here's the tricky part.
84944@@ -4116,8 +4116,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
84945
84946 cpu_buffer->head_page
84947 = list_entry(cpu_buffer->pages, struct buffer_page, list);
84948- local_set(&cpu_buffer->head_page->write, 0);
84949- local_set(&cpu_buffer->head_page->entries, 0);
84950+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
84951+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
84952 local_set(&cpu_buffer->head_page->page->commit, 0);
84953
84954 cpu_buffer->head_page->read = 0;
84955@@ -4127,14 +4127,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
84956
84957 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
84958 INIT_LIST_HEAD(&cpu_buffer->new_pages);
84959- local_set(&cpu_buffer->reader_page->write, 0);
84960- local_set(&cpu_buffer->reader_page->entries, 0);
84961+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
84962+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
84963 local_set(&cpu_buffer->reader_page->page->commit, 0);
84964 cpu_buffer->reader_page->read = 0;
84965
84966 local_set(&cpu_buffer->entries_bytes, 0);
84967- local_set(&cpu_buffer->overrun, 0);
84968- local_set(&cpu_buffer->commit_overrun, 0);
84969+ local_set_unchecked(&cpu_buffer->overrun, 0);
84970+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
84971 local_set(&cpu_buffer->dropped_events, 0);
84972 local_set(&cpu_buffer->entries, 0);
84973 local_set(&cpu_buffer->committing, 0);
84974@@ -4538,8 +4538,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
84975 rb_init_page(bpage);
84976 bpage = reader->page;
84977 reader->page = *data_page;
84978- local_set(&reader->write, 0);
84979- local_set(&reader->entries, 0);
84980+ local_set_unchecked(&reader->write, 0);
84981+ local_set_unchecked(&reader->entries, 0);
84982 reader->read = 0;
84983 *data_page = bpage;
84984
84985diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
84986index 0582a01..310bed1 100644
84987--- a/kernel/trace/trace.c
84988+++ b/kernel/trace/trace.c
84989@@ -3327,7 +3327,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
84990 return 0;
84991 }
84992
84993-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
84994+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
84995 {
84996 /* do nothing if flag is already set */
84997 if (!!(trace_flags & mask) == !!enabled)
84998diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
84999index 51b4448..7be601f 100644
85000--- a/kernel/trace/trace.h
85001+++ b/kernel/trace/trace.h
85002@@ -1035,7 +1035,7 @@ extern const char *__stop___trace_bprintk_fmt[];
85003 void trace_printk_init_buffers(void);
85004 void trace_printk_start_comm(void);
85005 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
85006-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
85007+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
85008
85009 /*
85010 * Normal trace_printk() and friends allocates special buffers
85011diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
85012index 3d18aad..d1be0eb 100644
85013--- a/kernel/trace/trace_events.c
85014+++ b/kernel/trace/trace_events.c
85015@@ -1794,10 +1794,6 @@ static LIST_HEAD(ftrace_module_file_list);
85016 struct ftrace_module_file_ops {
85017 struct list_head list;
85018 struct module *mod;
85019- struct file_operations id;
85020- struct file_operations enable;
85021- struct file_operations format;
85022- struct file_operations filter;
85023 };
85024
85025 static struct ftrace_module_file_ops *
85026@@ -1838,17 +1834,12 @@ trace_create_file_ops(struct module *mod)
85027
85028 file_ops->mod = mod;
85029
85030- file_ops->id = ftrace_event_id_fops;
85031- file_ops->id.owner = mod;
85032-
85033- file_ops->enable = ftrace_enable_fops;
85034- file_ops->enable.owner = mod;
85035-
85036- file_ops->filter = ftrace_event_filter_fops;
85037- file_ops->filter.owner = mod;
85038-
85039- file_ops->format = ftrace_event_format_fops;
85040- file_ops->format.owner = mod;
85041+ pax_open_kernel();
85042+ mod->trace_id.owner = mod;
85043+ mod->trace_enable.owner = mod;
85044+ mod->trace_filter.owner = mod;
85045+ mod->trace_format.owner = mod;
85046+ pax_close_kernel();
85047
85048 list_add(&file_ops->list, &ftrace_module_file_list);
85049
85050@@ -1941,8 +1932,8 @@ __trace_add_new_mod_event(struct ftrace_event_call *call,
85051 struct ftrace_module_file_ops *file_ops)
85052 {
85053 return __trace_add_new_event(call, tr,
85054- &file_ops->id, &file_ops->enable,
85055- &file_ops->filter, &file_ops->format);
85056+ &file_ops->mod->trace_id, &file_ops->mod->trace_enable,
85057+ &file_ops->mod->trace_filter, &file_ops->mod->trace_format);
85058 }
85059
85060 #else
85061diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
85062index a5e8f48..a9690d2 100644
85063--- a/kernel/trace/trace_mmiotrace.c
85064+++ b/kernel/trace/trace_mmiotrace.c
85065@@ -24,7 +24,7 @@ struct header_iter {
85066 static struct trace_array *mmio_trace_array;
85067 static bool overrun_detected;
85068 static unsigned long prev_overruns;
85069-static atomic_t dropped_count;
85070+static atomic_unchecked_t dropped_count;
85071
85072 static void mmio_reset_data(struct trace_array *tr)
85073 {
85074@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
85075
85076 static unsigned long count_overruns(struct trace_iterator *iter)
85077 {
85078- unsigned long cnt = atomic_xchg(&dropped_count, 0);
85079+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
85080 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
85081
85082 if (over > prev_overruns)
85083@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
85084 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
85085 sizeof(*entry), 0, pc);
85086 if (!event) {
85087- atomic_inc(&dropped_count);
85088+ atomic_inc_unchecked(&dropped_count);
85089 return;
85090 }
85091 entry = ring_buffer_event_data(event);
85092@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
85093 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
85094 sizeof(*entry), 0, pc);
85095 if (!event) {
85096- atomic_inc(&dropped_count);
85097+ atomic_inc_unchecked(&dropped_count);
85098 return;
85099 }
85100 entry = ring_buffer_event_data(event);
85101diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
85102index bb922d9..2a54a257 100644
85103--- a/kernel/trace/trace_output.c
85104+++ b/kernel/trace/trace_output.c
85105@@ -294,7 +294,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
85106
85107 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
85108 if (!IS_ERR(p)) {
85109- p = mangle_path(s->buffer + s->len, p, "\n");
85110+ p = mangle_path(s->buffer + s->len, p, "\n\\");
85111 if (p) {
85112 s->len = p - s->buffer;
85113 return 1;
85114@@ -893,14 +893,16 @@ int register_ftrace_event(struct trace_event *event)
85115 goto out;
85116 }
85117
85118+ pax_open_kernel();
85119 if (event->funcs->trace == NULL)
85120- event->funcs->trace = trace_nop_print;
85121+ *(void **)&event->funcs->trace = trace_nop_print;
85122 if (event->funcs->raw == NULL)
85123- event->funcs->raw = trace_nop_print;
85124+ *(void **)&event->funcs->raw = trace_nop_print;
85125 if (event->funcs->hex == NULL)
85126- event->funcs->hex = trace_nop_print;
85127+ *(void **)&event->funcs->hex = trace_nop_print;
85128 if (event->funcs->binary == NULL)
85129- event->funcs->binary = trace_nop_print;
85130+ *(void **)&event->funcs->binary = trace_nop_print;
85131+ pax_close_kernel();
85132
85133 key = event->type & (EVENT_HASHSIZE - 1);
85134
85135diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
85136index b20428c..4845a10 100644
85137--- a/kernel/trace/trace_stack.c
85138+++ b/kernel/trace/trace_stack.c
85139@@ -68,7 +68,7 @@ check_stack(unsigned long ip, unsigned long *stack)
85140 return;
85141
85142 /* we do not handle interrupt stacks yet */
85143- if (!object_is_on_stack(stack))
85144+ if (!object_starts_on_stack(stack))
85145 return;
85146
85147 local_irq_save(flags);
85148diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
85149index 9064b91..1f5d2f8 100644
85150--- a/kernel/user_namespace.c
85151+++ b/kernel/user_namespace.c
85152@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
85153 !kgid_has_mapping(parent_ns, group))
85154 return -EPERM;
85155
85156+#ifdef CONFIG_GRKERNSEC
85157+ /*
85158+ * This doesn't really inspire confidence:
85159+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
85160+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
85161+ * Increases kernel attack surface in areas developers
85162+ * previously cared little about ("low importance due
85163+ * to requiring "root" capability")
85164+ * To be removed when this code receives *proper* review
85165+ */
85166+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
85167+ !capable(CAP_SETGID))
85168+ return -EPERM;
85169+#endif
85170+
85171 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
85172 if (!ns)
85173 return -ENOMEM;
85174@@ -862,7 +877,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
85175 if (atomic_read(&current->mm->mm_users) > 1)
85176 return -EINVAL;
85177
85178- if (current->fs->users != 1)
85179+ if (atomic_read(&current->fs->users) != 1)
85180 return -EINVAL;
85181
85182 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
85183diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
85184index 4f69f9a..7c6f8f8 100644
85185--- a/kernel/utsname_sysctl.c
85186+++ b/kernel/utsname_sysctl.c
85187@@ -47,7 +47,7 @@ static void put_uts(ctl_table *table, int write, void *which)
85188 static int proc_do_uts_string(ctl_table *table, int write,
85189 void __user *buffer, size_t *lenp, loff_t *ppos)
85190 {
85191- struct ctl_table uts_table;
85192+ ctl_table_no_const uts_table;
85193 int r;
85194 memcpy(&uts_table, table, sizeof(uts_table));
85195 uts_table.data = get_uts(table, write);
85196diff --git a/kernel/watchdog.c b/kernel/watchdog.c
85197index 05039e3..17490c7 100644
85198--- a/kernel/watchdog.c
85199+++ b/kernel/watchdog.c
85200@@ -531,7 +531,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
85201 }
85202 #endif /* CONFIG_SYSCTL */
85203
85204-static struct smp_hotplug_thread watchdog_threads = {
85205+static struct smp_hotplug_thread watchdog_threads __read_only = {
85206 .store = &softlockup_watchdog,
85207 .thread_should_run = watchdog_should_run,
85208 .thread_fn = watchdog,
85209diff --git a/kernel/workqueue.c b/kernel/workqueue.c
85210index 6f01921..139869b 100644
85211--- a/kernel/workqueue.c
85212+++ b/kernel/workqueue.c
85213@@ -4596,7 +4596,7 @@ static void rebind_workers(struct worker_pool *pool)
85214 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
85215 worker_flags |= WORKER_REBOUND;
85216 worker_flags &= ~WORKER_UNBOUND;
85217- ACCESS_ONCE(worker->flags) = worker_flags;
85218+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
85219 }
85220
85221 spin_unlock_irq(&pool->lock);
85222diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
85223index 74fdc5c..3310593 100644
85224--- a/lib/Kconfig.debug
85225+++ b/lib/Kconfig.debug
85226@@ -549,7 +549,7 @@ config DEBUG_MUTEXES
85227
85228 config DEBUG_LOCK_ALLOC
85229 bool "Lock debugging: detect incorrect freeing of live locks"
85230- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
85231+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
85232 select DEBUG_SPINLOCK
85233 select DEBUG_MUTEXES
85234 select LOCKDEP
85235@@ -563,7 +563,7 @@ config DEBUG_LOCK_ALLOC
85236
85237 config PROVE_LOCKING
85238 bool "Lock debugging: prove locking correctness"
85239- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
85240+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
85241 select LOCKDEP
85242 select DEBUG_SPINLOCK
85243 select DEBUG_MUTEXES
85244@@ -614,7 +614,7 @@ config LOCKDEP
85245
85246 config LOCK_STAT
85247 bool "Lock usage statistics"
85248- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
85249+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
85250 select LOCKDEP
85251 select DEBUG_SPINLOCK
85252 select DEBUG_MUTEXES
85253@@ -1282,6 +1282,7 @@ config LATENCYTOP
85254 depends on DEBUG_KERNEL
85255 depends on STACKTRACE_SUPPORT
85256 depends on PROC_FS
85257+ depends on !GRKERNSEC_HIDESYM
85258 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
85259 select KALLSYMS
85260 select KALLSYMS_ALL
85261@@ -1298,7 +1299,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
85262 config DEBUG_STRICT_USER_COPY_CHECKS
85263 bool "Strict user copy size checks"
85264 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
85265- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
85266+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
85267 help
85268 Enabling this option turns a certain set of sanity checks for user
85269 copy operations into compile time failures.
85270@@ -1328,7 +1329,7 @@ config INTERVAL_TREE_TEST
85271
85272 config PROVIDE_OHCI1394_DMA_INIT
85273 bool "Remote debugging over FireWire early on boot"
85274- depends on PCI && X86
85275+ depends on PCI && X86 && !GRKERNSEC
85276 help
85277 If you want to debug problems which hang or crash the kernel early
85278 on boot and the crashing machine has a FireWire port, you can use
85279@@ -1357,7 +1358,7 @@ config PROVIDE_OHCI1394_DMA_INIT
85280
85281 config FIREWIRE_OHCI_REMOTE_DMA
85282 bool "Remote debugging over FireWire with firewire-ohci"
85283- depends on FIREWIRE_OHCI
85284+ depends on FIREWIRE_OHCI && !GRKERNSEC
85285 help
85286 This option lets you use the FireWire bus for remote debugging
85287 with help of the firewire-ohci driver. It enables unfiltered
85288diff --git a/lib/Makefile b/lib/Makefile
85289index c55a037..fb46e3b 100644
85290--- a/lib/Makefile
85291+++ b/lib/Makefile
85292@@ -50,7 +50,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
85293
85294 obj-$(CONFIG_BTREE) += btree.o
85295 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
85296-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
85297+obj-y += list_debug.o
85298 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
85299
85300 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
85301diff --git a/lib/bitmap.c b/lib/bitmap.c
85302index 06f7e4f..f3cf2b0 100644
85303--- a/lib/bitmap.c
85304+++ b/lib/bitmap.c
85305@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
85306 {
85307 int c, old_c, totaldigits, ndigits, nchunks, nbits;
85308 u32 chunk;
85309- const char __user __force *ubuf = (const char __user __force *)buf;
85310+ const char __user *ubuf = (const char __force_user *)buf;
85311
85312 bitmap_zero(maskp, nmaskbits);
85313
85314@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
85315 {
85316 if (!access_ok(VERIFY_READ, ubuf, ulen))
85317 return -EFAULT;
85318- return __bitmap_parse((const char __force *)ubuf,
85319+ return __bitmap_parse((const char __force_kernel *)ubuf,
85320 ulen, 1, maskp, nmaskbits);
85321
85322 }
85323@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
85324 {
85325 unsigned a, b;
85326 int c, old_c, totaldigits;
85327- const char __user __force *ubuf = (const char __user __force *)buf;
85328+ const char __user *ubuf = (const char __force_user *)buf;
85329 int exp_digit, in_range;
85330
85331 totaldigits = c = 0;
85332@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
85333 {
85334 if (!access_ok(VERIFY_READ, ubuf, ulen))
85335 return -EFAULT;
85336- return __bitmap_parselist((const char __force *)ubuf,
85337+ return __bitmap_parselist((const char __force_kernel *)ubuf,
85338 ulen, 1, maskp, nmaskbits);
85339 }
85340 EXPORT_SYMBOL(bitmap_parselist_user);
85341diff --git a/lib/bug.c b/lib/bug.c
85342index 1686034..a9c00c8 100644
85343--- a/lib/bug.c
85344+++ b/lib/bug.c
85345@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
85346 return BUG_TRAP_TYPE_NONE;
85347
85348 bug = find_bug(bugaddr);
85349+ if (!bug)
85350+ return BUG_TRAP_TYPE_NONE;
85351
85352 file = NULL;
85353 line = 0;
85354diff --git a/lib/debugobjects.c b/lib/debugobjects.c
85355index 37061ed..da83f48 100644
85356--- a/lib/debugobjects.c
85357+++ b/lib/debugobjects.c
85358@@ -286,7 +286,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
85359 if (limit > 4)
85360 return;
85361
85362- is_on_stack = object_is_on_stack(addr);
85363+ is_on_stack = object_starts_on_stack(addr);
85364 if (is_on_stack == onstack)
85365 return;
85366
85367diff --git a/lib/devres.c b/lib/devres.c
85368index 8235331..5881053 100644
85369--- a/lib/devres.c
85370+++ b/lib/devres.c
85371@@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
85372 void devm_iounmap(struct device *dev, void __iomem *addr)
85373 {
85374 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
85375- (void *)addr));
85376+ (void __force *)addr));
85377 iounmap(addr);
85378 }
85379 EXPORT_SYMBOL(devm_iounmap);
85380@@ -224,7 +224,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
85381 {
85382 ioport_unmap(addr);
85383 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
85384- devm_ioport_map_match, (void *)addr));
85385+ devm_ioport_map_match, (void __force *)addr));
85386 }
85387 EXPORT_SYMBOL(devm_ioport_unmap);
85388 #endif /* CONFIG_HAS_IOPORT */
85389diff --git a/lib/div64.c b/lib/div64.c
85390index a163b6c..9618fa5 100644
85391--- a/lib/div64.c
85392+++ b/lib/div64.c
85393@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
85394 EXPORT_SYMBOL(__div64_32);
85395
85396 #ifndef div_s64_rem
85397-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
85398+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
85399 {
85400 u64 quotient;
85401
85402@@ -90,7 +90,7 @@ EXPORT_SYMBOL(div_s64_rem);
85403 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
85404 */
85405 #ifndef div64_u64
85406-u64 div64_u64(u64 dividend, u64 divisor)
85407+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
85408 {
85409 u32 high = divisor >> 32;
85410 u64 quot;
85411diff --git a/lib/dma-debug.c b/lib/dma-debug.c
85412index d87a17a..ac0d79a 100644
85413--- a/lib/dma-debug.c
85414+++ b/lib/dma-debug.c
85415@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
85416
85417 void dma_debug_add_bus(struct bus_type *bus)
85418 {
85419- struct notifier_block *nb;
85420+ notifier_block_no_const *nb;
85421
85422 if (global_disable)
85423 return;
85424@@ -945,7 +945,7 @@ static void check_unmap(struct dma_debug_entry *ref)
85425
85426 static void check_for_stack(struct device *dev, void *addr)
85427 {
85428- if (object_is_on_stack(addr))
85429+ if (object_starts_on_stack(addr))
85430 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
85431 "stack [addr=%p]\n", addr);
85432 }
85433diff --git a/lib/inflate.c b/lib/inflate.c
85434index 013a761..c28f3fc 100644
85435--- a/lib/inflate.c
85436+++ b/lib/inflate.c
85437@@ -269,7 +269,7 @@ static void free(void *where)
85438 malloc_ptr = free_mem_ptr;
85439 }
85440 #else
85441-#define malloc(a) kmalloc(a, GFP_KERNEL)
85442+#define malloc(a) kmalloc((a), GFP_KERNEL)
85443 #define free(a) kfree(a)
85444 #endif
85445
85446diff --git a/lib/ioremap.c b/lib/ioremap.c
85447index 0c9216c..863bd89 100644
85448--- a/lib/ioremap.c
85449+++ b/lib/ioremap.c
85450@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
85451 unsigned long next;
85452
85453 phys_addr -= addr;
85454- pmd = pmd_alloc(&init_mm, pud, addr);
85455+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
85456 if (!pmd)
85457 return -ENOMEM;
85458 do {
85459@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
85460 unsigned long next;
85461
85462 phys_addr -= addr;
85463- pud = pud_alloc(&init_mm, pgd, addr);
85464+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
85465 if (!pud)
85466 return -ENOMEM;
85467 do {
85468diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
85469index bd2bea9..6b3c95e 100644
85470--- a/lib/is_single_threaded.c
85471+++ b/lib/is_single_threaded.c
85472@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
85473 struct task_struct *p, *t;
85474 bool ret;
85475
85476+ if (!mm)
85477+ return true;
85478+
85479 if (atomic_read(&task->signal->live) != 1)
85480 return false;
85481
85482diff --git a/lib/kobject.c b/lib/kobject.c
85483index b7e29a6..2f3ca75 100644
85484--- a/lib/kobject.c
85485+++ b/lib/kobject.c
85486@@ -805,7 +805,7 @@ static struct kset *kset_create(const char *name,
85487 kset = kzalloc(sizeof(*kset), GFP_KERNEL);
85488 if (!kset)
85489 return NULL;
85490- retval = kobject_set_name(&kset->kobj, name);
85491+ retval = kobject_set_name(&kset->kobj, "%s", name);
85492 if (retval) {
85493 kfree(kset);
85494 return NULL;
85495@@ -859,9 +859,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
85496
85497
85498 static DEFINE_SPINLOCK(kobj_ns_type_lock);
85499-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
85500+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
85501
85502-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
85503+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
85504 {
85505 enum kobj_ns_type type = ops->type;
85506 int error;
85507diff --git a/lib/list_debug.c b/lib/list_debug.c
85508index c24c2f7..06e070b 100644
85509--- a/lib/list_debug.c
85510+++ b/lib/list_debug.c
85511@@ -11,7 +11,9 @@
85512 #include <linux/bug.h>
85513 #include <linux/kernel.h>
85514 #include <linux/rculist.h>
85515+#include <linux/mm.h>
85516
85517+#ifdef CONFIG_DEBUG_LIST
85518 /*
85519 * Insert a new entry between two known consecutive entries.
85520 *
85521@@ -19,21 +21,32 @@
85522 * the prev/next entries already!
85523 */
85524
85525-void __list_add(struct list_head *new,
85526- struct list_head *prev,
85527- struct list_head *next)
85528+static bool __list_add_debug(struct list_head *new,
85529+ struct list_head *prev,
85530+ struct list_head *next)
85531 {
85532- WARN(next->prev != prev,
85533+ if (WARN(next->prev != prev,
85534 "list_add corruption. next->prev should be "
85535 "prev (%p), but was %p. (next=%p).\n",
85536- prev, next->prev, next);
85537- WARN(prev->next != next,
85538+ prev, next->prev, next) ||
85539+ WARN(prev->next != next,
85540 "list_add corruption. prev->next should be "
85541 "next (%p), but was %p. (prev=%p).\n",
85542- next, prev->next, prev);
85543- WARN(new == prev || new == next,
85544- "list_add double add: new=%p, prev=%p, next=%p.\n",
85545- new, prev, next);
85546+ next, prev->next, prev) ||
85547+ WARN(new == prev || new == next,
85548+ "list_add double add: new=%p, prev=%p, next=%p.\n",
85549+ new, prev, next))
85550+ return false;
85551+ return true;
85552+}
85553+
85554+void __list_add(struct list_head *new,
85555+ struct list_head *prev,
85556+ struct list_head *next)
85557+{
85558+ if (!__list_add_debug(new, prev, next))
85559+ return;
85560+
85561 next->prev = new;
85562 new->next = next;
85563 new->prev = prev;
85564@@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
85565 }
85566 EXPORT_SYMBOL(__list_add);
85567
85568-void __list_del_entry(struct list_head *entry)
85569+static bool __list_del_entry_debug(struct list_head *entry)
85570 {
85571 struct list_head *prev, *next;
85572
85573@@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
85574 WARN(next->prev != entry,
85575 "list_del corruption. next->prev should be %p, "
85576 "but was %p\n", entry, next->prev))
85577+ return false;
85578+ return true;
85579+}
85580+
85581+void __list_del_entry(struct list_head *entry)
85582+{
85583+ if (!__list_del_entry_debug(entry))
85584 return;
85585
85586- __list_del(prev, next);
85587+ __list_del(entry->prev, entry->next);
85588 }
85589 EXPORT_SYMBOL(__list_del_entry);
85590
85591@@ -86,15 +106,85 @@ EXPORT_SYMBOL(list_del);
85592 void __list_add_rcu(struct list_head *new,
85593 struct list_head *prev, struct list_head *next)
85594 {
85595- WARN(next->prev != prev,
85596- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
85597- prev, next->prev, next);
85598- WARN(prev->next != next,
85599- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
85600- next, prev->next, prev);
85601+ if (!__list_add_debug(new, prev, next))
85602+ return;
85603+
85604 new->next = next;
85605 new->prev = prev;
85606 rcu_assign_pointer(list_next_rcu(prev), new);
85607 next->prev = new;
85608 }
85609 EXPORT_SYMBOL(__list_add_rcu);
85610+#endif
85611+
85612+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
85613+{
85614+#ifdef CONFIG_DEBUG_LIST
85615+ if (!__list_add_debug(new, prev, next))
85616+ return;
85617+#endif
85618+
85619+ pax_open_kernel();
85620+ next->prev = new;
85621+ new->next = next;
85622+ new->prev = prev;
85623+ prev->next = new;
85624+ pax_close_kernel();
85625+}
85626+EXPORT_SYMBOL(__pax_list_add);
85627+
85628+void pax_list_del(struct list_head *entry)
85629+{
85630+#ifdef CONFIG_DEBUG_LIST
85631+ if (!__list_del_entry_debug(entry))
85632+ return;
85633+#endif
85634+
85635+ pax_open_kernel();
85636+ __list_del(entry->prev, entry->next);
85637+ entry->next = LIST_POISON1;
85638+ entry->prev = LIST_POISON2;
85639+ pax_close_kernel();
85640+}
85641+EXPORT_SYMBOL(pax_list_del);
85642+
85643+void pax_list_del_init(struct list_head *entry)
85644+{
85645+ pax_open_kernel();
85646+ __list_del(entry->prev, entry->next);
85647+ INIT_LIST_HEAD(entry);
85648+ pax_close_kernel();
85649+}
85650+EXPORT_SYMBOL(pax_list_del_init);
85651+
85652+void __pax_list_add_rcu(struct list_head *new,
85653+ struct list_head *prev, struct list_head *next)
85654+{
85655+#ifdef CONFIG_DEBUG_LIST
85656+ if (!__list_add_debug(new, prev, next))
85657+ return;
85658+#endif
85659+
85660+ pax_open_kernel();
85661+ new->next = next;
85662+ new->prev = prev;
85663+ rcu_assign_pointer(list_next_rcu(prev), new);
85664+ next->prev = new;
85665+ pax_close_kernel();
85666+}
85667+EXPORT_SYMBOL(__pax_list_add_rcu);
85668+
85669+void pax_list_del_rcu(struct list_head *entry)
85670+{
85671+#ifdef CONFIG_DEBUG_LIST
85672+ if (!__list_del_entry_debug(entry))
85673+ return;
85674+#endif
85675+
85676+ pax_open_kernel();
85677+ __list_del(entry->prev, entry->next);
85678+ entry->next = LIST_POISON1;
85679+ entry->prev = LIST_POISON2;
85680+ pax_close_kernel();
85681+}
85682+EXPORT_SYMBOL(pax_list_del_rcu);
85683diff --git a/lib/radix-tree.c b/lib/radix-tree.c
85684index e796429..6e38f9f 100644
85685--- a/lib/radix-tree.c
85686+++ b/lib/radix-tree.c
85687@@ -92,7 +92,7 @@ struct radix_tree_preload {
85688 int nr;
85689 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
85690 };
85691-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
85692+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
85693
85694 static inline void *ptr_to_indirect(void *ptr)
85695 {
85696diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
85697index bb2b201..46abaf9 100644
85698--- a/lib/strncpy_from_user.c
85699+++ b/lib/strncpy_from_user.c
85700@@ -21,7 +21,7 @@
85701 */
85702 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
85703 {
85704- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
85705+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
85706 long res = 0;
85707
85708 /*
85709diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
85710index a28df52..3d55877 100644
85711--- a/lib/strnlen_user.c
85712+++ b/lib/strnlen_user.c
85713@@ -26,7 +26,7 @@
85714 */
85715 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
85716 {
85717- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
85718+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
85719 long align, res = 0;
85720 unsigned long c;
85721
85722diff --git a/lib/swiotlb.c b/lib/swiotlb.c
85723index d23762e..e21eab2 100644
85724--- a/lib/swiotlb.c
85725+++ b/lib/swiotlb.c
85726@@ -664,7 +664,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
85727
85728 void
85729 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
85730- dma_addr_t dev_addr)
85731+ dma_addr_t dev_addr, struct dma_attrs *attrs)
85732 {
85733 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
85734
85735diff --git a/lib/usercopy.c b/lib/usercopy.c
85736index 4f5b1dd..7cab418 100644
85737--- a/lib/usercopy.c
85738+++ b/lib/usercopy.c
85739@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
85740 WARN(1, "Buffer overflow detected!\n");
85741 }
85742 EXPORT_SYMBOL(copy_from_user_overflow);
85743+
85744+void copy_to_user_overflow(void)
85745+{
85746+ WARN(1, "Buffer overflow detected!\n");
85747+}
85748+EXPORT_SYMBOL(copy_to_user_overflow);
85749diff --git a/lib/vsprintf.c b/lib/vsprintf.c
85750index e149c64..24aa71a 100644
85751--- a/lib/vsprintf.c
85752+++ b/lib/vsprintf.c
85753@@ -16,6 +16,9 @@
85754 * - scnprintf and vscnprintf
85755 */
85756
85757+#ifdef CONFIG_GRKERNSEC_HIDESYM
85758+#define __INCLUDED_BY_HIDESYM 1
85759+#endif
85760 #include <stdarg.h>
85761 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
85762 #include <linux/types.h>
85763@@ -981,7 +984,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
85764 return number(buf, end, *(const netdev_features_t *)addr, spec);
85765 }
85766
85767+#ifdef CONFIG_GRKERNSEC_HIDESYM
85768+int kptr_restrict __read_mostly = 2;
85769+#else
85770 int kptr_restrict __read_mostly;
85771+#endif
85772
85773 /*
85774 * Show a '%p' thing. A kernel extension is that the '%p' is followed
85775@@ -994,6 +1001,7 @@ int kptr_restrict __read_mostly;
85776 * - 'f' For simple symbolic function names without offset
85777 * - 'S' For symbolic direct pointers with offset
85778 * - 's' For symbolic direct pointers without offset
85779+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
85780 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
85781 * - 'B' For backtraced symbolic direct pointers with offset
85782 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
85783@@ -1052,12 +1060,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
85784
85785 if (!ptr && *fmt != 'K') {
85786 /*
85787- * Print (null) with the same width as a pointer so it makes
85788+ * Print (nil) with the same width as a pointer so it makes
85789 * tabular output look nice.
85790 */
85791 if (spec.field_width == -1)
85792 spec.field_width = default_width;
85793- return string(buf, end, "(null)", spec);
85794+ return string(buf, end, "(nil)", spec);
85795 }
85796
85797 switch (*fmt) {
85798@@ -1067,6 +1075,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
85799 /* Fallthrough */
85800 case 'S':
85801 case 's':
85802+#ifdef CONFIG_GRKERNSEC_HIDESYM
85803+ break;
85804+#else
85805+ return symbol_string(buf, end, ptr, spec, fmt);
85806+#endif
85807+ case 'A':
85808 case 'B':
85809 return symbol_string(buf, end, ptr, spec, fmt);
85810 case 'R':
85811@@ -1107,6 +1121,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
85812 va_end(va);
85813 return buf;
85814 }
85815+ case 'P':
85816+ break;
85817 case 'K':
85818 /*
85819 * %pK cannot be used in IRQ context because its test
85820@@ -1136,6 +1152,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
85821 return number(buf, end,
85822 (unsigned long long) *((phys_addr_t *)ptr), spec);
85823 }
85824+
85825+#ifdef CONFIG_GRKERNSEC_HIDESYM
85826+ /* 'P' = approved pointers to copy to userland,
85827+ as in the /proc/kallsyms case, as we make it display nothing
85828+ for non-root users, and the real contents for root users
85829+ Also ignore 'K' pointers, since we force their NULLing for non-root users
85830+ above
85831+ */
85832+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
85833+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
85834+ dump_stack();
85835+ ptr = NULL;
85836+ }
85837+#endif
85838+
85839 spec.flags |= SMALL;
85840 if (spec.field_width == -1) {
85841 spec.field_width = default_width;
85842@@ -1857,11 +1888,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
85843 typeof(type) value; \
85844 if (sizeof(type) == 8) { \
85845 args = PTR_ALIGN(args, sizeof(u32)); \
85846- *(u32 *)&value = *(u32 *)args; \
85847- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
85848+ *(u32 *)&value = *(const u32 *)args; \
85849+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
85850 } else { \
85851 args = PTR_ALIGN(args, sizeof(type)); \
85852- value = *(typeof(type) *)args; \
85853+ value = *(const typeof(type) *)args; \
85854 } \
85855 args += sizeof(type); \
85856 value; \
85857@@ -1924,7 +1955,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
85858 case FORMAT_TYPE_STR: {
85859 const char *str_arg = args;
85860 args += strlen(str_arg) + 1;
85861- str = string(str, end, (char *)str_arg, spec);
85862+ str = string(str, end, str_arg, spec);
85863 break;
85864 }
85865
85866diff --git a/localversion-grsec b/localversion-grsec
85867new file mode 100644
85868index 0000000..7cd6065
85869--- /dev/null
85870+++ b/localversion-grsec
85871@@ -0,0 +1 @@
85872+-grsec
85873diff --git a/mm/Kconfig b/mm/Kconfig
85874index e742d06..c56fdd8 100644
85875--- a/mm/Kconfig
85876+++ b/mm/Kconfig
85877@@ -317,10 +317,10 @@ config KSM
85878 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
85879
85880 config DEFAULT_MMAP_MIN_ADDR
85881- int "Low address space to protect from user allocation"
85882+ int "Low address space to protect from user allocation"
85883 depends on MMU
85884- default 4096
85885- help
85886+ default 65536
85887+ help
85888 This is the portion of low virtual memory which should be protected
85889 from userspace allocation. Keeping a user from writing to low pages
85890 can help reduce the impact of kernel NULL pointer bugs.
85891@@ -351,7 +351,7 @@ config MEMORY_FAILURE
85892
85893 config HWPOISON_INJECT
85894 tristate "HWPoison pages injector"
85895- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
85896+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
85897 select PROC_PAGE_MONITOR
85898
85899 config NOMMU_INITIAL_TRIM_EXCESS
85900diff --git a/mm/backing-dev.c b/mm/backing-dev.c
85901index 5025174..9d67dcd 100644
85902--- a/mm/backing-dev.c
85903+++ b/mm/backing-dev.c
85904@@ -12,7 +12,7 @@
85905 #include <linux/device.h>
85906 #include <trace/events/writeback.h>
85907
85908-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
85909+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
85910
85911 struct backing_dev_info default_backing_dev_info = {
85912 .name = "default",
85913@@ -515,7 +515,6 @@ EXPORT_SYMBOL(bdi_destroy);
85914 int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
85915 unsigned int cap)
85916 {
85917- char tmp[32];
85918 int err;
85919
85920 bdi->name = name;
85921@@ -524,8 +523,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
85922 if (err)
85923 return err;
85924
85925- sprintf(tmp, "%.28s%s", name, "-%d");
85926- err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
85927+ err = bdi_register(bdi, NULL, "%.28s-%ld", name, atomic_long_inc_return_unchecked(&bdi_seq));
85928 if (err) {
85929 bdi_destroy(bdi);
85930 return err;
85931diff --git a/mm/filemap.c b/mm/filemap.c
85932index 7905fe7..f59502b 100644
85933--- a/mm/filemap.c
85934+++ b/mm/filemap.c
85935@@ -1766,7 +1766,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
85936 struct address_space *mapping = file->f_mapping;
85937
85938 if (!mapping->a_ops->readpage)
85939- return -ENOEXEC;
85940+ return -ENODEV;
85941 file_accessed(file);
85942 vma->vm_ops = &generic_file_vm_ops;
85943 return 0;
85944@@ -1948,7 +1948,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
85945
85946 while (bytes) {
85947 char __user *buf = iov->iov_base + base;
85948- int copy = min(bytes, iov->iov_len - base);
85949+ size_t copy = min(bytes, iov->iov_len - base);
85950
85951 base = 0;
85952 left = __copy_from_user_inatomic(vaddr, buf, copy);
85953@@ -1977,7 +1977,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
85954 BUG_ON(!in_atomic());
85955 kaddr = kmap_atomic(page);
85956 if (likely(i->nr_segs == 1)) {
85957- int left;
85958+ size_t left;
85959 char __user *buf = i->iov->iov_base + i->iov_offset;
85960 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
85961 copied = bytes - left;
85962@@ -2005,7 +2005,7 @@ size_t iov_iter_copy_from_user(struct page *page,
85963
85964 kaddr = kmap(page);
85965 if (likely(i->nr_segs == 1)) {
85966- int left;
85967+ size_t left;
85968 char __user *buf = i->iov->iov_base + i->iov_offset;
85969 left = __copy_from_user(kaddr + offset, buf, bytes);
85970 copied = bytes - left;
85971@@ -2035,7 +2035,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
85972 * zero-length segments (without overruning the iovec).
85973 */
85974 while (bytes || unlikely(i->count && !iov->iov_len)) {
85975- int copy;
85976+ size_t copy;
85977
85978 copy = min(bytes, iov->iov_len - base);
85979 BUG_ON(!i->count || i->count < copy);
85980@@ -2106,6 +2106,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
85981 *pos = i_size_read(inode);
85982
85983 if (limit != RLIM_INFINITY) {
85984+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
85985 if (*pos >= limit) {
85986 send_sig(SIGXFSZ, current, 0);
85987 return -EFBIG;
85988diff --git a/mm/fremap.c b/mm/fremap.c
85989index 87da359..3f41cb1 100644
85990--- a/mm/fremap.c
85991+++ b/mm/fremap.c
85992@@ -158,6 +158,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
85993 retry:
85994 vma = find_vma(mm, start);
85995
85996+#ifdef CONFIG_PAX_SEGMEXEC
85997+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
85998+ goto out;
85999+#endif
86000+
86001 /*
86002 * Make sure the vma is shared, that it supports prefaulting,
86003 * and that the remapped range is valid and fully within
86004diff --git a/mm/highmem.c b/mm/highmem.c
86005index b32b70c..e512eb0 100644
86006--- a/mm/highmem.c
86007+++ b/mm/highmem.c
86008@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
86009 * So no dangers, even with speculative execution.
86010 */
86011 page = pte_page(pkmap_page_table[i]);
86012+ pax_open_kernel();
86013 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
86014-
86015+ pax_close_kernel();
86016 set_page_address(page, NULL);
86017 need_flush = 1;
86018 }
86019@@ -198,9 +199,11 @@ start:
86020 }
86021 }
86022 vaddr = PKMAP_ADDR(last_pkmap_nr);
86023+
86024+ pax_open_kernel();
86025 set_pte_at(&init_mm, vaddr,
86026 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
86027-
86028+ pax_close_kernel();
86029 pkmap_count[last_pkmap_nr] = 1;
86030 set_page_address(page, (void *)vaddr);
86031
86032diff --git a/mm/hugetlb.c b/mm/hugetlb.c
86033index 7c5eb85..5c01c2f 100644
86034--- a/mm/hugetlb.c
86035+++ b/mm/hugetlb.c
86036@@ -2022,15 +2022,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
86037 struct hstate *h = &default_hstate;
86038 unsigned long tmp;
86039 int ret;
86040+ ctl_table_no_const hugetlb_table;
86041
86042 tmp = h->max_huge_pages;
86043
86044 if (write && h->order >= MAX_ORDER)
86045 return -EINVAL;
86046
86047- table->data = &tmp;
86048- table->maxlen = sizeof(unsigned long);
86049- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
86050+ hugetlb_table = *table;
86051+ hugetlb_table.data = &tmp;
86052+ hugetlb_table.maxlen = sizeof(unsigned long);
86053+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
86054 if (ret)
86055 goto out;
86056
86057@@ -2087,15 +2089,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
86058 struct hstate *h = &default_hstate;
86059 unsigned long tmp;
86060 int ret;
86061+ ctl_table_no_const hugetlb_table;
86062
86063 tmp = h->nr_overcommit_huge_pages;
86064
86065 if (write && h->order >= MAX_ORDER)
86066 return -EINVAL;
86067
86068- table->data = &tmp;
86069- table->maxlen = sizeof(unsigned long);
86070- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
86071+ hugetlb_table = *table;
86072+ hugetlb_table.data = &tmp;
86073+ hugetlb_table.maxlen = sizeof(unsigned long);
86074+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
86075 if (ret)
86076 goto out;
86077
86078@@ -2545,6 +2549,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
86079 return 1;
86080 }
86081
86082+#ifdef CONFIG_PAX_SEGMEXEC
86083+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
86084+{
86085+ struct mm_struct *mm = vma->vm_mm;
86086+ struct vm_area_struct *vma_m;
86087+ unsigned long address_m;
86088+ pte_t *ptep_m;
86089+
86090+ vma_m = pax_find_mirror_vma(vma);
86091+ if (!vma_m)
86092+ return;
86093+
86094+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
86095+ address_m = address + SEGMEXEC_TASK_SIZE;
86096+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
86097+ get_page(page_m);
86098+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
86099+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
86100+}
86101+#endif
86102+
86103 /*
86104 * Hugetlb_cow() should be called with page lock of the original hugepage held.
86105 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
86106@@ -2663,6 +2688,11 @@ retry_avoidcopy:
86107 make_huge_pte(vma, new_page, 1));
86108 page_remove_rmap(old_page);
86109 hugepage_add_new_anon_rmap(new_page, vma, address);
86110+
86111+#ifdef CONFIG_PAX_SEGMEXEC
86112+ pax_mirror_huge_pte(vma, address, new_page);
86113+#endif
86114+
86115 /* Make the old page be freed below */
86116 new_page = old_page;
86117 }
86118@@ -2821,6 +2851,10 @@ retry:
86119 && (vma->vm_flags & VM_SHARED)));
86120 set_huge_pte_at(mm, address, ptep, new_pte);
86121
86122+#ifdef CONFIG_PAX_SEGMEXEC
86123+ pax_mirror_huge_pte(vma, address, page);
86124+#endif
86125+
86126 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
86127 /* Optimization, do the COW without a second fault */
86128 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
86129@@ -2850,6 +2884,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
86130 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
86131 struct hstate *h = hstate_vma(vma);
86132
86133+#ifdef CONFIG_PAX_SEGMEXEC
86134+ struct vm_area_struct *vma_m;
86135+#endif
86136+
86137 address &= huge_page_mask(h);
86138
86139 ptep = huge_pte_offset(mm, address);
86140@@ -2863,6 +2901,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
86141 VM_FAULT_SET_HINDEX(hstate_index(h));
86142 }
86143
86144+#ifdef CONFIG_PAX_SEGMEXEC
86145+ vma_m = pax_find_mirror_vma(vma);
86146+ if (vma_m) {
86147+ unsigned long address_m;
86148+
86149+ if (vma->vm_start > vma_m->vm_start) {
86150+ address_m = address;
86151+ address -= SEGMEXEC_TASK_SIZE;
86152+ vma = vma_m;
86153+ h = hstate_vma(vma);
86154+ } else
86155+ address_m = address + SEGMEXEC_TASK_SIZE;
86156+
86157+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
86158+ return VM_FAULT_OOM;
86159+ address_m &= HPAGE_MASK;
86160+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
86161+ }
86162+#endif
86163+
86164 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
86165 if (!ptep)
86166 return VM_FAULT_OOM;
86167diff --git a/mm/internal.h b/mm/internal.h
86168index 8562de0..92b2073 100644
86169--- a/mm/internal.h
86170+++ b/mm/internal.h
86171@@ -100,6 +100,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
86172 * in mm/page_alloc.c
86173 */
86174 extern void __free_pages_bootmem(struct page *page, unsigned int order);
86175+extern void free_compound_page(struct page *page);
86176 extern void prep_compound_page(struct page *page, unsigned long order);
86177 #ifdef CONFIG_MEMORY_FAILURE
86178 extern bool is_free_buddy_page(struct page *page);
86179@@ -355,7 +356,7 @@ extern u32 hwpoison_filter_enable;
86180
86181 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
86182 unsigned long, unsigned long,
86183- unsigned long, unsigned long);
86184+ unsigned long, unsigned long) __intentional_overflow(-1);
86185
86186 extern void set_pageblock_order(void);
86187 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
86188diff --git a/mm/kmemleak.c b/mm/kmemleak.c
86189index c8d7f31..2dbeffd 100644
86190--- a/mm/kmemleak.c
86191+++ b/mm/kmemleak.c
86192@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
86193
86194 for (i = 0; i < object->trace_len; i++) {
86195 void *ptr = (void *)object->trace[i];
86196- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
86197+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
86198 }
86199 }
86200
86201@@ -1851,7 +1851,7 @@ static int __init kmemleak_late_init(void)
86202 return -ENOMEM;
86203 }
86204
86205- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
86206+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
86207 &kmemleak_fops);
86208 if (!dentry)
86209 pr_warning("Failed to create the debugfs kmemleak file\n");
86210diff --git a/mm/maccess.c b/mm/maccess.c
86211index d53adf9..03a24bf 100644
86212--- a/mm/maccess.c
86213+++ b/mm/maccess.c
86214@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
86215 set_fs(KERNEL_DS);
86216 pagefault_disable();
86217 ret = __copy_from_user_inatomic(dst,
86218- (__force const void __user *)src, size);
86219+ (const void __force_user *)src, size);
86220 pagefault_enable();
86221 set_fs(old_fs);
86222
86223@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
86224
86225 set_fs(KERNEL_DS);
86226 pagefault_disable();
86227- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
86228+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
86229 pagefault_enable();
86230 set_fs(old_fs);
86231
86232diff --git a/mm/madvise.c b/mm/madvise.c
86233index 7055883..aafb1ed 100644
86234--- a/mm/madvise.c
86235+++ b/mm/madvise.c
86236@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
86237 pgoff_t pgoff;
86238 unsigned long new_flags = vma->vm_flags;
86239
86240+#ifdef CONFIG_PAX_SEGMEXEC
86241+ struct vm_area_struct *vma_m;
86242+#endif
86243+
86244 switch (behavior) {
86245 case MADV_NORMAL:
86246 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
86247@@ -126,6 +130,13 @@ success:
86248 /*
86249 * vm_flags is protected by the mmap_sem held in write mode.
86250 */
86251+
86252+#ifdef CONFIG_PAX_SEGMEXEC
86253+ vma_m = pax_find_mirror_vma(vma);
86254+ if (vma_m)
86255+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
86256+#endif
86257+
86258 vma->vm_flags = new_flags;
86259
86260 out:
86261@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
86262 struct vm_area_struct ** prev,
86263 unsigned long start, unsigned long end)
86264 {
86265+
86266+#ifdef CONFIG_PAX_SEGMEXEC
86267+ struct vm_area_struct *vma_m;
86268+#endif
86269+
86270 *prev = vma;
86271 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
86272 return -EINVAL;
86273@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
86274 zap_page_range(vma, start, end - start, &details);
86275 } else
86276 zap_page_range(vma, start, end - start, NULL);
86277+
86278+#ifdef CONFIG_PAX_SEGMEXEC
86279+ vma_m = pax_find_mirror_vma(vma);
86280+ if (vma_m) {
86281+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
86282+ struct zap_details details = {
86283+ .nonlinear_vma = vma_m,
86284+ .last_index = ULONG_MAX,
86285+ };
86286+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
86287+ } else
86288+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
86289+ }
86290+#endif
86291+
86292 return 0;
86293 }
86294
86295@@ -485,6 +516,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
86296 if (end < start)
86297 return error;
86298
86299+#ifdef CONFIG_PAX_SEGMEXEC
86300+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
86301+ if (end > SEGMEXEC_TASK_SIZE)
86302+ return error;
86303+ } else
86304+#endif
86305+
86306+ if (end > TASK_SIZE)
86307+ return error;
86308+
86309 error = 0;
86310 if (end == start)
86311 return error;
86312diff --git a/mm/memory-failure.c b/mm/memory-failure.c
86313index ceb0c7f..b2b8e94 100644
86314--- a/mm/memory-failure.c
86315+++ b/mm/memory-failure.c
86316@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
86317
86318 int sysctl_memory_failure_recovery __read_mostly = 1;
86319
86320-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
86321+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
86322
86323 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
86324
86325@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
86326 pfn, t->comm, t->pid);
86327 si.si_signo = SIGBUS;
86328 si.si_errno = 0;
86329- si.si_addr = (void *)addr;
86330+ si.si_addr = (void __user *)addr;
86331 #ifdef __ARCH_SI_TRAPNO
86332 si.si_trapno = trapno;
86333 #endif
86334@@ -760,7 +760,7 @@ static struct page_state {
86335 unsigned long res;
86336 char *msg;
86337 int (*action)(struct page *p, unsigned long pfn);
86338-} error_states[] = {
86339+} __do_const error_states[] = {
86340 { reserved, reserved, "reserved kernel", me_kernel },
86341 /*
86342 * free pages are specially detected outside this table:
86343@@ -1051,7 +1051,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
86344 nr_pages = 1 << compound_order(hpage);
86345 else /* normal page or thp */
86346 nr_pages = 1;
86347- atomic_long_add(nr_pages, &num_poisoned_pages);
86348+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
86349
86350 /*
86351 * We need/can do nothing about count=0 pages.
86352@@ -1081,7 +1081,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
86353 if (!PageHWPoison(hpage)
86354 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
86355 || (p != hpage && TestSetPageHWPoison(hpage))) {
86356- atomic_long_sub(nr_pages, &num_poisoned_pages);
86357+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
86358 return 0;
86359 }
86360 set_page_hwpoison_huge_page(hpage);
86361@@ -1148,7 +1148,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
86362 }
86363 if (hwpoison_filter(p)) {
86364 if (TestClearPageHWPoison(p))
86365- atomic_long_sub(nr_pages, &num_poisoned_pages);
86366+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
86367 unlock_page(hpage);
86368 put_page(hpage);
86369 return 0;
86370@@ -1350,7 +1350,7 @@ int unpoison_memory(unsigned long pfn)
86371 return 0;
86372 }
86373 if (TestClearPageHWPoison(p))
86374- atomic_long_sub(nr_pages, &num_poisoned_pages);
86375+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
86376 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
86377 return 0;
86378 }
86379@@ -1364,7 +1364,7 @@ int unpoison_memory(unsigned long pfn)
86380 */
86381 if (TestClearPageHWPoison(page)) {
86382 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
86383- atomic_long_sub(nr_pages, &num_poisoned_pages);
86384+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
86385 freeit = 1;
86386 if (PageHuge(page))
86387 clear_page_hwpoison_huge_page(page);
86388@@ -1491,7 +1491,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
86389 } else {
86390 set_page_hwpoison_huge_page(hpage);
86391 dequeue_hwpoisoned_huge_page(hpage);
86392- atomic_long_add(1 << compound_trans_order(hpage),
86393+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
86394 &num_poisoned_pages);
86395 }
86396 /* keep elevated page count for bad page */
86397@@ -1552,11 +1552,11 @@ int soft_offline_page(struct page *page, int flags)
86398 if (PageHuge(page)) {
86399 set_page_hwpoison_huge_page(hpage);
86400 dequeue_hwpoisoned_huge_page(hpage);
86401- atomic_long_add(1 << compound_trans_order(hpage),
86402+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
86403 &num_poisoned_pages);
86404 } else {
86405 SetPageHWPoison(page);
86406- atomic_long_inc(&num_poisoned_pages);
86407+ atomic_long_inc_unchecked(&num_poisoned_pages);
86408 }
86409 }
86410 /* keep elevated page count for bad page */
86411@@ -1596,7 +1596,7 @@ static int __soft_offline_page(struct page *page, int flags)
86412 put_page(page);
86413 pr_info("soft_offline: %#lx: invalidated\n", pfn);
86414 SetPageHWPoison(page);
86415- atomic_long_inc(&num_poisoned_pages);
86416+ atomic_long_inc_unchecked(&num_poisoned_pages);
86417 return 0;
86418 }
86419
86420@@ -1626,7 +1626,7 @@ static int __soft_offline_page(struct page *page, int flags)
86421 ret = -EIO;
86422 } else {
86423 SetPageHWPoison(page);
86424- atomic_long_inc(&num_poisoned_pages);
86425+ atomic_long_inc_unchecked(&num_poisoned_pages);
86426 }
86427 } else {
86428 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
86429diff --git a/mm/memory.c b/mm/memory.c
86430index 5a35443..7c0340f 100644
86431--- a/mm/memory.c
86432+++ b/mm/memory.c
86433@@ -428,6 +428,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
86434 free_pte_range(tlb, pmd, addr);
86435 } while (pmd++, addr = next, addr != end);
86436
86437+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
86438 start &= PUD_MASK;
86439 if (start < floor)
86440 return;
86441@@ -442,6 +443,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
86442 pmd = pmd_offset(pud, start);
86443 pud_clear(pud);
86444 pmd_free_tlb(tlb, pmd, start);
86445+#endif
86446+
86447 }
86448
86449 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
86450@@ -461,6 +464,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
86451 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
86452 } while (pud++, addr = next, addr != end);
86453
86454+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
86455 start &= PGDIR_MASK;
86456 if (start < floor)
86457 return;
86458@@ -475,6 +479,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
86459 pud = pud_offset(pgd, start);
86460 pgd_clear(pgd);
86461 pud_free_tlb(tlb, pud, start);
86462+#endif
86463+
86464 }
86465
86466 /*
86467@@ -1644,12 +1650,6 @@ no_page_table:
86468 return page;
86469 }
86470
86471-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
86472-{
86473- return stack_guard_page_start(vma, addr) ||
86474- stack_guard_page_end(vma, addr+PAGE_SIZE);
86475-}
86476-
86477 /**
86478 * __get_user_pages() - pin user pages in memory
86479 * @tsk: task_struct of target task
86480@@ -1736,10 +1736,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
86481
86482 i = 0;
86483
86484- do {
86485+ while (nr_pages) {
86486 struct vm_area_struct *vma;
86487
86488- vma = find_extend_vma(mm, start);
86489+ vma = find_vma(mm, start);
86490 if (!vma && in_gate_area(mm, start)) {
86491 unsigned long pg = start & PAGE_MASK;
86492 pgd_t *pgd;
86493@@ -1788,7 +1788,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
86494 goto next_page;
86495 }
86496
86497- if (!vma ||
86498+ if (!vma || start < vma->vm_start ||
86499 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
86500 !(vm_flags & vma->vm_flags))
86501 return i ? : -EFAULT;
86502@@ -1817,11 +1817,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
86503 int ret;
86504 unsigned int fault_flags = 0;
86505
86506- /* For mlock, just skip the stack guard page. */
86507- if (foll_flags & FOLL_MLOCK) {
86508- if (stack_guard_page(vma, start))
86509- goto next_page;
86510- }
86511 if (foll_flags & FOLL_WRITE)
86512 fault_flags |= FAULT_FLAG_WRITE;
86513 if (nonblocking)
86514@@ -1901,7 +1896,7 @@ next_page:
86515 start += page_increm * PAGE_SIZE;
86516 nr_pages -= page_increm;
86517 } while (nr_pages && start < vma->vm_end);
86518- } while (nr_pages);
86519+ }
86520 return i;
86521 }
86522 EXPORT_SYMBOL(__get_user_pages);
86523@@ -2108,6 +2103,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
86524 page_add_file_rmap(page);
86525 set_pte_at(mm, addr, pte, mk_pte(page, prot));
86526
86527+#ifdef CONFIG_PAX_SEGMEXEC
86528+ pax_mirror_file_pte(vma, addr, page, ptl);
86529+#endif
86530+
86531 retval = 0;
86532 pte_unmap_unlock(pte, ptl);
86533 return retval;
86534@@ -2152,9 +2151,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
86535 if (!page_count(page))
86536 return -EINVAL;
86537 if (!(vma->vm_flags & VM_MIXEDMAP)) {
86538+
86539+#ifdef CONFIG_PAX_SEGMEXEC
86540+ struct vm_area_struct *vma_m;
86541+#endif
86542+
86543 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
86544 BUG_ON(vma->vm_flags & VM_PFNMAP);
86545 vma->vm_flags |= VM_MIXEDMAP;
86546+
86547+#ifdef CONFIG_PAX_SEGMEXEC
86548+ vma_m = pax_find_mirror_vma(vma);
86549+ if (vma_m)
86550+ vma_m->vm_flags |= VM_MIXEDMAP;
86551+#endif
86552+
86553 }
86554 return insert_page(vma, addr, page, vma->vm_page_prot);
86555 }
86556@@ -2237,6 +2248,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
86557 unsigned long pfn)
86558 {
86559 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
86560+ BUG_ON(vma->vm_mirror);
86561
86562 if (addr < vma->vm_start || addr >= vma->vm_end)
86563 return -EFAULT;
86564@@ -2484,7 +2496,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
86565
86566 BUG_ON(pud_huge(*pud));
86567
86568- pmd = pmd_alloc(mm, pud, addr);
86569+ pmd = (mm == &init_mm) ?
86570+ pmd_alloc_kernel(mm, pud, addr) :
86571+ pmd_alloc(mm, pud, addr);
86572 if (!pmd)
86573 return -ENOMEM;
86574 do {
86575@@ -2504,7 +2518,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
86576 unsigned long next;
86577 int err;
86578
86579- pud = pud_alloc(mm, pgd, addr);
86580+ pud = (mm == &init_mm) ?
86581+ pud_alloc_kernel(mm, pgd, addr) :
86582+ pud_alloc(mm, pgd, addr);
86583 if (!pud)
86584 return -ENOMEM;
86585 do {
86586@@ -2592,6 +2608,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
86587 copy_user_highpage(dst, src, va, vma);
86588 }
86589
86590+#ifdef CONFIG_PAX_SEGMEXEC
86591+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
86592+{
86593+ struct mm_struct *mm = vma->vm_mm;
86594+ spinlock_t *ptl;
86595+ pte_t *pte, entry;
86596+
86597+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
86598+ entry = *pte;
86599+ if (!pte_present(entry)) {
86600+ if (!pte_none(entry)) {
86601+ BUG_ON(pte_file(entry));
86602+ free_swap_and_cache(pte_to_swp_entry(entry));
86603+ pte_clear_not_present_full(mm, address, pte, 0);
86604+ }
86605+ } else {
86606+ struct page *page;
86607+
86608+ flush_cache_page(vma, address, pte_pfn(entry));
86609+ entry = ptep_clear_flush(vma, address, pte);
86610+ BUG_ON(pte_dirty(entry));
86611+ page = vm_normal_page(vma, address, entry);
86612+ if (page) {
86613+ update_hiwater_rss(mm);
86614+ if (PageAnon(page))
86615+ dec_mm_counter_fast(mm, MM_ANONPAGES);
86616+ else
86617+ dec_mm_counter_fast(mm, MM_FILEPAGES);
86618+ page_remove_rmap(page);
86619+ page_cache_release(page);
86620+ }
86621+ }
86622+ pte_unmap_unlock(pte, ptl);
86623+}
86624+
86625+/* PaX: if vma is mirrored, synchronize the mirror's PTE
86626+ *
86627+ * the ptl of the lower mapped page is held on entry and is not released on exit
86628+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
86629+ */
86630+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
86631+{
86632+ struct mm_struct *mm = vma->vm_mm;
86633+ unsigned long address_m;
86634+ spinlock_t *ptl_m;
86635+ struct vm_area_struct *vma_m;
86636+ pmd_t *pmd_m;
86637+ pte_t *pte_m, entry_m;
86638+
86639+ BUG_ON(!page_m || !PageAnon(page_m));
86640+
86641+ vma_m = pax_find_mirror_vma(vma);
86642+ if (!vma_m)
86643+ return;
86644+
86645+ BUG_ON(!PageLocked(page_m));
86646+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
86647+ address_m = address + SEGMEXEC_TASK_SIZE;
86648+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
86649+ pte_m = pte_offset_map(pmd_m, address_m);
86650+ ptl_m = pte_lockptr(mm, pmd_m);
86651+ if (ptl != ptl_m) {
86652+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
86653+ if (!pte_none(*pte_m))
86654+ goto out;
86655+ }
86656+
86657+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
86658+ page_cache_get(page_m);
86659+ page_add_anon_rmap(page_m, vma_m, address_m);
86660+ inc_mm_counter_fast(mm, MM_ANONPAGES);
86661+ set_pte_at(mm, address_m, pte_m, entry_m);
86662+ update_mmu_cache(vma_m, address_m, pte_m);
86663+out:
86664+ if (ptl != ptl_m)
86665+ spin_unlock(ptl_m);
86666+ pte_unmap(pte_m);
86667+ unlock_page(page_m);
86668+}
86669+
86670+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
86671+{
86672+ struct mm_struct *mm = vma->vm_mm;
86673+ unsigned long address_m;
86674+ spinlock_t *ptl_m;
86675+ struct vm_area_struct *vma_m;
86676+ pmd_t *pmd_m;
86677+ pte_t *pte_m, entry_m;
86678+
86679+ BUG_ON(!page_m || PageAnon(page_m));
86680+
86681+ vma_m = pax_find_mirror_vma(vma);
86682+ if (!vma_m)
86683+ return;
86684+
86685+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
86686+ address_m = address + SEGMEXEC_TASK_SIZE;
86687+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
86688+ pte_m = pte_offset_map(pmd_m, address_m);
86689+ ptl_m = pte_lockptr(mm, pmd_m);
86690+ if (ptl != ptl_m) {
86691+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
86692+ if (!pte_none(*pte_m))
86693+ goto out;
86694+ }
86695+
86696+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
86697+ page_cache_get(page_m);
86698+ page_add_file_rmap(page_m);
86699+ inc_mm_counter_fast(mm, MM_FILEPAGES);
86700+ set_pte_at(mm, address_m, pte_m, entry_m);
86701+ update_mmu_cache(vma_m, address_m, pte_m);
86702+out:
86703+ if (ptl != ptl_m)
86704+ spin_unlock(ptl_m);
86705+ pte_unmap(pte_m);
86706+}
86707+
86708+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
86709+{
86710+ struct mm_struct *mm = vma->vm_mm;
86711+ unsigned long address_m;
86712+ spinlock_t *ptl_m;
86713+ struct vm_area_struct *vma_m;
86714+ pmd_t *pmd_m;
86715+ pte_t *pte_m, entry_m;
86716+
86717+ vma_m = pax_find_mirror_vma(vma);
86718+ if (!vma_m)
86719+ return;
86720+
86721+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
86722+ address_m = address + SEGMEXEC_TASK_SIZE;
86723+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
86724+ pte_m = pte_offset_map(pmd_m, address_m);
86725+ ptl_m = pte_lockptr(mm, pmd_m);
86726+ if (ptl != ptl_m) {
86727+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
86728+ if (!pte_none(*pte_m))
86729+ goto out;
86730+ }
86731+
86732+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
86733+ set_pte_at(mm, address_m, pte_m, entry_m);
86734+out:
86735+ if (ptl != ptl_m)
86736+ spin_unlock(ptl_m);
86737+ pte_unmap(pte_m);
86738+}
86739+
86740+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
86741+{
86742+ struct page *page_m;
86743+ pte_t entry;
86744+
86745+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
86746+ goto out;
86747+
86748+ entry = *pte;
86749+ page_m = vm_normal_page(vma, address, entry);
86750+ if (!page_m)
86751+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
86752+ else if (PageAnon(page_m)) {
86753+ if (pax_find_mirror_vma(vma)) {
86754+ pte_unmap_unlock(pte, ptl);
86755+ lock_page(page_m);
86756+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
86757+ if (pte_same(entry, *pte))
86758+ pax_mirror_anon_pte(vma, address, page_m, ptl);
86759+ else
86760+ unlock_page(page_m);
86761+ }
86762+ } else
86763+ pax_mirror_file_pte(vma, address, page_m, ptl);
86764+
86765+out:
86766+ pte_unmap_unlock(pte, ptl);
86767+}
86768+#endif
86769+
86770 /*
86771 * This routine handles present pages, when users try to write
86772 * to a shared page. It is done by copying the page to a new address
86773@@ -2808,6 +3004,12 @@ gotten:
86774 */
86775 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
86776 if (likely(pte_same(*page_table, orig_pte))) {
86777+
86778+#ifdef CONFIG_PAX_SEGMEXEC
86779+ if (pax_find_mirror_vma(vma))
86780+ BUG_ON(!trylock_page(new_page));
86781+#endif
86782+
86783 if (old_page) {
86784 if (!PageAnon(old_page)) {
86785 dec_mm_counter_fast(mm, MM_FILEPAGES);
86786@@ -2859,6 +3061,10 @@ gotten:
86787 page_remove_rmap(old_page);
86788 }
86789
86790+#ifdef CONFIG_PAX_SEGMEXEC
86791+ pax_mirror_anon_pte(vma, address, new_page, ptl);
86792+#endif
86793+
86794 /* Free the old page.. */
86795 new_page = old_page;
86796 ret |= VM_FAULT_WRITE;
86797@@ -3134,6 +3340,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
86798 swap_free(entry);
86799 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
86800 try_to_free_swap(page);
86801+
86802+#ifdef CONFIG_PAX_SEGMEXEC
86803+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
86804+#endif
86805+
86806 unlock_page(page);
86807 if (page != swapcache) {
86808 /*
86809@@ -3157,6 +3368,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
86810
86811 /* No need to invalidate - it was non-present before */
86812 update_mmu_cache(vma, address, page_table);
86813+
86814+#ifdef CONFIG_PAX_SEGMEXEC
86815+ pax_mirror_anon_pte(vma, address, page, ptl);
86816+#endif
86817+
86818 unlock:
86819 pte_unmap_unlock(page_table, ptl);
86820 out:
86821@@ -3176,40 +3392,6 @@ out_release:
86822 }
86823
86824 /*
86825- * This is like a special single-page "expand_{down|up}wards()",
86826- * except we must first make sure that 'address{-|+}PAGE_SIZE'
86827- * doesn't hit another vma.
86828- */
86829-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
86830-{
86831- address &= PAGE_MASK;
86832- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
86833- struct vm_area_struct *prev = vma->vm_prev;
86834-
86835- /*
86836- * Is there a mapping abutting this one below?
86837- *
86838- * That's only ok if it's the same stack mapping
86839- * that has gotten split..
86840- */
86841- if (prev && prev->vm_end == address)
86842- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
86843-
86844- expand_downwards(vma, address - PAGE_SIZE);
86845- }
86846- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
86847- struct vm_area_struct *next = vma->vm_next;
86848-
86849- /* As VM_GROWSDOWN but s/below/above/ */
86850- if (next && next->vm_start == address + PAGE_SIZE)
86851- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
86852-
86853- expand_upwards(vma, address + PAGE_SIZE);
86854- }
86855- return 0;
86856-}
86857-
86858-/*
86859 * We enter with non-exclusive mmap_sem (to exclude vma changes,
86860 * but allow concurrent faults), and pte mapped but not yet locked.
86861 * We return with mmap_sem still held, but pte unmapped and unlocked.
86862@@ -3218,27 +3400,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
86863 unsigned long address, pte_t *page_table, pmd_t *pmd,
86864 unsigned int flags)
86865 {
86866- struct page *page;
86867+ struct page *page = NULL;
86868 spinlock_t *ptl;
86869 pte_t entry;
86870
86871- pte_unmap(page_table);
86872-
86873- /* Check if we need to add a guard page to the stack */
86874- if (check_stack_guard_page(vma, address) < 0)
86875- return VM_FAULT_SIGBUS;
86876-
86877- /* Use the zero-page for reads */
86878 if (!(flags & FAULT_FLAG_WRITE)) {
86879 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
86880 vma->vm_page_prot));
86881- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
86882+ ptl = pte_lockptr(mm, pmd);
86883+ spin_lock(ptl);
86884 if (!pte_none(*page_table))
86885 goto unlock;
86886 goto setpte;
86887 }
86888
86889 /* Allocate our own private page. */
86890+ pte_unmap(page_table);
86891+
86892 if (unlikely(anon_vma_prepare(vma)))
86893 goto oom;
86894 page = alloc_zeroed_user_highpage_movable(vma, address);
86895@@ -3262,6 +3440,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
86896 if (!pte_none(*page_table))
86897 goto release;
86898
86899+#ifdef CONFIG_PAX_SEGMEXEC
86900+ if (pax_find_mirror_vma(vma))
86901+ BUG_ON(!trylock_page(page));
86902+#endif
86903+
86904 inc_mm_counter_fast(mm, MM_ANONPAGES);
86905 page_add_new_anon_rmap(page, vma, address);
86906 setpte:
86907@@ -3269,6 +3452,12 @@ setpte:
86908
86909 /* No need to invalidate - it was non-present before */
86910 update_mmu_cache(vma, address, page_table);
86911+
86912+#ifdef CONFIG_PAX_SEGMEXEC
86913+ if (page)
86914+ pax_mirror_anon_pte(vma, address, page, ptl);
86915+#endif
86916+
86917 unlock:
86918 pte_unmap_unlock(page_table, ptl);
86919 return 0;
86920@@ -3412,6 +3601,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
86921 */
86922 /* Only go through if we didn't race with anybody else... */
86923 if (likely(pte_same(*page_table, orig_pte))) {
86924+
86925+#ifdef CONFIG_PAX_SEGMEXEC
86926+ if (anon && pax_find_mirror_vma(vma))
86927+ BUG_ON(!trylock_page(page));
86928+#endif
86929+
86930 flush_icache_page(vma, page);
86931 entry = mk_pte(page, vma->vm_page_prot);
86932 if (flags & FAULT_FLAG_WRITE)
86933@@ -3431,6 +3626,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
86934
86935 /* no need to invalidate: a not-present page won't be cached */
86936 update_mmu_cache(vma, address, page_table);
86937+
86938+#ifdef CONFIG_PAX_SEGMEXEC
86939+ if (anon)
86940+ pax_mirror_anon_pte(vma, address, page, ptl);
86941+ else
86942+ pax_mirror_file_pte(vma, address, page, ptl);
86943+#endif
86944+
86945 } else {
86946 if (cow_page)
86947 mem_cgroup_uncharge_page(cow_page);
86948@@ -3752,6 +3955,12 @@ int handle_pte_fault(struct mm_struct *mm,
86949 if (flags & FAULT_FLAG_WRITE)
86950 flush_tlb_fix_spurious_fault(vma, address);
86951 }
86952+
86953+#ifdef CONFIG_PAX_SEGMEXEC
86954+ pax_mirror_pte(vma, address, pte, pmd, ptl);
86955+ return 0;
86956+#endif
86957+
86958 unlock:
86959 pte_unmap_unlock(pte, ptl);
86960 return 0;
86961@@ -3768,6 +3977,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
86962 pmd_t *pmd;
86963 pte_t *pte;
86964
86965+#ifdef CONFIG_PAX_SEGMEXEC
86966+ struct vm_area_struct *vma_m;
86967+#endif
86968+
86969 __set_current_state(TASK_RUNNING);
86970
86971 count_vm_event(PGFAULT);
86972@@ -3779,6 +3992,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
86973 if (unlikely(is_vm_hugetlb_page(vma)))
86974 return hugetlb_fault(mm, vma, address, flags);
86975
86976+#ifdef CONFIG_PAX_SEGMEXEC
86977+ vma_m = pax_find_mirror_vma(vma);
86978+ if (vma_m) {
86979+ unsigned long address_m;
86980+ pgd_t *pgd_m;
86981+ pud_t *pud_m;
86982+ pmd_t *pmd_m;
86983+
86984+ if (vma->vm_start > vma_m->vm_start) {
86985+ address_m = address;
86986+ address -= SEGMEXEC_TASK_SIZE;
86987+ vma = vma_m;
86988+ } else
86989+ address_m = address + SEGMEXEC_TASK_SIZE;
86990+
86991+ pgd_m = pgd_offset(mm, address_m);
86992+ pud_m = pud_alloc(mm, pgd_m, address_m);
86993+ if (!pud_m)
86994+ return VM_FAULT_OOM;
86995+ pmd_m = pmd_alloc(mm, pud_m, address_m);
86996+ if (!pmd_m)
86997+ return VM_FAULT_OOM;
86998+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
86999+ return VM_FAULT_OOM;
87000+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
87001+ }
87002+#endif
87003+
87004 retry:
87005 pgd = pgd_offset(mm, address);
87006 pud = pud_alloc(mm, pgd, address);
87007@@ -3877,6 +4118,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
87008 spin_unlock(&mm->page_table_lock);
87009 return 0;
87010 }
87011+
87012+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
87013+{
87014+ pud_t *new = pud_alloc_one(mm, address);
87015+ if (!new)
87016+ return -ENOMEM;
87017+
87018+ smp_wmb(); /* See comment in __pte_alloc */
87019+
87020+ spin_lock(&mm->page_table_lock);
87021+ if (pgd_present(*pgd)) /* Another has populated it */
87022+ pud_free(mm, new);
87023+ else
87024+ pgd_populate_kernel(mm, pgd, new);
87025+ spin_unlock(&mm->page_table_lock);
87026+ return 0;
87027+}
87028 #endif /* __PAGETABLE_PUD_FOLDED */
87029
87030 #ifndef __PAGETABLE_PMD_FOLDED
87031@@ -3907,6 +4165,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
87032 spin_unlock(&mm->page_table_lock);
87033 return 0;
87034 }
87035+
87036+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
87037+{
87038+ pmd_t *new = pmd_alloc_one(mm, address);
87039+ if (!new)
87040+ return -ENOMEM;
87041+
87042+ smp_wmb(); /* See comment in __pte_alloc */
87043+
87044+ spin_lock(&mm->page_table_lock);
87045+#ifndef __ARCH_HAS_4LEVEL_HACK
87046+ if (pud_present(*pud)) /* Another has populated it */
87047+ pmd_free(mm, new);
87048+ else
87049+ pud_populate_kernel(mm, pud, new);
87050+#else
87051+ if (pgd_present(*pud)) /* Another has populated it */
87052+ pmd_free(mm, new);
87053+ else
87054+ pgd_populate_kernel(mm, pud, new);
87055+#endif /* __ARCH_HAS_4LEVEL_HACK */
87056+ spin_unlock(&mm->page_table_lock);
87057+ return 0;
87058+}
87059 #endif /* __PAGETABLE_PMD_FOLDED */
87060
87061 #if !defined(__HAVE_ARCH_GATE_AREA)
87062@@ -3920,7 +4202,7 @@ static int __init gate_vma_init(void)
87063 gate_vma.vm_start = FIXADDR_USER_START;
87064 gate_vma.vm_end = FIXADDR_USER_END;
87065 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
87066- gate_vma.vm_page_prot = __P101;
87067+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
87068
87069 return 0;
87070 }
87071@@ -4054,8 +4336,8 @@ out:
87072 return ret;
87073 }
87074
87075-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
87076- void *buf, int len, int write)
87077+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
87078+ void *buf, size_t len, int write)
87079 {
87080 resource_size_t phys_addr;
87081 unsigned long prot = 0;
87082@@ -4080,8 +4362,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
87083 * Access another process' address space as given in mm. If non-NULL, use the
87084 * given task for page fault accounting.
87085 */
87086-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
87087- unsigned long addr, void *buf, int len, int write)
87088+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
87089+ unsigned long addr, void *buf, size_t len, int write)
87090 {
87091 struct vm_area_struct *vma;
87092 void *old_buf = buf;
87093@@ -4089,7 +4371,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
87094 down_read(&mm->mmap_sem);
87095 /* ignore errors, just check how much was successfully transferred */
87096 while (len) {
87097- int bytes, ret, offset;
87098+ ssize_t bytes, ret, offset;
87099 void *maddr;
87100 struct page *page = NULL;
87101
87102@@ -4148,8 +4430,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
87103 *
87104 * The caller must hold a reference on @mm.
87105 */
87106-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
87107- void *buf, int len, int write)
87108+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
87109+ void *buf, size_t len, int write)
87110 {
87111 return __access_remote_vm(NULL, mm, addr, buf, len, write);
87112 }
87113@@ -4159,11 +4441,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
87114 * Source/target buffer must be kernel space,
87115 * Do not walk the page table directly, use get_user_pages
87116 */
87117-int access_process_vm(struct task_struct *tsk, unsigned long addr,
87118- void *buf, int len, int write)
87119+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
87120+ void *buf, size_t len, int write)
87121 {
87122 struct mm_struct *mm;
87123- int ret;
87124+ ssize_t ret;
87125
87126 mm = get_task_mm(tsk);
87127 if (!mm)
87128diff --git a/mm/mempolicy.c b/mm/mempolicy.c
87129index 4baf12e..5497066 100644
87130--- a/mm/mempolicy.c
87131+++ b/mm/mempolicy.c
87132@@ -708,6 +708,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
87133 unsigned long vmstart;
87134 unsigned long vmend;
87135
87136+#ifdef CONFIG_PAX_SEGMEXEC
87137+ struct vm_area_struct *vma_m;
87138+#endif
87139+
87140 vma = find_vma(mm, start);
87141 if (!vma || vma->vm_start > start)
87142 return -EFAULT;
87143@@ -751,6 +755,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
87144 err = vma_replace_policy(vma, new_pol);
87145 if (err)
87146 goto out;
87147+
87148+#ifdef CONFIG_PAX_SEGMEXEC
87149+ vma_m = pax_find_mirror_vma(vma);
87150+ if (vma_m) {
87151+ err = vma_replace_policy(vma_m, new_pol);
87152+ if (err)
87153+ goto out;
87154+ }
87155+#endif
87156+
87157 }
87158
87159 out:
87160@@ -1206,6 +1220,17 @@ static long do_mbind(unsigned long start, unsigned long len,
87161
87162 if (end < start)
87163 return -EINVAL;
87164+
87165+#ifdef CONFIG_PAX_SEGMEXEC
87166+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
87167+ if (end > SEGMEXEC_TASK_SIZE)
87168+ return -EINVAL;
87169+ } else
87170+#endif
87171+
87172+ if (end > TASK_SIZE)
87173+ return -EINVAL;
87174+
87175 if (end == start)
87176 return 0;
87177
87178@@ -1434,8 +1459,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
87179 */
87180 tcred = __task_cred(task);
87181 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
87182- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
87183- !capable(CAP_SYS_NICE)) {
87184+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
87185 rcu_read_unlock();
87186 err = -EPERM;
87187 goto out_put;
87188@@ -1466,6 +1490,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
87189 goto out;
87190 }
87191
87192+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
87193+ if (mm != current->mm &&
87194+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
87195+ mmput(mm);
87196+ err = -EPERM;
87197+ goto out;
87198+ }
87199+#endif
87200+
87201 err = do_migrate_pages(mm, old, new,
87202 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
87203
87204diff --git a/mm/migrate.c b/mm/migrate.c
87205index 6f0c244..6d1ae32 100644
87206--- a/mm/migrate.c
87207+++ b/mm/migrate.c
87208@@ -1399,8 +1399,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
87209 */
87210 tcred = __task_cred(task);
87211 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
87212- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
87213- !capable(CAP_SYS_NICE)) {
87214+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
87215 rcu_read_unlock();
87216 err = -EPERM;
87217 goto out;
87218diff --git a/mm/mlock.c b/mm/mlock.c
87219index 79b7cf7..9944291 100644
87220--- a/mm/mlock.c
87221+++ b/mm/mlock.c
87222@@ -13,6 +13,7 @@
87223 #include <linux/pagemap.h>
87224 #include <linux/mempolicy.h>
87225 #include <linux/syscalls.h>
87226+#include <linux/security.h>
87227 #include <linux/sched.h>
87228 #include <linux/export.h>
87229 #include <linux/rmap.h>
87230@@ -334,7 +335,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
87231 {
87232 unsigned long nstart, end, tmp;
87233 struct vm_area_struct * vma, * prev;
87234- int error;
87235+ int error = 0;
87236
87237 VM_BUG_ON(start & ~PAGE_MASK);
87238 VM_BUG_ON(len != PAGE_ALIGN(len));
87239@@ -343,6 +344,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
87240 return -EINVAL;
87241 if (end == start)
87242 return 0;
87243+ if (end > TASK_SIZE)
87244+ return -EINVAL;
87245+
87246 vma = find_vma(current->mm, start);
87247 if (!vma || vma->vm_start > start)
87248 return -ENOMEM;
87249@@ -354,6 +358,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
87250 for (nstart = start ; ; ) {
87251 vm_flags_t newflags;
87252
87253+#ifdef CONFIG_PAX_SEGMEXEC
87254+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
87255+ break;
87256+#endif
87257+
87258 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
87259
87260 newflags = vma->vm_flags & ~VM_LOCKED;
87261@@ -466,6 +475,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
87262 lock_limit >>= PAGE_SHIFT;
87263
87264 /* check against resource limits */
87265+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
87266 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
87267 error = do_mlock(start, len, 1);
87268 up_write(&current->mm->mmap_sem);
87269@@ -500,6 +510,11 @@ static int do_mlockall(int flags)
87270 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
87271 vm_flags_t newflags;
87272
87273+#ifdef CONFIG_PAX_SEGMEXEC
87274+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
87275+ break;
87276+#endif
87277+
87278 newflags = vma->vm_flags & ~VM_LOCKED;
87279 if (flags & MCL_CURRENT)
87280 newflags |= VM_LOCKED;
87281@@ -532,6 +547,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
87282 lock_limit >>= PAGE_SHIFT;
87283
87284 ret = -ENOMEM;
87285+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
87286 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
87287 capable(CAP_IPC_LOCK))
87288 ret = do_mlockall(flags);
87289diff --git a/mm/mmap.c b/mm/mmap.c
87290index 8d25fdc..bfb7626 100644
87291--- a/mm/mmap.c
87292+++ b/mm/mmap.c
87293@@ -36,6 +36,7 @@
87294 #include <linux/sched/sysctl.h>
87295 #include <linux/notifier.h>
87296 #include <linux/memory.h>
87297+#include <linux/random.h>
87298
87299 #include <asm/uaccess.h>
87300 #include <asm/cacheflush.h>
87301@@ -52,6 +53,16 @@
87302 #define arch_rebalance_pgtables(addr, len) (addr)
87303 #endif
87304
87305+static inline void verify_mm_writelocked(struct mm_struct *mm)
87306+{
87307+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
87308+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
87309+ up_read(&mm->mmap_sem);
87310+ BUG();
87311+ }
87312+#endif
87313+}
87314+
87315 static void unmap_region(struct mm_struct *mm,
87316 struct vm_area_struct *vma, struct vm_area_struct *prev,
87317 unsigned long start, unsigned long end);
87318@@ -71,16 +82,25 @@ static void unmap_region(struct mm_struct *mm,
87319 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
87320 *
87321 */
87322-pgprot_t protection_map[16] = {
87323+pgprot_t protection_map[16] __read_only = {
87324 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
87325 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
87326 };
87327
87328-pgprot_t vm_get_page_prot(unsigned long vm_flags)
87329+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
87330 {
87331- return __pgprot(pgprot_val(protection_map[vm_flags &
87332+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
87333 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
87334 pgprot_val(arch_vm_get_page_prot(vm_flags)));
87335+
87336+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
87337+ if (!(__supported_pte_mask & _PAGE_NX) &&
87338+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
87339+ (vm_flags & (VM_READ | VM_WRITE)))
87340+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
87341+#endif
87342+
87343+ return prot;
87344 }
87345 EXPORT_SYMBOL(vm_get_page_prot);
87346
87347@@ -89,6 +109,7 @@ int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
87348 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
87349 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
87350 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
87351+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
87352 /*
87353 * Make sure vm_committed_as in one cacheline and not cacheline shared with
87354 * other variables. It can be updated by several CPUs frequently.
87355@@ -247,6 +268,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
87356 struct vm_area_struct *next = vma->vm_next;
87357
87358 might_sleep();
87359+ BUG_ON(vma->vm_mirror);
87360 if (vma->vm_ops && vma->vm_ops->close)
87361 vma->vm_ops->close(vma);
87362 if (vma->vm_file)
87363@@ -291,6 +313,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
87364 * not page aligned -Ram Gupta
87365 */
87366 rlim = rlimit(RLIMIT_DATA);
87367+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
87368 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
87369 (mm->end_data - mm->start_data) > rlim)
87370 goto out;
87371@@ -933,6 +956,12 @@ static int
87372 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
87373 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
87374 {
87375+
87376+#ifdef CONFIG_PAX_SEGMEXEC
87377+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
87378+ return 0;
87379+#endif
87380+
87381 if (is_mergeable_vma(vma, file, vm_flags) &&
87382 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
87383 if (vma->vm_pgoff == vm_pgoff)
87384@@ -952,6 +981,12 @@ static int
87385 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
87386 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
87387 {
87388+
87389+#ifdef CONFIG_PAX_SEGMEXEC
87390+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
87391+ return 0;
87392+#endif
87393+
87394 if (is_mergeable_vma(vma, file, vm_flags) &&
87395 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
87396 pgoff_t vm_pglen;
87397@@ -994,13 +1029,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
87398 struct vm_area_struct *vma_merge(struct mm_struct *mm,
87399 struct vm_area_struct *prev, unsigned long addr,
87400 unsigned long end, unsigned long vm_flags,
87401- struct anon_vma *anon_vma, struct file *file,
87402+ struct anon_vma *anon_vma, struct file *file,
87403 pgoff_t pgoff, struct mempolicy *policy)
87404 {
87405 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
87406 struct vm_area_struct *area, *next;
87407 int err;
87408
87409+#ifdef CONFIG_PAX_SEGMEXEC
87410+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
87411+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
87412+
87413+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
87414+#endif
87415+
87416 /*
87417 * We later require that vma->vm_flags == vm_flags,
87418 * so this tests vma->vm_flags & VM_SPECIAL, too.
87419@@ -1016,6 +1058,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
87420 if (next && next->vm_end == end) /* cases 6, 7, 8 */
87421 next = next->vm_next;
87422
87423+#ifdef CONFIG_PAX_SEGMEXEC
87424+ if (prev)
87425+ prev_m = pax_find_mirror_vma(prev);
87426+ if (area)
87427+ area_m = pax_find_mirror_vma(area);
87428+ if (next)
87429+ next_m = pax_find_mirror_vma(next);
87430+#endif
87431+
87432 /*
87433 * Can it merge with the predecessor?
87434 */
87435@@ -1035,9 +1086,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
87436 /* cases 1, 6 */
87437 err = vma_adjust(prev, prev->vm_start,
87438 next->vm_end, prev->vm_pgoff, NULL);
87439- } else /* cases 2, 5, 7 */
87440+
87441+#ifdef CONFIG_PAX_SEGMEXEC
87442+ if (!err && prev_m)
87443+ err = vma_adjust(prev_m, prev_m->vm_start,
87444+ next_m->vm_end, prev_m->vm_pgoff, NULL);
87445+#endif
87446+
87447+ } else { /* cases 2, 5, 7 */
87448 err = vma_adjust(prev, prev->vm_start,
87449 end, prev->vm_pgoff, NULL);
87450+
87451+#ifdef CONFIG_PAX_SEGMEXEC
87452+ if (!err && prev_m)
87453+ err = vma_adjust(prev_m, prev_m->vm_start,
87454+ end_m, prev_m->vm_pgoff, NULL);
87455+#endif
87456+
87457+ }
87458 if (err)
87459 return NULL;
87460 khugepaged_enter_vma_merge(prev);
87461@@ -1051,12 +1117,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
87462 mpol_equal(policy, vma_policy(next)) &&
87463 can_vma_merge_before(next, vm_flags,
87464 anon_vma, file, pgoff+pglen)) {
87465- if (prev && addr < prev->vm_end) /* case 4 */
87466+ if (prev && addr < prev->vm_end) { /* case 4 */
87467 err = vma_adjust(prev, prev->vm_start,
87468 addr, prev->vm_pgoff, NULL);
87469- else /* cases 3, 8 */
87470+
87471+#ifdef CONFIG_PAX_SEGMEXEC
87472+ if (!err && prev_m)
87473+ err = vma_adjust(prev_m, prev_m->vm_start,
87474+ addr_m, prev_m->vm_pgoff, NULL);
87475+#endif
87476+
87477+ } else { /* cases 3, 8 */
87478 err = vma_adjust(area, addr, next->vm_end,
87479 next->vm_pgoff - pglen, NULL);
87480+
87481+#ifdef CONFIG_PAX_SEGMEXEC
87482+ if (!err && area_m)
87483+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
87484+ next_m->vm_pgoff - pglen, NULL);
87485+#endif
87486+
87487+ }
87488 if (err)
87489 return NULL;
87490 khugepaged_enter_vma_merge(area);
87491@@ -1165,8 +1246,10 @@ none:
87492 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
87493 struct file *file, long pages)
87494 {
87495- const unsigned long stack_flags
87496- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
87497+
87498+#ifdef CONFIG_PAX_RANDMMAP
87499+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
87500+#endif
87501
87502 mm->total_vm += pages;
87503
87504@@ -1174,7 +1257,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
87505 mm->shared_vm += pages;
87506 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
87507 mm->exec_vm += pages;
87508- } else if (flags & stack_flags)
87509+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
87510 mm->stack_vm += pages;
87511 }
87512 #endif /* CONFIG_PROC_FS */
87513@@ -1213,7 +1296,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
87514 * (the exception is when the underlying filesystem is noexec
87515 * mounted, in which case we dont add PROT_EXEC.)
87516 */
87517- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
87518+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
87519 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
87520 prot |= PROT_EXEC;
87521
87522@@ -1239,7 +1322,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
87523 /* Obtain the address to map to. we verify (or select) it and ensure
87524 * that it represents a valid section of the address space.
87525 */
87526- addr = get_unmapped_area(file, addr, len, pgoff, flags);
87527+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
87528 if (addr & ~PAGE_MASK)
87529 return addr;
87530
87531@@ -1250,6 +1333,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
87532 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
87533 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
87534
87535+#ifdef CONFIG_PAX_MPROTECT
87536+ if (mm->pax_flags & MF_PAX_MPROTECT) {
87537+
87538+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
87539+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
87540+ mm->binfmt->handle_mmap)
87541+ mm->binfmt->handle_mmap(file);
87542+#endif
87543+
87544+#ifndef CONFIG_PAX_MPROTECT_COMPAT
87545+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
87546+ gr_log_rwxmmap(file);
87547+
87548+#ifdef CONFIG_PAX_EMUPLT
87549+ vm_flags &= ~VM_EXEC;
87550+#else
87551+ return -EPERM;
87552+#endif
87553+
87554+ }
87555+
87556+ if (!(vm_flags & VM_EXEC))
87557+ vm_flags &= ~VM_MAYEXEC;
87558+#else
87559+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
87560+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
87561+#endif
87562+ else
87563+ vm_flags &= ~VM_MAYWRITE;
87564+ }
87565+#endif
87566+
87567+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
87568+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
87569+ vm_flags &= ~VM_PAGEEXEC;
87570+#endif
87571+
87572 if (flags & MAP_LOCKED)
87573 if (!can_do_mlock())
87574 return -EPERM;
87575@@ -1261,6 +1381,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
87576 locked += mm->locked_vm;
87577 lock_limit = rlimit(RLIMIT_MEMLOCK);
87578 lock_limit >>= PAGE_SHIFT;
87579+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
87580 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
87581 return -EAGAIN;
87582 }
87583@@ -1341,6 +1462,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
87584 vm_flags |= VM_NORESERVE;
87585 }
87586
87587+ if (!gr_acl_handle_mmap(file, prot))
87588+ return -EACCES;
87589+
87590 addr = mmap_region(file, addr, len, vm_flags, pgoff);
87591 if (!IS_ERR_VALUE(addr) &&
87592 ((vm_flags & VM_LOCKED) ||
87593@@ -1432,7 +1556,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
87594 vm_flags_t vm_flags = vma->vm_flags;
87595
87596 /* If it was private or non-writable, the write bit is already clear */
87597- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
87598+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
87599 return 0;
87600
87601 /* The backer wishes to know when pages are first written to? */
87602@@ -1480,7 +1604,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
87603 unsigned long charged = 0;
87604 struct inode *inode = file ? file_inode(file) : NULL;
87605
87606+#ifdef CONFIG_PAX_SEGMEXEC
87607+ struct vm_area_struct *vma_m = NULL;
87608+#endif
87609+
87610+ /*
87611+ * mm->mmap_sem is required to protect against another thread
87612+ * changing the mappings in case we sleep.
87613+ */
87614+ verify_mm_writelocked(mm);
87615+
87616 /* Check against address space limit. */
87617+
87618+#ifdef CONFIG_PAX_RANDMMAP
87619+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
87620+#endif
87621+
87622 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
87623 unsigned long nr_pages;
87624
87625@@ -1499,11 +1638,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
87626
87627 /* Clear old maps */
87628 error = -ENOMEM;
87629-munmap_back:
87630 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
87631 if (do_munmap(mm, addr, len))
87632 return -ENOMEM;
87633- goto munmap_back;
87634+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
87635 }
87636
87637 /*
87638@@ -1534,6 +1672,16 @@ munmap_back:
87639 goto unacct_error;
87640 }
87641
87642+#ifdef CONFIG_PAX_SEGMEXEC
87643+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
87644+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
87645+ if (!vma_m) {
87646+ error = -ENOMEM;
87647+ goto free_vma;
87648+ }
87649+ }
87650+#endif
87651+
87652 vma->vm_mm = mm;
87653 vma->vm_start = addr;
87654 vma->vm_end = addr + len;
87655@@ -1558,6 +1706,13 @@ munmap_back:
87656 if (error)
87657 goto unmap_and_free_vma;
87658
87659+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
87660+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
87661+ vma->vm_flags |= VM_PAGEEXEC;
87662+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
87663+ }
87664+#endif
87665+
87666 /* Can addr have changed??
87667 *
87668 * Answer: Yes, several device drivers can do it in their
87669@@ -1596,6 +1751,11 @@ munmap_back:
87670 vma_link(mm, vma, prev, rb_link, rb_parent);
87671 file = vma->vm_file;
87672
87673+#ifdef CONFIG_PAX_SEGMEXEC
87674+ if (vma_m)
87675+ BUG_ON(pax_mirror_vma(vma_m, vma));
87676+#endif
87677+
87678 /* Once vma denies write, undo our temporary denial count */
87679 if (correct_wcount)
87680 atomic_inc(&inode->i_writecount);
87681@@ -1603,6 +1763,7 @@ out:
87682 perf_event_mmap(vma);
87683
87684 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
87685+ track_exec_limit(mm, addr, addr + len, vm_flags);
87686 if (vm_flags & VM_LOCKED) {
87687 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
87688 vma == get_gate_vma(current->mm)))
87689@@ -1626,6 +1787,12 @@ unmap_and_free_vma:
87690 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
87691 charged = 0;
87692 free_vma:
87693+
87694+#ifdef CONFIG_PAX_SEGMEXEC
87695+ if (vma_m)
87696+ kmem_cache_free(vm_area_cachep, vma_m);
87697+#endif
87698+
87699 kmem_cache_free(vm_area_cachep, vma);
87700 unacct_error:
87701 if (charged)
87702@@ -1633,7 +1800,63 @@ unacct_error:
87703 return error;
87704 }
87705
87706-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
87707+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
87708+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
87709+{
87710+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
87711+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
87712+
87713+ return 0;
87714+}
87715+#endif
87716+
87717+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
87718+{
87719+ if (!vma) {
87720+#ifdef CONFIG_STACK_GROWSUP
87721+ if (addr > sysctl_heap_stack_gap)
87722+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
87723+ else
87724+ vma = find_vma(current->mm, 0);
87725+ if (vma && (vma->vm_flags & VM_GROWSUP))
87726+ return false;
87727+#endif
87728+ return true;
87729+ }
87730+
87731+ if (addr + len > vma->vm_start)
87732+ return false;
87733+
87734+ if (vma->vm_flags & VM_GROWSDOWN)
87735+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
87736+#ifdef CONFIG_STACK_GROWSUP
87737+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
87738+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
87739+#endif
87740+ else if (offset)
87741+ return offset <= vma->vm_start - addr - len;
87742+
87743+ return true;
87744+}
87745+
87746+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
87747+{
87748+ if (vma->vm_start < len)
87749+ return -ENOMEM;
87750+
87751+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
87752+ if (offset <= vma->vm_start - len)
87753+ return vma->vm_start - len - offset;
87754+ else
87755+ return -ENOMEM;
87756+ }
87757+
87758+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
87759+ return vma->vm_start - len - sysctl_heap_stack_gap;
87760+ return -ENOMEM;
87761+}
87762+
87763+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
87764 {
87765 /*
87766 * We implement the search by looking for an rbtree node that
87767@@ -1681,11 +1904,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
87768 }
87769 }
87770
87771- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
87772+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
87773 check_current:
87774 /* Check if current node has a suitable gap */
87775 if (gap_start > high_limit)
87776 return -ENOMEM;
87777+
87778+ if (gap_end - gap_start > info->threadstack_offset)
87779+ gap_start += info->threadstack_offset;
87780+ else
87781+ gap_start = gap_end;
87782+
87783+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
87784+ if (gap_end - gap_start > sysctl_heap_stack_gap)
87785+ gap_start += sysctl_heap_stack_gap;
87786+ else
87787+ gap_start = gap_end;
87788+ }
87789+ if (vma->vm_flags & VM_GROWSDOWN) {
87790+ if (gap_end - gap_start > sysctl_heap_stack_gap)
87791+ gap_end -= sysctl_heap_stack_gap;
87792+ else
87793+ gap_end = gap_start;
87794+ }
87795 if (gap_end >= low_limit && gap_end - gap_start >= length)
87796 goto found;
87797
87798@@ -1735,7 +1976,7 @@ found:
87799 return gap_start;
87800 }
87801
87802-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
87803+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
87804 {
87805 struct mm_struct *mm = current->mm;
87806 struct vm_area_struct *vma;
87807@@ -1789,6 +2030,24 @@ check_current:
87808 gap_end = vma->vm_start;
87809 if (gap_end < low_limit)
87810 return -ENOMEM;
87811+
87812+ if (gap_end - gap_start > info->threadstack_offset)
87813+ gap_end -= info->threadstack_offset;
87814+ else
87815+ gap_end = gap_start;
87816+
87817+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
87818+ if (gap_end - gap_start > sysctl_heap_stack_gap)
87819+ gap_start += sysctl_heap_stack_gap;
87820+ else
87821+ gap_start = gap_end;
87822+ }
87823+ if (vma->vm_flags & VM_GROWSDOWN) {
87824+ if (gap_end - gap_start > sysctl_heap_stack_gap)
87825+ gap_end -= sysctl_heap_stack_gap;
87826+ else
87827+ gap_end = gap_start;
87828+ }
87829 if (gap_start <= high_limit && gap_end - gap_start >= length)
87830 goto found;
87831
87832@@ -1852,6 +2111,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
87833 struct mm_struct *mm = current->mm;
87834 struct vm_area_struct *vma;
87835 struct vm_unmapped_area_info info;
87836+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
87837
87838 if (len > TASK_SIZE)
87839 return -ENOMEM;
87840@@ -1859,29 +2119,45 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
87841 if (flags & MAP_FIXED)
87842 return addr;
87843
87844+#ifdef CONFIG_PAX_RANDMMAP
87845+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
87846+#endif
87847+
87848 if (addr) {
87849 addr = PAGE_ALIGN(addr);
87850 vma = find_vma(mm, addr);
87851- if (TASK_SIZE - len >= addr &&
87852- (!vma || addr + len <= vma->vm_start))
87853+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
87854 return addr;
87855 }
87856
87857 info.flags = 0;
87858 info.length = len;
87859 info.low_limit = TASK_UNMAPPED_BASE;
87860+
87861+#ifdef CONFIG_PAX_RANDMMAP
87862+ if (mm->pax_flags & MF_PAX_RANDMMAP)
87863+ info.low_limit += mm->delta_mmap;
87864+#endif
87865+
87866 info.high_limit = TASK_SIZE;
87867 info.align_mask = 0;
87868+ info.threadstack_offset = offset;
87869 return vm_unmapped_area(&info);
87870 }
87871 #endif
87872
87873 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
87874 {
87875+
87876+#ifdef CONFIG_PAX_SEGMEXEC
87877+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
87878+ return;
87879+#endif
87880+
87881 /*
87882 * Is this a new hole at the lowest possible address?
87883 */
87884- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
87885+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
87886 mm->free_area_cache = addr;
87887 }
87888
87889@@ -1899,6 +2175,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
87890 struct mm_struct *mm = current->mm;
87891 unsigned long addr = addr0;
87892 struct vm_unmapped_area_info info;
87893+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
87894
87895 /* requested length too big for entire address space */
87896 if (len > TASK_SIZE)
87897@@ -1907,12 +2184,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
87898 if (flags & MAP_FIXED)
87899 return addr;
87900
87901+#ifdef CONFIG_PAX_RANDMMAP
87902+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
87903+#endif
87904+
87905 /* requesting a specific address */
87906 if (addr) {
87907 addr = PAGE_ALIGN(addr);
87908 vma = find_vma(mm, addr);
87909- if (TASK_SIZE - len >= addr &&
87910- (!vma || addr + len <= vma->vm_start))
87911+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
87912 return addr;
87913 }
87914
87915@@ -1921,6 +2201,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
87916 info.low_limit = PAGE_SIZE;
87917 info.high_limit = mm->mmap_base;
87918 info.align_mask = 0;
87919+ info.threadstack_offset = offset;
87920 addr = vm_unmapped_area(&info);
87921
87922 /*
87923@@ -1933,6 +2214,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
87924 VM_BUG_ON(addr != -ENOMEM);
87925 info.flags = 0;
87926 info.low_limit = TASK_UNMAPPED_BASE;
87927+
87928+#ifdef CONFIG_PAX_RANDMMAP
87929+ if (mm->pax_flags & MF_PAX_RANDMMAP)
87930+ info.low_limit += mm->delta_mmap;
87931+#endif
87932+
87933 info.high_limit = TASK_SIZE;
87934 addr = vm_unmapped_area(&info);
87935 }
87936@@ -1943,6 +2230,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
87937
87938 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
87939 {
87940+
87941+#ifdef CONFIG_PAX_SEGMEXEC
87942+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
87943+ return;
87944+#endif
87945+
87946 /*
87947 * Is this a new hole at the highest possible address?
87948 */
87949@@ -1950,8 +2243,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
87950 mm->free_area_cache = addr;
87951
87952 /* dont allow allocations above current base */
87953- if (mm->free_area_cache > mm->mmap_base)
87954+ if (mm->free_area_cache > mm->mmap_base) {
87955 mm->free_area_cache = mm->mmap_base;
87956+ mm->cached_hole_size = ~0UL;
87957+ }
87958 }
87959
87960 unsigned long
87961@@ -2047,6 +2342,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
87962 return vma;
87963 }
87964
87965+#ifdef CONFIG_PAX_SEGMEXEC
87966+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
87967+{
87968+ struct vm_area_struct *vma_m;
87969+
87970+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
87971+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
87972+ BUG_ON(vma->vm_mirror);
87973+ return NULL;
87974+ }
87975+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
87976+ vma_m = vma->vm_mirror;
87977+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
87978+ BUG_ON(vma->vm_file != vma_m->vm_file);
87979+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
87980+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
87981+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
87982+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
87983+ return vma_m;
87984+}
87985+#endif
87986+
87987 /*
87988 * Verify that the stack growth is acceptable and
87989 * update accounting. This is shared with both the
87990@@ -2063,6 +2380,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
87991 return -ENOMEM;
87992
87993 /* Stack limit test */
87994+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
87995 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
87996 return -ENOMEM;
87997
87998@@ -2073,6 +2391,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
87999 locked = mm->locked_vm + grow;
88000 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
88001 limit >>= PAGE_SHIFT;
88002+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
88003 if (locked > limit && !capable(CAP_IPC_LOCK))
88004 return -ENOMEM;
88005 }
88006@@ -2102,37 +2421,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
88007 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
88008 * vma is the last one with address > vma->vm_end. Have to extend vma.
88009 */
88010+#ifndef CONFIG_IA64
88011+static
88012+#endif
88013 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
88014 {
88015 int error;
88016+ bool locknext;
88017
88018 if (!(vma->vm_flags & VM_GROWSUP))
88019 return -EFAULT;
88020
88021+ /* Also guard against wrapping around to address 0. */
88022+ if (address < PAGE_ALIGN(address+1))
88023+ address = PAGE_ALIGN(address+1);
88024+ else
88025+ return -ENOMEM;
88026+
88027 /*
88028 * We must make sure the anon_vma is allocated
88029 * so that the anon_vma locking is not a noop.
88030 */
88031 if (unlikely(anon_vma_prepare(vma)))
88032 return -ENOMEM;
88033+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
88034+ if (locknext && anon_vma_prepare(vma->vm_next))
88035+ return -ENOMEM;
88036 vma_lock_anon_vma(vma);
88037+ if (locknext)
88038+ vma_lock_anon_vma(vma->vm_next);
88039
88040 /*
88041 * vma->vm_start/vm_end cannot change under us because the caller
88042 * is required to hold the mmap_sem in read mode. We need the
88043- * anon_vma lock to serialize against concurrent expand_stacks.
88044- * Also guard against wrapping around to address 0.
88045+ * anon_vma locks to serialize against concurrent expand_stacks
88046+ * and expand_upwards.
88047 */
88048- if (address < PAGE_ALIGN(address+4))
88049- address = PAGE_ALIGN(address+4);
88050- else {
88051- vma_unlock_anon_vma(vma);
88052- return -ENOMEM;
88053- }
88054 error = 0;
88055
88056 /* Somebody else might have raced and expanded it already */
88057- if (address > vma->vm_end) {
88058+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
88059+ error = -ENOMEM;
88060+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
88061 unsigned long size, grow;
88062
88063 size = address - vma->vm_start;
88064@@ -2167,6 +2497,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
88065 }
88066 }
88067 }
88068+ if (locknext)
88069+ vma_unlock_anon_vma(vma->vm_next);
88070 vma_unlock_anon_vma(vma);
88071 khugepaged_enter_vma_merge(vma);
88072 validate_mm(vma->vm_mm);
88073@@ -2181,6 +2513,8 @@ int expand_downwards(struct vm_area_struct *vma,
88074 unsigned long address)
88075 {
88076 int error;
88077+ bool lockprev = false;
88078+ struct vm_area_struct *prev;
88079
88080 /*
88081 * We must make sure the anon_vma is allocated
88082@@ -2194,6 +2528,15 @@ int expand_downwards(struct vm_area_struct *vma,
88083 if (error)
88084 return error;
88085
88086+ prev = vma->vm_prev;
88087+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
88088+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
88089+#endif
88090+ if (lockprev && anon_vma_prepare(prev))
88091+ return -ENOMEM;
88092+ if (lockprev)
88093+ vma_lock_anon_vma(prev);
88094+
88095 vma_lock_anon_vma(vma);
88096
88097 /*
88098@@ -2203,9 +2546,17 @@ int expand_downwards(struct vm_area_struct *vma,
88099 */
88100
88101 /* Somebody else might have raced and expanded it already */
88102- if (address < vma->vm_start) {
88103+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
88104+ error = -ENOMEM;
88105+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
88106 unsigned long size, grow;
88107
88108+#ifdef CONFIG_PAX_SEGMEXEC
88109+ struct vm_area_struct *vma_m;
88110+
88111+ vma_m = pax_find_mirror_vma(vma);
88112+#endif
88113+
88114 size = vma->vm_end - address;
88115 grow = (vma->vm_start - address) >> PAGE_SHIFT;
88116
88117@@ -2230,13 +2581,27 @@ int expand_downwards(struct vm_area_struct *vma,
88118 vma->vm_pgoff -= grow;
88119 anon_vma_interval_tree_post_update_vma(vma);
88120 vma_gap_update(vma);
88121+
88122+#ifdef CONFIG_PAX_SEGMEXEC
88123+ if (vma_m) {
88124+ anon_vma_interval_tree_pre_update_vma(vma_m);
88125+ vma_m->vm_start -= grow << PAGE_SHIFT;
88126+ vma_m->vm_pgoff -= grow;
88127+ anon_vma_interval_tree_post_update_vma(vma_m);
88128+ vma_gap_update(vma_m);
88129+ }
88130+#endif
88131+
88132 spin_unlock(&vma->vm_mm->page_table_lock);
88133
88134+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
88135 perf_event_mmap(vma);
88136 }
88137 }
88138 }
88139 vma_unlock_anon_vma(vma);
88140+ if (lockprev)
88141+ vma_unlock_anon_vma(prev);
88142 khugepaged_enter_vma_merge(vma);
88143 validate_mm(vma->vm_mm);
88144 return error;
88145@@ -2334,6 +2699,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
88146 do {
88147 long nrpages = vma_pages(vma);
88148
88149+#ifdef CONFIG_PAX_SEGMEXEC
88150+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
88151+ vma = remove_vma(vma);
88152+ continue;
88153+ }
88154+#endif
88155+
88156 if (vma->vm_flags & VM_ACCOUNT)
88157 nr_accounted += nrpages;
88158 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
88159@@ -2379,6 +2751,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
88160 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
88161 vma->vm_prev = NULL;
88162 do {
88163+
88164+#ifdef CONFIG_PAX_SEGMEXEC
88165+ if (vma->vm_mirror) {
88166+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
88167+ vma->vm_mirror->vm_mirror = NULL;
88168+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
88169+ vma->vm_mirror = NULL;
88170+ }
88171+#endif
88172+
88173 vma_rb_erase(vma, &mm->mm_rb);
88174 mm->map_count--;
88175 tail_vma = vma;
88176@@ -2410,14 +2792,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
88177 struct vm_area_struct *new;
88178 int err = -ENOMEM;
88179
88180+#ifdef CONFIG_PAX_SEGMEXEC
88181+ struct vm_area_struct *vma_m, *new_m = NULL;
88182+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
88183+#endif
88184+
88185 if (is_vm_hugetlb_page(vma) && (addr &
88186 ~(huge_page_mask(hstate_vma(vma)))))
88187 return -EINVAL;
88188
88189+#ifdef CONFIG_PAX_SEGMEXEC
88190+ vma_m = pax_find_mirror_vma(vma);
88191+#endif
88192+
88193 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
88194 if (!new)
88195 goto out_err;
88196
88197+#ifdef CONFIG_PAX_SEGMEXEC
88198+ if (vma_m) {
88199+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
88200+ if (!new_m) {
88201+ kmem_cache_free(vm_area_cachep, new);
88202+ goto out_err;
88203+ }
88204+ }
88205+#endif
88206+
88207 /* most fields are the same, copy all, and then fixup */
88208 *new = *vma;
88209
88210@@ -2430,6 +2831,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
88211 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
88212 }
88213
88214+#ifdef CONFIG_PAX_SEGMEXEC
88215+ if (vma_m) {
88216+ *new_m = *vma_m;
88217+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
88218+ new_m->vm_mirror = new;
88219+ new->vm_mirror = new_m;
88220+
88221+ if (new_below)
88222+ new_m->vm_end = addr_m;
88223+ else {
88224+ new_m->vm_start = addr_m;
88225+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
88226+ }
88227+ }
88228+#endif
88229+
88230 pol = mpol_dup(vma_policy(vma));
88231 if (IS_ERR(pol)) {
88232 err = PTR_ERR(pol);
88233@@ -2452,6 +2869,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
88234 else
88235 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
88236
88237+#ifdef CONFIG_PAX_SEGMEXEC
88238+ if (!err && vma_m) {
88239+ if (anon_vma_clone(new_m, vma_m))
88240+ goto out_free_mpol;
88241+
88242+ mpol_get(pol);
88243+ vma_set_policy(new_m, pol);
88244+
88245+ if (new_m->vm_file)
88246+ get_file(new_m->vm_file);
88247+
88248+ if (new_m->vm_ops && new_m->vm_ops->open)
88249+ new_m->vm_ops->open(new_m);
88250+
88251+ if (new_below)
88252+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
88253+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
88254+ else
88255+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
88256+
88257+ if (err) {
88258+ if (new_m->vm_ops && new_m->vm_ops->close)
88259+ new_m->vm_ops->close(new_m);
88260+ if (new_m->vm_file)
88261+ fput(new_m->vm_file);
88262+ mpol_put(pol);
88263+ }
88264+ }
88265+#endif
88266+
88267 /* Success. */
88268 if (!err)
88269 return 0;
88270@@ -2461,10 +2908,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
88271 new->vm_ops->close(new);
88272 if (new->vm_file)
88273 fput(new->vm_file);
88274- unlink_anon_vmas(new);
88275 out_free_mpol:
88276 mpol_put(pol);
88277 out_free_vma:
88278+
88279+#ifdef CONFIG_PAX_SEGMEXEC
88280+ if (new_m) {
88281+ unlink_anon_vmas(new_m);
88282+ kmem_cache_free(vm_area_cachep, new_m);
88283+ }
88284+#endif
88285+
88286+ unlink_anon_vmas(new);
88287 kmem_cache_free(vm_area_cachep, new);
88288 out_err:
88289 return err;
88290@@ -2477,6 +2932,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
88291 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
88292 unsigned long addr, int new_below)
88293 {
88294+
88295+#ifdef CONFIG_PAX_SEGMEXEC
88296+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
88297+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
88298+ if (mm->map_count >= sysctl_max_map_count-1)
88299+ return -ENOMEM;
88300+ } else
88301+#endif
88302+
88303 if (mm->map_count >= sysctl_max_map_count)
88304 return -ENOMEM;
88305
88306@@ -2488,11 +2952,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
88307 * work. This now handles partial unmappings.
88308 * Jeremy Fitzhardinge <jeremy@goop.org>
88309 */
88310+#ifdef CONFIG_PAX_SEGMEXEC
88311 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
88312 {
88313+ int ret = __do_munmap(mm, start, len);
88314+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
88315+ return ret;
88316+
88317+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
88318+}
88319+
88320+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
88321+#else
88322+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
88323+#endif
88324+{
88325 unsigned long end;
88326 struct vm_area_struct *vma, *prev, *last;
88327
88328+ /*
88329+ * mm->mmap_sem is required to protect against another thread
88330+ * changing the mappings in case we sleep.
88331+ */
88332+ verify_mm_writelocked(mm);
88333+
88334 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
88335 return -EINVAL;
88336
88337@@ -2567,6 +3050,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
88338 /* Fix up all other VM information */
88339 remove_vma_list(mm, vma);
88340
88341+ track_exec_limit(mm, start, end, 0UL);
88342+
88343 return 0;
88344 }
88345
88346@@ -2575,6 +3060,13 @@ int vm_munmap(unsigned long start, size_t len)
88347 int ret;
88348 struct mm_struct *mm = current->mm;
88349
88350+
88351+#ifdef CONFIG_PAX_SEGMEXEC
88352+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
88353+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
88354+ return -EINVAL;
88355+#endif
88356+
88357 down_write(&mm->mmap_sem);
88358 ret = do_munmap(mm, start, len);
88359 up_write(&mm->mmap_sem);
88360@@ -2588,16 +3080,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
88361 return vm_munmap(addr, len);
88362 }
88363
88364-static inline void verify_mm_writelocked(struct mm_struct *mm)
88365-{
88366-#ifdef CONFIG_DEBUG_VM
88367- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
88368- WARN_ON(1);
88369- up_read(&mm->mmap_sem);
88370- }
88371-#endif
88372-}
88373-
88374 /*
88375 * this is really a simplified "do_mmap". it only handles
88376 * anonymous maps. eventually we may be able to do some
88377@@ -2611,6 +3093,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
88378 struct rb_node ** rb_link, * rb_parent;
88379 pgoff_t pgoff = addr >> PAGE_SHIFT;
88380 int error;
88381+ unsigned long charged;
88382
88383 len = PAGE_ALIGN(len);
88384 if (!len)
88385@@ -2618,16 +3101,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
88386
88387 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
88388
88389+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
88390+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
88391+ flags &= ~VM_EXEC;
88392+
88393+#ifdef CONFIG_PAX_MPROTECT
88394+ if (mm->pax_flags & MF_PAX_MPROTECT)
88395+ flags &= ~VM_MAYEXEC;
88396+#endif
88397+
88398+ }
88399+#endif
88400+
88401 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
88402 if (error & ~PAGE_MASK)
88403 return error;
88404
88405+ charged = len >> PAGE_SHIFT;
88406+
88407 /*
88408 * mlock MCL_FUTURE?
88409 */
88410 if (mm->def_flags & VM_LOCKED) {
88411 unsigned long locked, lock_limit;
88412- locked = len >> PAGE_SHIFT;
88413+ locked = charged;
88414 locked += mm->locked_vm;
88415 lock_limit = rlimit(RLIMIT_MEMLOCK);
88416 lock_limit >>= PAGE_SHIFT;
88417@@ -2644,21 +3141,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
88418 /*
88419 * Clear old maps. this also does some error checking for us
88420 */
88421- munmap_back:
88422 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
88423 if (do_munmap(mm, addr, len))
88424 return -ENOMEM;
88425- goto munmap_back;
88426+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
88427 }
88428
88429 /* Check against address space limits *after* clearing old maps... */
88430- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
88431+ if (!may_expand_vm(mm, charged))
88432 return -ENOMEM;
88433
88434 if (mm->map_count > sysctl_max_map_count)
88435 return -ENOMEM;
88436
88437- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
88438+ if (security_vm_enough_memory_mm(mm, charged))
88439 return -ENOMEM;
88440
88441 /* Can we just expand an old private anonymous mapping? */
88442@@ -2672,7 +3168,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
88443 */
88444 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
88445 if (!vma) {
88446- vm_unacct_memory(len >> PAGE_SHIFT);
88447+ vm_unacct_memory(charged);
88448 return -ENOMEM;
88449 }
88450
88451@@ -2686,9 +3182,10 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
88452 vma_link(mm, vma, prev, rb_link, rb_parent);
88453 out:
88454 perf_event_mmap(vma);
88455- mm->total_vm += len >> PAGE_SHIFT;
88456+ mm->total_vm += charged;
88457 if (flags & VM_LOCKED)
88458- mm->locked_vm += (len >> PAGE_SHIFT);
88459+ mm->locked_vm += charged;
88460+ track_exec_limit(mm, addr, addr + len, flags);
88461 return addr;
88462 }
88463
88464@@ -2750,6 +3247,7 @@ void exit_mmap(struct mm_struct *mm)
88465 while (vma) {
88466 if (vma->vm_flags & VM_ACCOUNT)
88467 nr_accounted += vma_pages(vma);
88468+ vma->vm_mirror = NULL;
88469 vma = remove_vma(vma);
88470 }
88471 vm_unacct_memory(nr_accounted);
88472@@ -2766,6 +3264,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
88473 struct vm_area_struct *prev;
88474 struct rb_node **rb_link, *rb_parent;
88475
88476+#ifdef CONFIG_PAX_SEGMEXEC
88477+ struct vm_area_struct *vma_m = NULL;
88478+#endif
88479+
88480+ if (security_mmap_addr(vma->vm_start))
88481+ return -EPERM;
88482+
88483 /*
88484 * The vm_pgoff of a purely anonymous vma should be irrelevant
88485 * until its first write fault, when page's anon_vma and index
88486@@ -2789,7 +3294,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
88487 security_vm_enough_memory_mm(mm, vma_pages(vma)))
88488 return -ENOMEM;
88489
88490+#ifdef CONFIG_PAX_SEGMEXEC
88491+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
88492+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
88493+ if (!vma_m)
88494+ return -ENOMEM;
88495+ }
88496+#endif
88497+
88498 vma_link(mm, vma, prev, rb_link, rb_parent);
88499+
88500+#ifdef CONFIG_PAX_SEGMEXEC
88501+ if (vma_m)
88502+ BUG_ON(pax_mirror_vma(vma_m, vma));
88503+#endif
88504+
88505 return 0;
88506 }
88507
88508@@ -2809,6 +3328,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
88509 struct mempolicy *pol;
88510 bool faulted_in_anon_vma = true;
88511
88512+ BUG_ON(vma->vm_mirror);
88513+
88514 /*
88515 * If anonymous vma has not yet been faulted, update new pgoff
88516 * to match new location, to increase its chance of merging.
88517@@ -2875,6 +3396,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
88518 return NULL;
88519 }
88520
88521+#ifdef CONFIG_PAX_SEGMEXEC
88522+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
88523+{
88524+ struct vm_area_struct *prev_m;
88525+ struct rb_node **rb_link_m, *rb_parent_m;
88526+ struct mempolicy *pol_m;
88527+
88528+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
88529+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
88530+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
88531+ *vma_m = *vma;
88532+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
88533+ if (anon_vma_clone(vma_m, vma))
88534+ return -ENOMEM;
88535+ pol_m = vma_policy(vma_m);
88536+ mpol_get(pol_m);
88537+ vma_set_policy(vma_m, pol_m);
88538+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
88539+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
88540+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
88541+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
88542+ if (vma_m->vm_file)
88543+ get_file(vma_m->vm_file);
88544+ if (vma_m->vm_ops && vma_m->vm_ops->open)
88545+ vma_m->vm_ops->open(vma_m);
88546+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
88547+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
88548+ vma_m->vm_mirror = vma;
88549+ vma->vm_mirror = vma_m;
88550+ return 0;
88551+}
88552+#endif
88553+
88554 /*
88555 * Return true if the calling process may expand its vm space by the passed
88556 * number of pages
88557@@ -2886,6 +3440,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
88558
88559 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
88560
88561+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
88562 if (cur + npages > lim)
88563 return 0;
88564 return 1;
88565@@ -2956,6 +3511,22 @@ int install_special_mapping(struct mm_struct *mm,
88566 vma->vm_start = addr;
88567 vma->vm_end = addr + len;
88568
88569+#ifdef CONFIG_PAX_MPROTECT
88570+ if (mm->pax_flags & MF_PAX_MPROTECT) {
88571+#ifndef CONFIG_PAX_MPROTECT_COMPAT
88572+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
88573+ return -EPERM;
88574+ if (!(vm_flags & VM_EXEC))
88575+ vm_flags &= ~VM_MAYEXEC;
88576+#else
88577+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
88578+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
88579+#endif
88580+ else
88581+ vm_flags &= ~VM_MAYWRITE;
88582+ }
88583+#endif
88584+
88585 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
88586 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
88587
88588diff --git a/mm/mprotect.c b/mm/mprotect.c
88589index 94722a4..e661e29 100644
88590--- a/mm/mprotect.c
88591+++ b/mm/mprotect.c
88592@@ -23,10 +23,18 @@
88593 #include <linux/mmu_notifier.h>
88594 #include <linux/migrate.h>
88595 #include <linux/perf_event.h>
88596+#include <linux/sched/sysctl.h>
88597+
88598+#ifdef CONFIG_PAX_MPROTECT
88599+#include <linux/elf.h>
88600+#include <linux/binfmts.h>
88601+#endif
88602+
88603 #include <asm/uaccess.h>
88604 #include <asm/pgtable.h>
88605 #include <asm/cacheflush.h>
88606 #include <asm/tlbflush.h>
88607+#include <asm/mmu_context.h>
88608
88609 #ifndef pgprot_modify
88610 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
88611@@ -233,6 +241,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
88612 return pages;
88613 }
88614
88615+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
88616+/* called while holding the mmap semaphor for writing except stack expansion */
88617+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
88618+{
88619+ unsigned long oldlimit, newlimit = 0UL;
88620+
88621+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
88622+ return;
88623+
88624+ spin_lock(&mm->page_table_lock);
88625+ oldlimit = mm->context.user_cs_limit;
88626+ if ((prot & VM_EXEC) && oldlimit < end)
88627+ /* USER_CS limit moved up */
88628+ newlimit = end;
88629+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
88630+ /* USER_CS limit moved down */
88631+ newlimit = start;
88632+
88633+ if (newlimit) {
88634+ mm->context.user_cs_limit = newlimit;
88635+
88636+#ifdef CONFIG_SMP
88637+ wmb();
88638+ cpus_clear(mm->context.cpu_user_cs_mask);
88639+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
88640+#endif
88641+
88642+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
88643+ }
88644+ spin_unlock(&mm->page_table_lock);
88645+ if (newlimit == end) {
88646+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
88647+
88648+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
88649+ if (is_vm_hugetlb_page(vma))
88650+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
88651+ else
88652+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
88653+ }
88654+}
88655+#endif
88656+
88657 int
88658 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
88659 unsigned long start, unsigned long end, unsigned long newflags)
88660@@ -245,11 +295,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
88661 int error;
88662 int dirty_accountable = 0;
88663
88664+#ifdef CONFIG_PAX_SEGMEXEC
88665+ struct vm_area_struct *vma_m = NULL;
88666+ unsigned long start_m, end_m;
88667+
88668+ start_m = start + SEGMEXEC_TASK_SIZE;
88669+ end_m = end + SEGMEXEC_TASK_SIZE;
88670+#endif
88671+
88672 if (newflags == oldflags) {
88673 *pprev = vma;
88674 return 0;
88675 }
88676
88677+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
88678+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
88679+
88680+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
88681+ return -ENOMEM;
88682+
88683+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
88684+ return -ENOMEM;
88685+ }
88686+
88687 /*
88688 * If we make a private mapping writable we increase our commit;
88689 * but (without finer accounting) cannot reduce our commit if we
88690@@ -266,6 +334,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
88691 }
88692 }
88693
88694+#ifdef CONFIG_PAX_SEGMEXEC
88695+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
88696+ if (start != vma->vm_start) {
88697+ error = split_vma(mm, vma, start, 1);
88698+ if (error)
88699+ goto fail;
88700+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
88701+ *pprev = (*pprev)->vm_next;
88702+ }
88703+
88704+ if (end != vma->vm_end) {
88705+ error = split_vma(mm, vma, end, 0);
88706+ if (error)
88707+ goto fail;
88708+ }
88709+
88710+ if (pax_find_mirror_vma(vma)) {
88711+ error = __do_munmap(mm, start_m, end_m - start_m);
88712+ if (error)
88713+ goto fail;
88714+ } else {
88715+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
88716+ if (!vma_m) {
88717+ error = -ENOMEM;
88718+ goto fail;
88719+ }
88720+ vma->vm_flags = newflags;
88721+ error = pax_mirror_vma(vma_m, vma);
88722+ if (error) {
88723+ vma->vm_flags = oldflags;
88724+ goto fail;
88725+ }
88726+ }
88727+ }
88728+#endif
88729+
88730 /*
88731 * First try to merge with previous and/or next vma.
88732 */
88733@@ -296,9 +400,21 @@ success:
88734 * vm_flags and vm_page_prot are protected by the mmap_sem
88735 * held in write mode.
88736 */
88737+
88738+#ifdef CONFIG_PAX_SEGMEXEC
88739+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
88740+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
88741+#endif
88742+
88743 vma->vm_flags = newflags;
88744+
88745+#ifdef CONFIG_PAX_MPROTECT
88746+ if (mm->binfmt && mm->binfmt->handle_mprotect)
88747+ mm->binfmt->handle_mprotect(vma, newflags);
88748+#endif
88749+
88750 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
88751- vm_get_page_prot(newflags));
88752+ vm_get_page_prot(vma->vm_flags));
88753
88754 if (vma_wants_writenotify(vma)) {
88755 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
88756@@ -337,6 +453,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
88757 end = start + len;
88758 if (end <= start)
88759 return -ENOMEM;
88760+
88761+#ifdef CONFIG_PAX_SEGMEXEC
88762+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
88763+ if (end > SEGMEXEC_TASK_SIZE)
88764+ return -EINVAL;
88765+ } else
88766+#endif
88767+
88768+ if (end > TASK_SIZE)
88769+ return -EINVAL;
88770+
88771 if (!arch_validate_prot(prot))
88772 return -EINVAL;
88773
88774@@ -344,7 +471,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
88775 /*
88776 * Does the application expect PROT_READ to imply PROT_EXEC:
88777 */
88778- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
88779+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
88780 prot |= PROT_EXEC;
88781
88782 vm_flags = calc_vm_prot_bits(prot);
88783@@ -376,6 +503,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
88784 if (start > vma->vm_start)
88785 prev = vma;
88786
88787+#ifdef CONFIG_PAX_MPROTECT
88788+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
88789+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
88790+#endif
88791+
88792 for (nstart = start ; ; ) {
88793 unsigned long newflags;
88794
88795@@ -386,6 +518,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
88796
88797 /* newflags >> 4 shift VM_MAY% in place of VM_% */
88798 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
88799+ if (prot & (PROT_WRITE | PROT_EXEC))
88800+ gr_log_rwxmprotect(vma);
88801+
88802+ error = -EACCES;
88803+ goto out;
88804+ }
88805+
88806+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
88807 error = -EACCES;
88808 goto out;
88809 }
88810@@ -400,6 +540,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
88811 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
88812 if (error)
88813 goto out;
88814+
88815+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
88816+
88817 nstart = tmp;
88818
88819 if (nstart < prev->vm_end)
88820diff --git a/mm/mremap.c b/mm/mremap.c
88821index 463a257..c0c7a92 100644
88822--- a/mm/mremap.c
88823+++ b/mm/mremap.c
88824@@ -126,6 +126,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
88825 continue;
88826 pte = ptep_get_and_clear(mm, old_addr, old_pte);
88827 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
88828+
88829+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
88830+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
88831+ pte = pte_exprotect(pte);
88832+#endif
88833+
88834 set_pte_at(mm, new_addr, new_pte, pte);
88835 }
88836
88837@@ -318,6 +324,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
88838 if (is_vm_hugetlb_page(vma))
88839 goto Einval;
88840
88841+#ifdef CONFIG_PAX_SEGMEXEC
88842+ if (pax_find_mirror_vma(vma))
88843+ goto Einval;
88844+#endif
88845+
88846 /* We can't remap across vm area boundaries */
88847 if (old_len > vma->vm_end - addr)
88848 goto Efault;
88849@@ -373,20 +384,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
88850 unsigned long ret = -EINVAL;
88851 unsigned long charged = 0;
88852 unsigned long map_flags;
88853+ unsigned long pax_task_size = TASK_SIZE;
88854
88855 if (new_addr & ~PAGE_MASK)
88856 goto out;
88857
88858- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
88859+#ifdef CONFIG_PAX_SEGMEXEC
88860+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
88861+ pax_task_size = SEGMEXEC_TASK_SIZE;
88862+#endif
88863+
88864+ pax_task_size -= PAGE_SIZE;
88865+
88866+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
88867 goto out;
88868
88869 /* Check if the location we're moving into overlaps the
88870 * old location at all, and fail if it does.
88871 */
88872- if ((new_addr <= addr) && (new_addr+new_len) > addr)
88873- goto out;
88874-
88875- if ((addr <= new_addr) && (addr+old_len) > new_addr)
88876+ if (addr + old_len > new_addr && new_addr + new_len > addr)
88877 goto out;
88878
88879 ret = do_munmap(mm, new_addr, new_len);
88880@@ -455,6 +471,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
88881 unsigned long ret = -EINVAL;
88882 unsigned long charged = 0;
88883 bool locked = false;
88884+ unsigned long pax_task_size = TASK_SIZE;
88885
88886 down_write(&current->mm->mmap_sem);
88887
88888@@ -475,6 +492,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
88889 if (!new_len)
88890 goto out;
88891
88892+#ifdef CONFIG_PAX_SEGMEXEC
88893+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
88894+ pax_task_size = SEGMEXEC_TASK_SIZE;
88895+#endif
88896+
88897+ pax_task_size -= PAGE_SIZE;
88898+
88899+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
88900+ old_len > pax_task_size || addr > pax_task_size-old_len)
88901+ goto out;
88902+
88903 if (flags & MREMAP_FIXED) {
88904 if (flags & MREMAP_MAYMOVE)
88905 ret = mremap_to(addr, old_len, new_addr, new_len,
88906@@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
88907 new_addr = addr;
88908 }
88909 ret = addr;
88910+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
88911 goto out;
88912 }
88913 }
88914@@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
88915 goto out;
88916 }
88917
88918+ map_flags = vma->vm_flags;
88919 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
88920+ if (!(ret & ~PAGE_MASK)) {
88921+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
88922+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
88923+ }
88924 }
88925 out:
88926 if (ret & ~PAGE_MASK)
88927diff --git a/mm/nommu.c b/mm/nommu.c
88928index 298884d..5f74980 100644
88929--- a/mm/nommu.c
88930+++ b/mm/nommu.c
88931@@ -65,7 +65,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
88932 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
88933 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
88934 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
88935-int heap_stack_gap = 0;
88936
88937 atomic_long_t mmap_pages_allocated;
88938
88939@@ -842,15 +841,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
88940 EXPORT_SYMBOL(find_vma);
88941
88942 /*
88943- * find a VMA
88944- * - we don't extend stack VMAs under NOMMU conditions
88945- */
88946-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
88947-{
88948- return find_vma(mm, addr);
88949-}
88950-
88951-/*
88952 * expand a stack to a given address
88953 * - not supported under NOMMU conditions
88954 */
88955@@ -1561,6 +1551,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
88956
88957 /* most fields are the same, copy all, and then fixup */
88958 *new = *vma;
88959+ INIT_LIST_HEAD(&new->anon_vma_chain);
88960 *region = *vma->vm_region;
88961 new->vm_region = region;
88962
88963@@ -1995,8 +1986,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
88964 }
88965 EXPORT_SYMBOL(generic_file_remap_pages);
88966
88967-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
88968- unsigned long addr, void *buf, int len, int write)
88969+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
88970+ unsigned long addr, void *buf, size_t len, int write)
88971 {
88972 struct vm_area_struct *vma;
88973
88974@@ -2037,8 +2028,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
88975 *
88976 * The caller must hold a reference on @mm.
88977 */
88978-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
88979- void *buf, int len, int write)
88980+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
88981+ void *buf, size_t len, int write)
88982 {
88983 return __access_remote_vm(NULL, mm, addr, buf, len, write);
88984 }
88985@@ -2047,7 +2038,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
88986 * Access another process' address space.
88987 * - source/target buffer must be kernel space
88988 */
88989-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
88990+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
88991 {
88992 struct mm_struct *mm;
88993
88994diff --git a/mm/page-writeback.c b/mm/page-writeback.c
88995index 4514ad7..92eaa1c 100644
88996--- a/mm/page-writeback.c
88997+++ b/mm/page-writeback.c
88998@@ -659,7 +659,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
88999 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
89000 * - the bdi dirty thresh drops quickly due to change of JBOD workload
89001 */
89002-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
89003+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
89004 unsigned long thresh,
89005 unsigned long bg_thresh,
89006 unsigned long dirty,
89007@@ -1634,7 +1634,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
89008 }
89009 }
89010
89011-static struct notifier_block __cpuinitdata ratelimit_nb = {
89012+static struct notifier_block ratelimit_nb = {
89013 .notifier_call = ratelimit_handler,
89014 .next = NULL,
89015 };
89016diff --git a/mm/page_alloc.c b/mm/page_alloc.c
89017index 2ee0fd3..6e2edfb 100644
89018--- a/mm/page_alloc.c
89019+++ b/mm/page_alloc.c
89020@@ -60,6 +60,7 @@
89021 #include <linux/page-debug-flags.h>
89022 #include <linux/hugetlb.h>
89023 #include <linux/sched/rt.h>
89024+#include <linux/random.h>
89025
89026 #include <asm/tlbflush.h>
89027 #include <asm/div64.h>
89028@@ -345,7 +346,7 @@ out:
89029 * This usage means that zero-order pages may not be compound.
89030 */
89031
89032-static void free_compound_page(struct page *page)
89033+void free_compound_page(struct page *page)
89034 {
89035 __free_pages_ok(page, compound_order(page));
89036 }
89037@@ -702,6 +703,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
89038 int i;
89039 int bad = 0;
89040
89041+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89042+ unsigned long index = 1UL << order;
89043+#endif
89044+
89045 trace_mm_page_free(page, order);
89046 kmemcheck_free_shadow(page, order);
89047
89048@@ -717,6 +722,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
89049 debug_check_no_obj_freed(page_address(page),
89050 PAGE_SIZE << order);
89051 }
89052+
89053+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89054+ for (; index; --index)
89055+ sanitize_highpage(page + index - 1);
89056+#endif
89057+
89058 arch_free_page(page, order);
89059 kernel_map_pages(page, 1 << order, 0);
89060
89061@@ -739,6 +750,19 @@ static void __free_pages_ok(struct page *page, unsigned int order)
89062 local_irq_restore(flags);
89063 }
89064
89065+#ifdef CONFIG_PAX_LATENT_ENTROPY
89066+bool __meminitdata extra_latent_entropy;
89067+
89068+static int __init setup_pax_extra_latent_entropy(char *str)
89069+{
89070+ extra_latent_entropy = true;
89071+ return 0;
89072+}
89073+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
89074+
89075+volatile u64 latent_entropy;
89076+#endif
89077+
89078 /*
89079 * Read access to zone->managed_pages is safe because it's unsigned long,
89080 * but we still need to serialize writers. Currently all callers of
89081@@ -761,6 +785,19 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
89082 set_page_count(p, 0);
89083 }
89084
89085+#ifdef CONFIG_PAX_LATENT_ENTROPY
89086+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
89087+ u64 hash = 0;
89088+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
89089+ const u64 *data = lowmem_page_address(page);
89090+
89091+ for (index = 0; index < end; index++)
89092+ hash ^= hash + data[index];
89093+ latent_entropy ^= hash;
89094+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
89095+ }
89096+#endif
89097+
89098 page_zone(page)->managed_pages += 1 << order;
89099 set_page_refcounted(page);
89100 __free_pages(page, order);
89101@@ -870,8 +907,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
89102 arch_alloc_page(page, order);
89103 kernel_map_pages(page, 1 << order, 1);
89104
89105+#ifndef CONFIG_PAX_MEMORY_SANITIZE
89106 if (gfp_flags & __GFP_ZERO)
89107 prep_zero_page(page, order, gfp_flags);
89108+#endif
89109
89110 if (order && (gfp_flags & __GFP_COMP))
89111 prep_compound_page(page, order);
89112diff --git a/mm/page_io.c b/mm/page_io.c
89113index a8a3ef4..7260a60 100644
89114--- a/mm/page_io.c
89115+++ b/mm/page_io.c
89116@@ -214,7 +214,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
89117 struct file *swap_file = sis->swap_file;
89118 struct address_space *mapping = swap_file->f_mapping;
89119 struct iovec iov = {
89120- .iov_base = kmap(page),
89121+ .iov_base = (void __force_user *)kmap(page),
89122 .iov_len = PAGE_SIZE,
89123 };
89124
89125diff --git a/mm/percpu.c b/mm/percpu.c
89126index 8c8e08f..73a5cda 100644
89127--- a/mm/percpu.c
89128+++ b/mm/percpu.c
89129@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
89130 static unsigned int pcpu_high_unit_cpu __read_mostly;
89131
89132 /* the address of the first chunk which starts with the kernel static area */
89133-void *pcpu_base_addr __read_mostly;
89134+void *pcpu_base_addr __read_only;
89135 EXPORT_SYMBOL_GPL(pcpu_base_addr);
89136
89137 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
89138diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
89139index fd26d04..0cea1b0 100644
89140--- a/mm/process_vm_access.c
89141+++ b/mm/process_vm_access.c
89142@@ -13,6 +13,7 @@
89143 #include <linux/uio.h>
89144 #include <linux/sched.h>
89145 #include <linux/highmem.h>
89146+#include <linux/security.h>
89147 #include <linux/ptrace.h>
89148 #include <linux/slab.h>
89149 #include <linux/syscalls.h>
89150@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
89151 size_t iov_l_curr_offset = 0;
89152 ssize_t iov_len;
89153
89154+ return -ENOSYS; // PaX: until properly audited
89155+
89156 /*
89157 * Work out how many pages of struct pages we're going to need
89158 * when eventually calling get_user_pages
89159 */
89160 for (i = 0; i < riovcnt; i++) {
89161 iov_len = rvec[i].iov_len;
89162- if (iov_len > 0) {
89163- nr_pages_iov = ((unsigned long)rvec[i].iov_base
89164- + iov_len)
89165- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
89166- / PAGE_SIZE + 1;
89167- nr_pages = max(nr_pages, nr_pages_iov);
89168- }
89169+ if (iov_len <= 0)
89170+ continue;
89171+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
89172+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
89173+ nr_pages = max(nr_pages, nr_pages_iov);
89174 }
89175
89176 if (nr_pages == 0)
89177@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
89178 goto free_proc_pages;
89179 }
89180
89181+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
89182+ rc = -EPERM;
89183+ goto put_task_struct;
89184+ }
89185+
89186 mm = mm_access(task, PTRACE_MODE_ATTACH);
89187 if (!mm || IS_ERR(mm)) {
89188 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
89189diff --git a/mm/rmap.c b/mm/rmap.c
89190index 6280da8..b5c090e 100644
89191--- a/mm/rmap.c
89192+++ b/mm/rmap.c
89193@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
89194 struct anon_vma *anon_vma = vma->anon_vma;
89195 struct anon_vma_chain *avc;
89196
89197+#ifdef CONFIG_PAX_SEGMEXEC
89198+ struct anon_vma_chain *avc_m = NULL;
89199+#endif
89200+
89201 might_sleep();
89202 if (unlikely(!anon_vma)) {
89203 struct mm_struct *mm = vma->vm_mm;
89204@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
89205 if (!avc)
89206 goto out_enomem;
89207
89208+#ifdef CONFIG_PAX_SEGMEXEC
89209+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
89210+ if (!avc_m)
89211+ goto out_enomem_free_avc;
89212+#endif
89213+
89214 anon_vma = find_mergeable_anon_vma(vma);
89215 allocated = NULL;
89216 if (!anon_vma) {
89217@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
89218 /* page_table_lock to protect against threads */
89219 spin_lock(&mm->page_table_lock);
89220 if (likely(!vma->anon_vma)) {
89221+
89222+#ifdef CONFIG_PAX_SEGMEXEC
89223+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
89224+
89225+ if (vma_m) {
89226+ BUG_ON(vma_m->anon_vma);
89227+ vma_m->anon_vma = anon_vma;
89228+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
89229+ avc_m = NULL;
89230+ }
89231+#endif
89232+
89233 vma->anon_vma = anon_vma;
89234 anon_vma_chain_link(vma, avc, anon_vma);
89235 allocated = NULL;
89236@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
89237
89238 if (unlikely(allocated))
89239 put_anon_vma(allocated);
89240+
89241+#ifdef CONFIG_PAX_SEGMEXEC
89242+ if (unlikely(avc_m))
89243+ anon_vma_chain_free(avc_m);
89244+#endif
89245+
89246 if (unlikely(avc))
89247 anon_vma_chain_free(avc);
89248 }
89249 return 0;
89250
89251 out_enomem_free_avc:
89252+
89253+#ifdef CONFIG_PAX_SEGMEXEC
89254+ if (avc_m)
89255+ anon_vma_chain_free(avc_m);
89256+#endif
89257+
89258 anon_vma_chain_free(avc);
89259 out_enomem:
89260 return -ENOMEM;
89261@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
89262 * Attach the anon_vmas from src to dst.
89263 * Returns 0 on success, -ENOMEM on failure.
89264 */
89265-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
89266+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
89267 {
89268 struct anon_vma_chain *avc, *pavc;
89269 struct anon_vma *root = NULL;
89270@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
89271 * the corresponding VMA in the parent process is attached to.
89272 * Returns 0 on success, non-zero on failure.
89273 */
89274-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
89275+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
89276 {
89277 struct anon_vma_chain *avc;
89278 struct anon_vma *anon_vma;
89279@@ -373,8 +407,10 @@ static void anon_vma_ctor(void *data)
89280 void __init anon_vma_init(void)
89281 {
89282 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
89283- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
89284- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
89285+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
89286+ anon_vma_ctor);
89287+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
89288+ SLAB_PANIC|SLAB_NO_SANITIZE);
89289 }
89290
89291 /*
89292diff --git a/mm/shmem.c b/mm/shmem.c
89293index 5e6a842..b41916e 100644
89294--- a/mm/shmem.c
89295+++ b/mm/shmem.c
89296@@ -33,7 +33,7 @@
89297 #include <linux/swap.h>
89298 #include <linux/aio.h>
89299
89300-static struct vfsmount *shm_mnt;
89301+struct vfsmount *shm_mnt;
89302
89303 #ifdef CONFIG_SHMEM
89304 /*
89305@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
89306 #define BOGO_DIRENT_SIZE 20
89307
89308 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
89309-#define SHORT_SYMLINK_LEN 128
89310+#define SHORT_SYMLINK_LEN 64
89311
89312 /*
89313 * shmem_fallocate and shmem_writepage communicate via inode->i_private
89314@@ -2203,6 +2203,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
89315 static int shmem_xattr_validate(const char *name)
89316 {
89317 struct { const char *prefix; size_t len; } arr[] = {
89318+
89319+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
89320+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
89321+#endif
89322+
89323 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
89324 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
89325 };
89326@@ -2258,6 +2263,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
89327 if (err)
89328 return err;
89329
89330+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
89331+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
89332+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
89333+ return -EOPNOTSUPP;
89334+ if (size > 8)
89335+ return -EINVAL;
89336+ }
89337+#endif
89338+
89339 return simple_xattr_set(&info->xattrs, name, value, size, flags);
89340 }
89341
89342@@ -2570,8 +2584,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
89343 int err = -ENOMEM;
89344
89345 /* Round up to L1_CACHE_BYTES to resist false sharing */
89346- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
89347- L1_CACHE_BYTES), GFP_KERNEL);
89348+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
89349 if (!sbinfo)
89350 return -ENOMEM;
89351
89352diff --git a/mm/slab.c b/mm/slab.c
89353index bd88411..2d46fd6 100644
89354--- a/mm/slab.c
89355+++ b/mm/slab.c
89356@@ -366,10 +366,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
89357 if ((x)->max_freeable < i) \
89358 (x)->max_freeable = i; \
89359 } while (0)
89360-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
89361-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
89362-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
89363-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
89364+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
89365+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
89366+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
89367+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
89368+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
89369+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
89370 #else
89371 #define STATS_INC_ACTIVE(x) do { } while (0)
89372 #define STATS_DEC_ACTIVE(x) do { } while (0)
89373@@ -386,6 +388,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
89374 #define STATS_INC_ALLOCMISS(x) do { } while (0)
89375 #define STATS_INC_FREEHIT(x) do { } while (0)
89376 #define STATS_INC_FREEMISS(x) do { } while (0)
89377+#define STATS_INC_SANITIZED(x) do { } while (0)
89378+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
89379 #endif
89380
89381 #if DEBUG
89382@@ -477,7 +481,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
89383 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
89384 */
89385 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
89386- const struct slab *slab, void *obj)
89387+ const struct slab *slab, const void *obj)
89388 {
89389 u32 offset = (obj - slab->s_mem);
89390 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
89391@@ -1384,7 +1388,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
89392 return notifier_from_errno(err);
89393 }
89394
89395-static struct notifier_block __cpuinitdata cpucache_notifier = {
89396+static struct notifier_block cpucache_notifier = {
89397 &cpuup_callback, NULL, 0
89398 };
89399
89400@@ -1565,12 +1569,12 @@ void __init kmem_cache_init(void)
89401 */
89402
89403 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
89404- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
89405+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
89406
89407 if (INDEX_AC != INDEX_NODE)
89408 kmalloc_caches[INDEX_NODE] =
89409 create_kmalloc_cache("kmalloc-node",
89410- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
89411+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
89412
89413 slab_early_init = 0;
89414
89415@@ -3583,6 +3587,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
89416 struct array_cache *ac = cpu_cache_get(cachep);
89417
89418 check_irq_off();
89419+
89420+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89421+ if (pax_sanitize_slab) {
89422+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
89423+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
89424+
89425+ if (cachep->ctor)
89426+ cachep->ctor(objp);
89427+
89428+ STATS_INC_SANITIZED(cachep);
89429+ } else
89430+ STATS_INC_NOT_SANITIZED(cachep);
89431+ }
89432+#endif
89433+
89434 kmemleak_free_recursive(objp, cachep->flags);
89435 objp = cache_free_debugcheck(cachep, objp, caller);
89436
89437@@ -3800,6 +3819,7 @@ void kfree(const void *objp)
89438
89439 if (unlikely(ZERO_OR_NULL_PTR(objp)))
89440 return;
89441+ VM_BUG_ON(!virt_addr_valid(objp));
89442 local_irq_save(flags);
89443 kfree_debugcheck(objp);
89444 c = virt_to_cache(objp);
89445@@ -4241,14 +4261,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
89446 }
89447 /* cpu stats */
89448 {
89449- unsigned long allochit = atomic_read(&cachep->allochit);
89450- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
89451- unsigned long freehit = atomic_read(&cachep->freehit);
89452- unsigned long freemiss = atomic_read(&cachep->freemiss);
89453+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
89454+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
89455+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
89456+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
89457
89458 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
89459 allochit, allocmiss, freehit, freemiss);
89460 }
89461+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89462+ {
89463+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
89464+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
89465+
89466+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
89467+ }
89468+#endif
89469 #endif
89470 }
89471
89472@@ -4476,13 +4504,71 @@ static const struct file_operations proc_slabstats_operations = {
89473 static int __init slab_proc_init(void)
89474 {
89475 #ifdef CONFIG_DEBUG_SLAB_LEAK
89476- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
89477+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
89478 #endif
89479 return 0;
89480 }
89481 module_init(slab_proc_init);
89482 #endif
89483
89484+bool is_usercopy_object(const void *ptr)
89485+{
89486+ struct page *page;
89487+ struct kmem_cache *cachep;
89488+
89489+ if (ZERO_OR_NULL_PTR(ptr))
89490+ return false;
89491+
89492+ if (!slab_is_available())
89493+ return false;
89494+
89495+ if (!virt_addr_valid(ptr))
89496+ return false;
89497+
89498+ page = virt_to_head_page(ptr);
89499+
89500+ if (!PageSlab(page))
89501+ return false;
89502+
89503+ cachep = page->slab_cache;
89504+ return cachep->flags & SLAB_USERCOPY;
89505+}
89506+
89507+#ifdef CONFIG_PAX_USERCOPY
89508+const char *check_heap_object(const void *ptr, unsigned long n)
89509+{
89510+ struct page *page;
89511+ struct kmem_cache *cachep;
89512+ struct slab *slabp;
89513+ unsigned int objnr;
89514+ unsigned long offset;
89515+
89516+ if (ZERO_OR_NULL_PTR(ptr))
89517+ return "<null>";
89518+
89519+ if (!virt_addr_valid(ptr))
89520+ return NULL;
89521+
89522+ page = virt_to_head_page(ptr);
89523+
89524+ if (!PageSlab(page))
89525+ return NULL;
89526+
89527+ cachep = page->slab_cache;
89528+ if (!(cachep->flags & SLAB_USERCOPY))
89529+ return cachep->name;
89530+
89531+ slabp = page->slab_page;
89532+ objnr = obj_to_index(cachep, slabp, ptr);
89533+ BUG_ON(objnr >= cachep->num);
89534+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
89535+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
89536+ return NULL;
89537+
89538+ return cachep->name;
89539+}
89540+#endif
89541+
89542 /**
89543 * ksize - get the actual amount of memory allocated for a given object
89544 * @objp: Pointer to the object
89545diff --git a/mm/slab.h b/mm/slab.h
89546index f96b49e..db1d204 100644
89547--- a/mm/slab.h
89548+++ b/mm/slab.h
89549@@ -32,6 +32,15 @@ extern struct list_head slab_caches;
89550 /* The slab cache that manages slab cache information */
89551 extern struct kmem_cache *kmem_cache;
89552
89553+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89554+#ifdef CONFIG_X86_64
89555+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
89556+#else
89557+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
89558+#endif
89559+extern bool pax_sanitize_slab;
89560+#endif
89561+
89562 unsigned long calculate_alignment(unsigned long flags,
89563 unsigned long align, unsigned long size);
89564
89565@@ -67,7 +76,8 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
89566
89567 /* Legal flag mask for kmem_cache_create(), for various configurations */
89568 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
89569- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
89570+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
89571+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
89572
89573 #if defined(CONFIG_DEBUG_SLAB)
89574 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
89575@@ -229,6 +239,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
89576 return s;
89577
89578 page = virt_to_head_page(x);
89579+
89580+ BUG_ON(!PageSlab(page));
89581+
89582 cachep = page->slab_cache;
89583 if (slab_equal_or_root(cachep, s))
89584 return cachep;
89585diff --git a/mm/slab_common.c b/mm/slab_common.c
89586index 2d41450..4efe6ee 100644
89587--- a/mm/slab_common.c
89588+++ b/mm/slab_common.c
89589@@ -22,11 +22,22 @@
89590
89591 #include "slab.h"
89592
89593-enum slab_state slab_state;
89594+enum slab_state slab_state __read_only;
89595 LIST_HEAD(slab_caches);
89596 DEFINE_MUTEX(slab_mutex);
89597 struct kmem_cache *kmem_cache;
89598
89599+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89600+bool pax_sanitize_slab __read_only = true;
89601+static int __init pax_sanitize_slab_setup(char *str)
89602+{
89603+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
89604+ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
89605+ return 1;
89606+}
89607+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
89608+#endif
89609+
89610 #ifdef CONFIG_DEBUG_VM
89611 static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
89612 size_t size)
89613@@ -209,7 +220,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
89614
89615 err = __kmem_cache_create(s, flags);
89616 if (!err) {
89617- s->refcount = 1;
89618+ atomic_set(&s->refcount, 1);
89619 list_add(&s->list, &slab_caches);
89620 memcg_cache_list_add(memcg, s);
89621 } else {
89622@@ -255,8 +266,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
89623
89624 get_online_cpus();
89625 mutex_lock(&slab_mutex);
89626- s->refcount--;
89627- if (!s->refcount) {
89628+ if (atomic_dec_and_test(&s->refcount)) {
89629 list_del(&s->list);
89630
89631 if (!__kmem_cache_shutdown(s)) {
89632@@ -302,7 +312,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
89633 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
89634 name, size, err);
89635
89636- s->refcount = -1; /* Exempt from merging for now */
89637+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
89638 }
89639
89640 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
89641@@ -315,7 +325,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
89642
89643 create_boot_cache(s, name, size, flags);
89644 list_add(&s->list, &slab_caches);
89645- s->refcount = 1;
89646+ atomic_set(&s->refcount, 1);
89647 return s;
89648 }
89649
89650@@ -327,6 +337,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
89651 EXPORT_SYMBOL(kmalloc_dma_caches);
89652 #endif
89653
89654+#ifdef CONFIG_PAX_USERCOPY_SLABS
89655+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
89656+EXPORT_SYMBOL(kmalloc_usercopy_caches);
89657+#endif
89658+
89659 /*
89660 * Conversion table for small slabs sizes / 8 to the index in the
89661 * kmalloc array. This is necessary for slabs < 192 since we have non power
89662@@ -391,6 +406,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
89663 return kmalloc_dma_caches[index];
89664
89665 #endif
89666+
89667+#ifdef CONFIG_PAX_USERCOPY_SLABS
89668+ if (unlikely((flags & GFP_USERCOPY)))
89669+ return kmalloc_usercopy_caches[index];
89670+
89671+#endif
89672+
89673 return kmalloc_caches[index];
89674 }
89675
89676@@ -447,7 +469,7 @@ void __init create_kmalloc_caches(unsigned long flags)
89677 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
89678 if (!kmalloc_caches[i]) {
89679 kmalloc_caches[i] = create_kmalloc_cache(NULL,
89680- 1 << i, flags);
89681+ 1 << i, SLAB_USERCOPY | flags);
89682 }
89683
89684 /*
89685@@ -456,10 +478,10 @@ void __init create_kmalloc_caches(unsigned long flags)
89686 * earlier power of two caches
89687 */
89688 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
89689- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
89690+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
89691
89692 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
89693- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
89694+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
89695 }
89696
89697 /* Kmalloc array is now usable */
89698@@ -492,6 +514,23 @@ void __init create_kmalloc_caches(unsigned long flags)
89699 }
89700 }
89701 #endif
89702+
89703+#ifdef CONFIG_PAX_USERCOPY_SLABS
89704+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
89705+ struct kmem_cache *s = kmalloc_caches[i];
89706+
89707+ if (s) {
89708+ int size = kmalloc_size(i);
89709+ char *n = kasprintf(GFP_NOWAIT,
89710+ "usercopy-kmalloc-%d", size);
89711+
89712+ BUG_ON(!n);
89713+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
89714+ size, SLAB_USERCOPY | flags);
89715+ }
89716+ }
89717+#endif
89718+
89719 }
89720 #endif /* !CONFIG_SLOB */
89721
89722@@ -516,6 +555,9 @@ void print_slabinfo_header(struct seq_file *m)
89723 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
89724 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
89725 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
89726+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89727+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
89728+#endif
89729 #endif
89730 seq_putc(m, '\n');
89731 }
89732diff --git a/mm/slob.c b/mm/slob.c
89733index eeed4a0..bb0e9ab 100644
89734--- a/mm/slob.c
89735+++ b/mm/slob.c
89736@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
89737 /*
89738 * Return the size of a slob block.
89739 */
89740-static slobidx_t slob_units(slob_t *s)
89741+static slobidx_t slob_units(const slob_t *s)
89742 {
89743 if (s->units > 0)
89744 return s->units;
89745@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
89746 /*
89747 * Return the next free slob block pointer after this one.
89748 */
89749-static slob_t *slob_next(slob_t *s)
89750+static slob_t *slob_next(const slob_t *s)
89751 {
89752 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
89753 slobidx_t next;
89754@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
89755 /*
89756 * Returns true if s is the last free block in its page.
89757 */
89758-static int slob_last(slob_t *s)
89759+static int slob_last(const slob_t *s)
89760 {
89761 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
89762 }
89763
89764-static void *slob_new_pages(gfp_t gfp, int order, int node)
89765+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
89766 {
89767- void *page;
89768+ struct page *page;
89769
89770 #ifdef CONFIG_NUMA
89771 if (node != NUMA_NO_NODE)
89772@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
89773 if (!page)
89774 return NULL;
89775
89776- return page_address(page);
89777+ __SetPageSlab(page);
89778+ return page;
89779 }
89780
89781-static void slob_free_pages(void *b, int order)
89782+static void slob_free_pages(struct page *sp, int order)
89783 {
89784 if (current->reclaim_state)
89785 current->reclaim_state->reclaimed_slab += 1 << order;
89786- free_pages((unsigned long)b, order);
89787+ __ClearPageSlab(sp);
89788+ page_mapcount_reset(sp);
89789+ sp->private = 0;
89790+ __free_pages(sp, order);
89791 }
89792
89793 /*
89794@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
89795
89796 /* Not enough space: must allocate a new page */
89797 if (!b) {
89798- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
89799- if (!b)
89800+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
89801+ if (!sp)
89802 return NULL;
89803- sp = virt_to_page(b);
89804- __SetPageSlab(sp);
89805+ b = page_address(sp);
89806
89807 spin_lock_irqsave(&slob_lock, flags);
89808 sp->units = SLOB_UNITS(PAGE_SIZE);
89809 sp->freelist = b;
89810+ sp->private = 0;
89811 INIT_LIST_HEAD(&sp->list);
89812 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
89813 set_slob_page_free(sp, slob_list);
89814@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
89815 if (slob_page_free(sp))
89816 clear_slob_page_free(sp);
89817 spin_unlock_irqrestore(&slob_lock, flags);
89818- __ClearPageSlab(sp);
89819- page_mapcount_reset(sp);
89820- slob_free_pages(b, 0);
89821+ slob_free_pages(sp, 0);
89822 return;
89823 }
89824
89825+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89826+ if (pax_sanitize_slab)
89827+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
89828+#endif
89829+
89830 if (!slob_page_free(sp)) {
89831 /* This slob page is about to become partially free. Easy! */
89832 sp->units = units;
89833@@ -424,11 +431,10 @@ out:
89834 */
89835
89836 static __always_inline void *
89837-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
89838+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
89839 {
89840- unsigned int *m;
89841- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
89842- void *ret;
89843+ slob_t *m;
89844+ void *ret = NULL;
89845
89846 gfp &= gfp_allowed_mask;
89847
89848@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
89849
89850 if (!m)
89851 return NULL;
89852- *m = size;
89853+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
89854+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
89855+ m[0].units = size;
89856+ m[1].units = align;
89857 ret = (void *)m + align;
89858
89859 trace_kmalloc_node(caller, ret,
89860 size, size + align, gfp, node);
89861 } else {
89862 unsigned int order = get_order(size);
89863+ struct page *page;
89864
89865 if (likely(order))
89866 gfp |= __GFP_COMP;
89867- ret = slob_new_pages(gfp, order, node);
89868+ page = slob_new_pages(gfp, order, node);
89869+ if (page) {
89870+ ret = page_address(page);
89871+ page->private = size;
89872+ }
89873
89874 trace_kmalloc_node(caller, ret,
89875 size, PAGE_SIZE << order, gfp, node);
89876 }
89877
89878- kmemleak_alloc(ret, size, 1, gfp);
89879+ return ret;
89880+}
89881+
89882+static __always_inline void *
89883+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
89884+{
89885+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
89886+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
89887+
89888+ if (!ZERO_OR_NULL_PTR(ret))
89889+ kmemleak_alloc(ret, size, 1, gfp);
89890 return ret;
89891 }
89892
89893@@ -493,34 +517,112 @@ void kfree(const void *block)
89894 return;
89895 kmemleak_free(block);
89896
89897+ VM_BUG_ON(!virt_addr_valid(block));
89898 sp = virt_to_page(block);
89899- if (PageSlab(sp)) {
89900+ VM_BUG_ON(!PageSlab(sp));
89901+ if (!sp->private) {
89902 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
89903- unsigned int *m = (unsigned int *)(block - align);
89904- slob_free(m, *m + align);
89905- } else
89906+ slob_t *m = (slob_t *)(block - align);
89907+ slob_free(m, m[0].units + align);
89908+ } else {
89909+ __ClearPageSlab(sp);
89910+ page_mapcount_reset(sp);
89911+ sp->private = 0;
89912 __free_pages(sp, compound_order(sp));
89913+ }
89914 }
89915 EXPORT_SYMBOL(kfree);
89916
89917+bool is_usercopy_object(const void *ptr)
89918+{
89919+ if (!slab_is_available())
89920+ return false;
89921+
89922+ // PAX: TODO
89923+
89924+ return false;
89925+}
89926+
89927+#ifdef CONFIG_PAX_USERCOPY
89928+const char *check_heap_object(const void *ptr, unsigned long n)
89929+{
89930+ struct page *page;
89931+ const slob_t *free;
89932+ const void *base;
89933+ unsigned long flags;
89934+
89935+ if (ZERO_OR_NULL_PTR(ptr))
89936+ return "<null>";
89937+
89938+ if (!virt_addr_valid(ptr))
89939+ return NULL;
89940+
89941+ page = virt_to_head_page(ptr);
89942+ if (!PageSlab(page))
89943+ return NULL;
89944+
89945+ if (page->private) {
89946+ base = page;
89947+ if (base <= ptr && n <= page->private - (ptr - base))
89948+ return NULL;
89949+ return "<slob>";
89950+ }
89951+
89952+ /* some tricky double walking to find the chunk */
89953+ spin_lock_irqsave(&slob_lock, flags);
89954+ base = (void *)((unsigned long)ptr & PAGE_MASK);
89955+ free = page->freelist;
89956+
89957+ while (!slob_last(free) && (void *)free <= ptr) {
89958+ base = free + slob_units(free);
89959+ free = slob_next(free);
89960+ }
89961+
89962+ while (base < (void *)free) {
89963+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
89964+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
89965+ int offset;
89966+
89967+ if (ptr < base + align)
89968+ break;
89969+
89970+ offset = ptr - base - align;
89971+ if (offset >= m) {
89972+ base += size;
89973+ continue;
89974+ }
89975+
89976+ if (n > m - offset)
89977+ break;
89978+
89979+ spin_unlock_irqrestore(&slob_lock, flags);
89980+ return NULL;
89981+ }
89982+
89983+ spin_unlock_irqrestore(&slob_lock, flags);
89984+ return "<slob>";
89985+}
89986+#endif
89987+
89988 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
89989 size_t ksize(const void *block)
89990 {
89991 struct page *sp;
89992 int align;
89993- unsigned int *m;
89994+ slob_t *m;
89995
89996 BUG_ON(!block);
89997 if (unlikely(block == ZERO_SIZE_PTR))
89998 return 0;
89999
90000 sp = virt_to_page(block);
90001- if (unlikely(!PageSlab(sp)))
90002- return PAGE_SIZE << compound_order(sp);
90003+ VM_BUG_ON(!PageSlab(sp));
90004+ if (sp->private)
90005+ return sp->private;
90006
90007 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
90008- m = (unsigned int *)(block - align);
90009- return SLOB_UNITS(*m) * SLOB_UNIT;
90010+ m = (slob_t *)(block - align);
90011+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
90012 }
90013 EXPORT_SYMBOL(ksize);
90014
90015@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
90016
90017 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
90018 {
90019- void *b;
90020+ void *b = NULL;
90021
90022 flags &= gfp_allowed_mask;
90023
90024 lockdep_trace_alloc(flags);
90025
90026+#ifdef CONFIG_PAX_USERCOPY_SLABS
90027+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
90028+#else
90029 if (c->size < PAGE_SIZE) {
90030 b = slob_alloc(c->size, flags, c->align, node);
90031 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
90032 SLOB_UNITS(c->size) * SLOB_UNIT,
90033 flags, node);
90034 } else {
90035- b = slob_new_pages(flags, get_order(c->size), node);
90036+ struct page *sp;
90037+
90038+ sp = slob_new_pages(flags, get_order(c->size), node);
90039+ if (sp) {
90040+ b = page_address(sp);
90041+ sp->private = c->size;
90042+ }
90043 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
90044 PAGE_SIZE << get_order(c->size),
90045 flags, node);
90046 }
90047+#endif
90048
90049 if (c->ctor)
90050 c->ctor(b);
90051@@ -564,10 +676,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
90052
90053 static void __kmem_cache_free(void *b, int size)
90054 {
90055- if (size < PAGE_SIZE)
90056+ struct page *sp;
90057+
90058+ sp = virt_to_page(b);
90059+ BUG_ON(!PageSlab(sp));
90060+ if (!sp->private)
90061 slob_free(b, size);
90062 else
90063- slob_free_pages(b, get_order(size));
90064+ slob_free_pages(sp, get_order(size));
90065 }
90066
90067 static void kmem_rcu_free(struct rcu_head *head)
90068@@ -580,17 +696,31 @@ static void kmem_rcu_free(struct rcu_head *head)
90069
90070 void kmem_cache_free(struct kmem_cache *c, void *b)
90071 {
90072+ int size = c->size;
90073+
90074+#ifdef CONFIG_PAX_USERCOPY_SLABS
90075+ if (size + c->align < PAGE_SIZE) {
90076+ size += c->align;
90077+ b -= c->align;
90078+ }
90079+#endif
90080+
90081 kmemleak_free_recursive(b, c->flags);
90082 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
90083 struct slob_rcu *slob_rcu;
90084- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
90085- slob_rcu->size = c->size;
90086+ slob_rcu = b + (size - sizeof(struct slob_rcu));
90087+ slob_rcu->size = size;
90088 call_rcu(&slob_rcu->head, kmem_rcu_free);
90089 } else {
90090- __kmem_cache_free(b, c->size);
90091+ __kmem_cache_free(b, size);
90092 }
90093
90094+#ifdef CONFIG_PAX_USERCOPY_SLABS
90095+ trace_kfree(_RET_IP_, b);
90096+#else
90097 trace_kmem_cache_free(_RET_IP_, b);
90098+#endif
90099+
90100 }
90101 EXPORT_SYMBOL(kmem_cache_free);
90102
90103diff --git a/mm/slub.c b/mm/slub.c
90104index 57707f0..7857bd3 100644
90105--- a/mm/slub.c
90106+++ b/mm/slub.c
90107@@ -198,7 +198,7 @@ struct track {
90108
90109 enum track_item { TRACK_ALLOC, TRACK_FREE };
90110
90111-#ifdef CONFIG_SYSFS
90112+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
90113 static int sysfs_slab_add(struct kmem_cache *);
90114 static int sysfs_slab_alias(struct kmem_cache *, const char *);
90115 static void sysfs_slab_remove(struct kmem_cache *);
90116@@ -519,7 +519,7 @@ static void print_track(const char *s, struct track *t)
90117 if (!t->addr)
90118 return;
90119
90120- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
90121+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
90122 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
90123 #ifdef CONFIG_STACKTRACE
90124 {
90125@@ -2594,6 +2594,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
90126
90127 slab_free_hook(s, x);
90128
90129+#ifdef CONFIG_PAX_MEMORY_SANITIZE
90130+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
90131+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
90132+ if (s->ctor)
90133+ s->ctor(x);
90134+ }
90135+#endif
90136+
90137 redo:
90138 /*
90139 * Determine the currently cpus per cpu slab.
90140@@ -2661,7 +2669,7 @@ static int slub_min_objects;
90141 * Merge control. If this is set then no merging of slab caches will occur.
90142 * (Could be removed. This was introduced to pacify the merge skeptics.)
90143 */
90144-static int slub_nomerge;
90145+static int slub_nomerge = 1;
90146
90147 /*
90148 * Calculate the order of allocation given an slab object size.
90149@@ -2938,6 +2946,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
90150 s->inuse = size;
90151
90152 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
90153+#ifdef CONFIG_PAX_MEMORY_SANITIZE
90154+ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
90155+#endif
90156 s->ctor)) {
90157 /*
90158 * Relocate free pointer after the object if it is not
90159@@ -3283,6 +3294,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
90160 EXPORT_SYMBOL(__kmalloc_node);
90161 #endif
90162
90163+bool is_usercopy_object(const void *ptr)
90164+{
90165+ struct page *page;
90166+ struct kmem_cache *s;
90167+
90168+ if (ZERO_OR_NULL_PTR(ptr))
90169+ return false;
90170+
90171+ if (!slab_is_available())
90172+ return false;
90173+
90174+ if (!virt_addr_valid(ptr))
90175+ return false;
90176+
90177+ page = virt_to_head_page(ptr);
90178+
90179+ if (!PageSlab(page))
90180+ return false;
90181+
90182+ s = page->slab_cache;
90183+ return s->flags & SLAB_USERCOPY;
90184+}
90185+
90186+#ifdef CONFIG_PAX_USERCOPY
90187+const char *check_heap_object(const void *ptr, unsigned long n)
90188+{
90189+ struct page *page;
90190+ struct kmem_cache *s;
90191+ unsigned long offset;
90192+
90193+ if (ZERO_OR_NULL_PTR(ptr))
90194+ return "<null>";
90195+
90196+ if (!virt_addr_valid(ptr))
90197+ return NULL;
90198+
90199+ page = virt_to_head_page(ptr);
90200+
90201+ if (!PageSlab(page))
90202+ return NULL;
90203+
90204+ s = page->slab_cache;
90205+ if (!(s->flags & SLAB_USERCOPY))
90206+ return s->name;
90207+
90208+ offset = (ptr - page_address(page)) % s->size;
90209+ if (offset <= s->object_size && n <= s->object_size - offset)
90210+ return NULL;
90211+
90212+ return s->name;
90213+}
90214+#endif
90215+
90216 size_t ksize(const void *object)
90217 {
90218 struct page *page;
90219@@ -3347,6 +3411,7 @@ void kfree(const void *x)
90220 if (unlikely(ZERO_OR_NULL_PTR(x)))
90221 return;
90222
90223+ VM_BUG_ON(!virt_addr_valid(x));
90224 page = virt_to_head_page(x);
90225 if (unlikely(!PageSlab(page))) {
90226 BUG_ON(!PageCompound(page));
90227@@ -3652,7 +3717,7 @@ static int slab_unmergeable(struct kmem_cache *s)
90228 /*
90229 * We may have set a slab to be unmergeable during bootstrap.
90230 */
90231- if (s->refcount < 0)
90232+ if (atomic_read(&s->refcount) < 0)
90233 return 1;
90234
90235 return 0;
90236@@ -3710,7 +3775,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
90237
90238 s = find_mergeable(memcg, size, align, flags, name, ctor);
90239 if (s) {
90240- s->refcount++;
90241+ atomic_inc(&s->refcount);
90242 /*
90243 * Adjust the object sizes so that we clear
90244 * the complete object on kzalloc.
90245@@ -3719,7 +3784,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
90246 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
90247
90248 if (sysfs_slab_alias(s, name)) {
90249- s->refcount--;
90250+ atomic_dec(&s->refcount);
90251 s = NULL;
90252 }
90253 }
90254@@ -3781,7 +3846,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
90255 return NOTIFY_OK;
90256 }
90257
90258-static struct notifier_block __cpuinitdata slab_notifier = {
90259+static struct notifier_block slab_notifier = {
90260 .notifier_call = slab_cpuup_callback
90261 };
90262
90263@@ -3839,7 +3904,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
90264 }
90265 #endif
90266
90267-#ifdef CONFIG_SYSFS
90268+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
90269 static int count_inuse(struct page *page)
90270 {
90271 return page->inuse;
90272@@ -4226,12 +4291,12 @@ static void resiliency_test(void)
90273 validate_slab_cache(kmalloc_caches[9]);
90274 }
90275 #else
90276-#ifdef CONFIG_SYSFS
90277+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
90278 static void resiliency_test(void) {};
90279 #endif
90280 #endif
90281
90282-#ifdef CONFIG_SYSFS
90283+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
90284 enum slab_stat_type {
90285 SL_ALL, /* All slabs */
90286 SL_PARTIAL, /* Only partially allocated slabs */
90287@@ -4475,7 +4540,7 @@ SLAB_ATTR_RO(ctor);
90288
90289 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
90290 {
90291- return sprintf(buf, "%d\n", s->refcount - 1);
90292+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
90293 }
90294 SLAB_ATTR_RO(aliases);
90295
90296@@ -4563,6 +4628,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
90297 SLAB_ATTR_RO(cache_dma);
90298 #endif
90299
90300+#ifdef CONFIG_PAX_USERCOPY_SLABS
90301+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
90302+{
90303+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
90304+}
90305+SLAB_ATTR_RO(usercopy);
90306+#endif
90307+
90308 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
90309 {
90310 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
90311@@ -4897,6 +4970,9 @@ static struct attribute *slab_attrs[] = {
90312 #ifdef CONFIG_ZONE_DMA
90313 &cache_dma_attr.attr,
90314 #endif
90315+#ifdef CONFIG_PAX_USERCOPY_SLABS
90316+ &usercopy_attr.attr,
90317+#endif
90318 #ifdef CONFIG_NUMA
90319 &remote_node_defrag_ratio_attr.attr,
90320 #endif
90321@@ -5128,6 +5204,7 @@ static char *create_unique_id(struct kmem_cache *s)
90322 return name;
90323 }
90324
90325+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
90326 static int sysfs_slab_add(struct kmem_cache *s)
90327 {
90328 int err;
90329@@ -5151,7 +5228,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
90330 }
90331
90332 s->kobj.kset = slab_kset;
90333- err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
90334+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
90335 if (err) {
90336 kobject_put(&s->kobj);
90337 return err;
90338@@ -5185,6 +5262,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
90339 kobject_del(&s->kobj);
90340 kobject_put(&s->kobj);
90341 }
90342+#endif
90343
90344 /*
90345 * Need to buffer aliases during bootup until sysfs becomes
90346@@ -5198,6 +5276,7 @@ struct saved_alias {
90347
90348 static struct saved_alias *alias_list;
90349
90350+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
90351 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
90352 {
90353 struct saved_alias *al;
90354@@ -5220,6 +5299,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
90355 alias_list = al;
90356 return 0;
90357 }
90358+#endif
90359
90360 static int __init slab_sysfs_init(void)
90361 {
90362diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
90363index 27eeab3..7c3f7f2 100644
90364--- a/mm/sparse-vmemmap.c
90365+++ b/mm/sparse-vmemmap.c
90366@@ -130,7 +130,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
90367 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
90368 if (!p)
90369 return NULL;
90370- pud_populate(&init_mm, pud, p);
90371+ pud_populate_kernel(&init_mm, pud, p);
90372 }
90373 return pud;
90374 }
90375@@ -142,7 +142,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
90376 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
90377 if (!p)
90378 return NULL;
90379- pgd_populate(&init_mm, pgd, p);
90380+ pgd_populate_kernel(&init_mm, pgd, p);
90381 }
90382 return pgd;
90383 }
90384diff --git a/mm/sparse.c b/mm/sparse.c
90385index 1c91f0d3..485470a 100644
90386--- a/mm/sparse.c
90387+++ b/mm/sparse.c
90388@@ -761,7 +761,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
90389
90390 for (i = 0; i < PAGES_PER_SECTION; i++) {
90391 if (PageHWPoison(&memmap[i])) {
90392- atomic_long_sub(1, &num_poisoned_pages);
90393+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
90394 ClearPageHWPoison(&memmap[i]);
90395 }
90396 }
90397diff --git a/mm/swap.c b/mm/swap.c
90398index dfd7d71..ccdf688 100644
90399--- a/mm/swap.c
90400+++ b/mm/swap.c
90401@@ -31,6 +31,7 @@
90402 #include <linux/memcontrol.h>
90403 #include <linux/gfp.h>
90404 #include <linux/uio.h>
90405+#include <linux/hugetlb.h>
90406
90407 #include "internal.h"
90408
90409@@ -73,6 +74,8 @@ static void __put_compound_page(struct page *page)
90410
90411 __page_cache_release(page);
90412 dtor = get_compound_page_dtor(page);
90413+ if (!PageHuge(page))
90414+ BUG_ON(dtor != free_compound_page);
90415 (*dtor)(page);
90416 }
90417
90418diff --git a/mm/swapfile.c b/mm/swapfile.c
90419index 746af55b..7ac94ae 100644
90420--- a/mm/swapfile.c
90421+++ b/mm/swapfile.c
90422@@ -66,7 +66,7 @@ static DEFINE_MUTEX(swapon_mutex);
90423
90424 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
90425 /* Activity counter to indicate that a swapon or swapoff has occurred */
90426-static atomic_t proc_poll_event = ATOMIC_INIT(0);
90427+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
90428
90429 static inline unsigned char swap_count(unsigned char ent)
90430 {
90431@@ -1684,7 +1684,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
90432 }
90433 filp_close(swap_file, NULL);
90434 err = 0;
90435- atomic_inc(&proc_poll_event);
90436+ atomic_inc_unchecked(&proc_poll_event);
90437 wake_up_interruptible(&proc_poll_wait);
90438
90439 out_dput:
90440@@ -1701,8 +1701,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
90441
90442 poll_wait(file, &proc_poll_wait, wait);
90443
90444- if (seq->poll_event != atomic_read(&proc_poll_event)) {
90445- seq->poll_event = atomic_read(&proc_poll_event);
90446+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
90447+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
90448 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
90449 }
90450
90451@@ -1800,7 +1800,7 @@ static int swaps_open(struct inode *inode, struct file *file)
90452 return ret;
90453
90454 seq = file->private_data;
90455- seq->poll_event = atomic_read(&proc_poll_event);
90456+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
90457 return 0;
90458 }
90459
90460@@ -2143,7 +2143,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
90461 (frontswap_map) ? "FS" : "");
90462
90463 mutex_unlock(&swapon_mutex);
90464- atomic_inc(&proc_poll_event);
90465+ atomic_inc_unchecked(&proc_poll_event);
90466 wake_up_interruptible(&proc_poll_wait);
90467
90468 if (S_ISREG(inode->i_mode))
90469diff --git a/mm/util.c b/mm/util.c
90470index ab1424d..7c5bd5a 100644
90471--- a/mm/util.c
90472+++ b/mm/util.c
90473@@ -294,6 +294,12 @@ done:
90474 void arch_pick_mmap_layout(struct mm_struct *mm)
90475 {
90476 mm->mmap_base = TASK_UNMAPPED_BASE;
90477+
90478+#ifdef CONFIG_PAX_RANDMMAP
90479+ if (mm->pax_flags & MF_PAX_RANDMMAP)
90480+ mm->mmap_base += mm->delta_mmap;
90481+#endif
90482+
90483 mm->get_unmapped_area = arch_get_unmapped_area;
90484 mm->unmap_area = arch_unmap_area;
90485 }
90486diff --git a/mm/vmalloc.c b/mm/vmalloc.c
90487index d365724..6cae7c2 100644
90488--- a/mm/vmalloc.c
90489+++ b/mm/vmalloc.c
90490@@ -59,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
90491
90492 pte = pte_offset_kernel(pmd, addr);
90493 do {
90494- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
90495- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
90496+
90497+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90498+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
90499+ BUG_ON(!pte_exec(*pte));
90500+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
90501+ continue;
90502+ }
90503+#endif
90504+
90505+ {
90506+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
90507+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
90508+ }
90509 } while (pte++, addr += PAGE_SIZE, addr != end);
90510 }
90511
90512@@ -120,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
90513 pte = pte_alloc_kernel(pmd, addr);
90514 if (!pte)
90515 return -ENOMEM;
90516+
90517+ pax_open_kernel();
90518 do {
90519 struct page *page = pages[*nr];
90520
90521- if (WARN_ON(!pte_none(*pte)))
90522+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90523+ if (pgprot_val(prot) & _PAGE_NX)
90524+#endif
90525+
90526+ if (!pte_none(*pte)) {
90527+ pax_close_kernel();
90528+ WARN_ON(1);
90529 return -EBUSY;
90530- if (WARN_ON(!page))
90531+ }
90532+ if (!page) {
90533+ pax_close_kernel();
90534+ WARN_ON(1);
90535 return -ENOMEM;
90536+ }
90537 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
90538 (*nr)++;
90539 } while (pte++, addr += PAGE_SIZE, addr != end);
90540+ pax_close_kernel();
90541 return 0;
90542 }
90543
90544@@ -139,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
90545 pmd_t *pmd;
90546 unsigned long next;
90547
90548- pmd = pmd_alloc(&init_mm, pud, addr);
90549+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
90550 if (!pmd)
90551 return -ENOMEM;
90552 do {
90553@@ -156,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
90554 pud_t *pud;
90555 unsigned long next;
90556
90557- pud = pud_alloc(&init_mm, pgd, addr);
90558+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
90559 if (!pud)
90560 return -ENOMEM;
90561 do {
90562@@ -216,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x)
90563 if (addr >= MODULES_VADDR && addr < MODULES_END)
90564 return 1;
90565 #endif
90566+
90567+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90568+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
90569+ return 1;
90570+#endif
90571+
90572 return is_vmalloc_addr(x);
90573 }
90574
90575@@ -236,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
90576
90577 if (!pgd_none(*pgd)) {
90578 pud_t *pud = pud_offset(pgd, addr);
90579+#ifdef CONFIG_X86
90580+ if (!pud_large(*pud))
90581+#endif
90582 if (!pud_none(*pud)) {
90583 pmd_t *pmd = pmd_offset(pud, addr);
90584+#ifdef CONFIG_X86
90585+ if (!pmd_large(*pmd))
90586+#endif
90587 if (!pmd_none(*pmd)) {
90588 pte_t *ptep, pte;
90589
90590@@ -339,7 +375,7 @@ static void purge_vmap_area_lazy(void);
90591 * Allocate a region of KVA of the specified size and alignment, within the
90592 * vstart and vend.
90593 */
90594-static struct vmap_area *alloc_vmap_area(unsigned long size,
90595+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
90596 unsigned long align,
90597 unsigned long vstart, unsigned long vend,
90598 int node, gfp_t gfp_mask)
90599@@ -1337,6 +1373,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
90600 struct vm_struct *area;
90601
90602 BUG_ON(in_interrupt());
90603+
90604+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
90605+ if (flags & VM_KERNEXEC) {
90606+ if (start != VMALLOC_START || end != VMALLOC_END)
90607+ return NULL;
90608+ start = (unsigned long)MODULES_EXEC_VADDR;
90609+ end = (unsigned long)MODULES_EXEC_END;
90610+ }
90611+#endif
90612+
90613 if (flags & VM_IOREMAP) {
90614 int bit = fls(size);
90615
90616@@ -1581,6 +1627,11 @@ void *vmap(struct page **pages, unsigned int count,
90617 if (count > totalram_pages)
90618 return NULL;
90619
90620+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
90621+ if (!(pgprot_val(prot) & _PAGE_NX))
90622+ flags |= VM_KERNEXEC;
90623+#endif
90624+
90625 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
90626 __builtin_return_address(0));
90627 if (!area)
90628@@ -1682,6 +1733,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
90629 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
90630 goto fail;
90631
90632+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
90633+ if (!(pgprot_val(prot) & _PAGE_NX))
90634+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
90635+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
90636+ else
90637+#endif
90638+
90639 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
90640 start, end, node, gfp_mask, caller);
90641 if (!area)
90642@@ -1858,10 +1916,9 @@ EXPORT_SYMBOL(vzalloc_node);
90643 * For tight control over page level allocator and protection flags
90644 * use __vmalloc() instead.
90645 */
90646-
90647 void *vmalloc_exec(unsigned long size)
90648 {
90649- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
90650+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
90651 NUMA_NO_NODE, __builtin_return_address(0));
90652 }
90653
90654@@ -2168,6 +2225,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
90655 unsigned long uaddr = vma->vm_start;
90656 unsigned long usize = vma->vm_end - vma->vm_start;
90657
90658+ BUG_ON(vma->vm_mirror);
90659+
90660 if ((PAGE_SIZE-1) & (unsigned long)addr)
90661 return -EINVAL;
90662
90663@@ -2629,7 +2688,11 @@ static int s_show(struct seq_file *m, void *p)
90664 v->addr, v->addr + v->size, v->size);
90665
90666 if (v->caller)
90667+#ifdef CONFIG_GRKERNSEC_HIDESYM
90668+ seq_printf(m, " %pK", v->caller);
90669+#else
90670 seq_printf(m, " %pS", v->caller);
90671+#endif
90672
90673 if (v->nr_pages)
90674 seq_printf(m, " pages=%d", v->nr_pages);
90675diff --git a/mm/vmstat.c b/mm/vmstat.c
90676index f42745e..62f8346 100644
90677--- a/mm/vmstat.c
90678+++ b/mm/vmstat.c
90679@@ -76,7 +76,7 @@ void vm_events_fold_cpu(int cpu)
90680 *
90681 * vm_stat contains the global counters
90682 */
90683-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
90684+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
90685 EXPORT_SYMBOL(vm_stat);
90686
90687 #ifdef CONFIG_SMP
90688@@ -452,7 +452,7 @@ void refresh_cpu_vm_stats(int cpu)
90689 v = p->vm_stat_diff[i];
90690 p->vm_stat_diff[i] = 0;
90691 local_irq_restore(flags);
90692- atomic_long_add(v, &zone->vm_stat[i]);
90693+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
90694 global_diff[i] += v;
90695 #ifdef CONFIG_NUMA
90696 /* 3 seconds idle till flush */
90697@@ -490,7 +490,7 @@ void refresh_cpu_vm_stats(int cpu)
90698
90699 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
90700 if (global_diff[i])
90701- atomic_long_add(global_diff[i], &vm_stat[i]);
90702+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
90703 }
90704
90705 /*
90706@@ -505,8 +505,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
90707 if (pset->vm_stat_diff[i]) {
90708 int v = pset->vm_stat_diff[i];
90709 pset->vm_stat_diff[i] = 0;
90710- atomic_long_add(v, &zone->vm_stat[i]);
90711- atomic_long_add(v, &vm_stat[i]);
90712+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
90713+ atomic_long_add_unchecked(v, &vm_stat[i]);
90714 }
90715 }
90716 #endif
90717@@ -1226,7 +1226,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
90718 return NOTIFY_OK;
90719 }
90720
90721-static struct notifier_block __cpuinitdata vmstat_notifier =
90722+static struct notifier_block vmstat_notifier =
90723 { &vmstat_cpuup_callback, NULL, 0 };
90724 #endif
90725
90726@@ -1241,10 +1241,20 @@ static int __init setup_vmstat(void)
90727 start_cpu_timer(cpu);
90728 #endif
90729 #ifdef CONFIG_PROC_FS
90730- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
90731- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
90732- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
90733- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
90734+ {
90735+ mode_t gr_mode = S_IRUGO;
90736+#ifdef CONFIG_GRKERNSEC_PROC_ADD
90737+ gr_mode = S_IRUSR;
90738+#endif
90739+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
90740+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
90741+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
90742+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
90743+#else
90744+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
90745+#endif
90746+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
90747+ }
90748 #endif
90749 return 0;
90750 }
90751diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
90752index 9424f37..6aabf19 100644
90753--- a/net/8021q/vlan.c
90754+++ b/net/8021q/vlan.c
90755@@ -469,7 +469,7 @@ out:
90756 return NOTIFY_DONE;
90757 }
90758
90759-static struct notifier_block vlan_notifier_block __read_mostly = {
90760+static struct notifier_block vlan_notifier_block = {
90761 .notifier_call = vlan_device_event,
90762 };
90763
90764@@ -544,8 +544,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
90765 err = -EPERM;
90766 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
90767 break;
90768- if ((args.u.name_type >= 0) &&
90769- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
90770+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
90771 struct vlan_net *vn;
90772
90773 vn = net_generic(net, vlan_net_id);
90774diff --git a/net/9p/mod.c b/net/9p/mod.c
90775index 6ab36ae..6f1841b 100644
90776--- a/net/9p/mod.c
90777+++ b/net/9p/mod.c
90778@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
90779 void v9fs_register_trans(struct p9_trans_module *m)
90780 {
90781 spin_lock(&v9fs_trans_lock);
90782- list_add_tail(&m->list, &v9fs_trans_list);
90783+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
90784 spin_unlock(&v9fs_trans_lock);
90785 }
90786 EXPORT_SYMBOL(v9fs_register_trans);
90787@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
90788 void v9fs_unregister_trans(struct p9_trans_module *m)
90789 {
90790 spin_lock(&v9fs_trans_lock);
90791- list_del_init(&m->list);
90792+ pax_list_del_init((struct list_head *)&m->list);
90793 spin_unlock(&v9fs_trans_lock);
90794 }
90795 EXPORT_SYMBOL(v9fs_unregister_trans);
90796diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
90797index 02efb25..41541a9 100644
90798--- a/net/9p/trans_fd.c
90799+++ b/net/9p/trans_fd.c
90800@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
90801 oldfs = get_fs();
90802 set_fs(get_ds());
90803 /* The cast to a user pointer is valid due to the set_fs() */
90804- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
90805+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
90806 set_fs(oldfs);
90807
90808 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
90809diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
90810index 876fbe8..8bbea9f 100644
90811--- a/net/atm/atm_misc.c
90812+++ b/net/atm/atm_misc.c
90813@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
90814 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
90815 return 1;
90816 atm_return(vcc, truesize);
90817- atomic_inc(&vcc->stats->rx_drop);
90818+ atomic_inc_unchecked(&vcc->stats->rx_drop);
90819 return 0;
90820 }
90821 EXPORT_SYMBOL(atm_charge);
90822@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
90823 }
90824 }
90825 atm_return(vcc, guess);
90826- atomic_inc(&vcc->stats->rx_drop);
90827+ atomic_inc_unchecked(&vcc->stats->rx_drop);
90828 return NULL;
90829 }
90830 EXPORT_SYMBOL(atm_alloc_charge);
90831@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
90832
90833 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
90834 {
90835-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
90836+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
90837 __SONET_ITEMS
90838 #undef __HANDLE_ITEM
90839 }
90840@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
90841
90842 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
90843 {
90844-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
90845+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
90846 __SONET_ITEMS
90847 #undef __HANDLE_ITEM
90848 }
90849diff --git a/net/atm/lec.h b/net/atm/lec.h
90850index 4149db1..f2ab682 100644
90851--- a/net/atm/lec.h
90852+++ b/net/atm/lec.h
90853@@ -48,7 +48,7 @@ struct lane2_ops {
90854 const u8 *tlvs, u32 sizeoftlvs);
90855 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
90856 const u8 *tlvs, u32 sizeoftlvs);
90857-};
90858+} __no_const;
90859
90860 /*
90861 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
90862diff --git a/net/atm/proc.c b/net/atm/proc.c
90863index bbb6461..cf04016 100644
90864--- a/net/atm/proc.c
90865+++ b/net/atm/proc.c
90866@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
90867 const struct k_atm_aal_stats *stats)
90868 {
90869 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
90870- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
90871- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
90872- atomic_read(&stats->rx_drop));
90873+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
90874+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
90875+ atomic_read_unchecked(&stats->rx_drop));
90876 }
90877
90878 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
90879diff --git a/net/atm/resources.c b/net/atm/resources.c
90880index 0447d5d..3cf4728 100644
90881--- a/net/atm/resources.c
90882+++ b/net/atm/resources.c
90883@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
90884 static void copy_aal_stats(struct k_atm_aal_stats *from,
90885 struct atm_aal_stats *to)
90886 {
90887-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
90888+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
90889 __AAL_STAT_ITEMS
90890 #undef __HANDLE_ITEM
90891 }
90892@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
90893 static void subtract_aal_stats(struct k_atm_aal_stats *from,
90894 struct atm_aal_stats *to)
90895 {
90896-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
90897+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
90898 __AAL_STAT_ITEMS
90899 #undef __HANDLE_ITEM
90900 }
90901diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
90902index d5744b7..506bae3 100644
90903--- a/net/ax25/sysctl_net_ax25.c
90904+++ b/net/ax25/sysctl_net_ax25.c
90905@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
90906 {
90907 char path[sizeof("net/ax25/") + IFNAMSIZ];
90908 int k;
90909- struct ctl_table *table;
90910+ ctl_table_no_const *table;
90911
90912 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
90913 if (!table)
90914diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
90915index f680ee1..97e3542 100644
90916--- a/net/batman-adv/bat_iv_ogm.c
90917+++ b/net/batman-adv/bat_iv_ogm.c
90918@@ -79,7 +79,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
90919
90920 /* randomize initial seqno to avoid collision */
90921 get_random_bytes(&random_seqno, sizeof(random_seqno));
90922- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
90923+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
90924
90925 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
90926 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
90927@@ -627,9 +627,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
90928 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
90929
90930 /* change sequence number to network order */
90931- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
90932+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
90933 batadv_ogm_packet->seqno = htonl(seqno);
90934- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
90935+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
90936
90937 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
90938 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
90939@@ -1037,7 +1037,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
90940 return;
90941
90942 /* could be changed by schedule_own_packet() */
90943- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
90944+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
90945
90946 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
90947 has_directlink_flag = 1;
90948diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
90949index de27b31..7058bfe 100644
90950--- a/net/batman-adv/bridge_loop_avoidance.c
90951+++ b/net/batman-adv/bridge_loop_avoidance.c
90952@@ -1522,6 +1522,8 @@ out:
90953 * in these cases, the skb is further handled by this function and
90954 * returns 1, otherwise it returns 0 and the caller shall further
90955 * process the skb.
90956+ *
90957+ * This call might reallocate skb data.
90958 */
90959 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
90960 {
90961diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
90962index f105219..7614af3 100644
90963--- a/net/batman-adv/gateway_client.c
90964+++ b/net/batman-adv/gateway_client.c
90965@@ -508,6 +508,7 @@ out:
90966 return 0;
90967 }
90968
90969+/* this call might reallocate skb data */
90970 static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
90971 {
90972 int ret = false;
90973@@ -568,6 +569,7 @@ out:
90974 return ret;
90975 }
90976
90977+/* this call might reallocate skb data */
90978 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
90979 {
90980 struct ethhdr *ethhdr;
90981@@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
90982
90983 if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
90984 return false;
90985+
90986+ /* skb->data might have been reallocated by pskb_may_pull() */
90987+ ethhdr = (struct ethhdr *)skb->data;
90988+ if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
90989+ ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
90990+
90991 udphdr = (struct udphdr *)(skb->data + *header_len);
90992 *header_len += sizeof(*udphdr);
90993
90994@@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
90995 return true;
90996 }
90997
90998+/* this call might reallocate skb data */
90999 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
91000- struct sk_buff *skb, struct ethhdr *ethhdr)
91001+ struct sk_buff *skb)
91002 {
91003 struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
91004 struct batadv_orig_node *orig_dst_node = NULL;
91005 struct batadv_gw_node *curr_gw = NULL;
91006+ struct ethhdr *ethhdr;
91007 bool ret, out_of_range = false;
91008 unsigned int header_len = 0;
91009 uint8_t curr_tq_avg;
91010@@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
91011 if (!ret)
91012 goto out;
91013
91014+ ethhdr = (struct ethhdr *)skb->data;
91015 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
91016 ethhdr->h_dest);
91017 if (!orig_dst_node)
91018diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
91019index 039902d..1037d75 100644
91020--- a/net/batman-adv/gateway_client.h
91021+++ b/net/batman-adv/gateway_client.h
91022@@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
91023 void batadv_gw_node_purge(struct batadv_priv *bat_priv);
91024 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
91025 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
91026-bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
91027- struct sk_buff *skb, struct ethhdr *ethhdr);
91028+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
91029
91030 #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
91031diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
91032index 522243a..b48c0ef 100644
91033--- a/net/batman-adv/hard-interface.c
91034+++ b/net/batman-adv/hard-interface.c
91035@@ -401,7 +401,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
91036 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
91037 dev_add_pack(&hard_iface->batman_adv_ptype);
91038
91039- atomic_set(&hard_iface->frag_seqno, 1);
91040+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
91041 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
91042 hard_iface->net_dev->name);
91043
91044@@ -550,7 +550,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
91045 /* This can't be called via a bat_priv callback because
91046 * we have no bat_priv yet.
91047 */
91048- atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
91049+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, 1);
91050 hard_iface->bat_iv.ogm_buff = NULL;
91051
91052 return hard_iface;
91053diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
91054index 819dfb0..226bacd 100644
91055--- a/net/batman-adv/soft-interface.c
91056+++ b/net/batman-adv/soft-interface.c
91057@@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb,
91058 if (batadv_bla_tx(bat_priv, skb, vid))
91059 goto dropped;
91060
91061+ /* skb->data might have been reallocated by batadv_bla_tx() */
91062+ ethhdr = (struct ethhdr *)skb->data;
91063+
91064 /* Register the client MAC in the transtable */
91065 if (!is_multicast_ether_addr(ethhdr->h_source))
91066 batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
91067@@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb,
91068 default:
91069 break;
91070 }
91071+
91072+ /* reminder: ethhdr might have become unusable from here on
91073+ * (batadv_gw_is_dhcp_target() might have reallocated skb data)
91074+ */
91075 }
91076
91077 /* ethernet packet should be broadcasted */
91078@@ -253,7 +260,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
91079 primary_if->net_dev->dev_addr, ETH_ALEN);
91080
91081 /* set broadcast sequence number */
91082- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
91083+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
91084 bcast_packet->seqno = htonl(seqno);
91085
91086 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
91087@@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
91088 /* unicast packet */
91089 } else {
91090 if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
91091- ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
91092+ ret = batadv_gw_out_of_range(bat_priv, skb);
91093 if (ret)
91094 goto dropped;
91095 }
91096@@ -472,7 +479,7 @@ static int batadv_softif_init_late(struct net_device *dev)
91097 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
91098
91099 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
91100- atomic_set(&bat_priv->bcast_seqno, 1);
91101+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
91102 atomic_set(&bat_priv->tt.vn, 0);
91103 atomic_set(&bat_priv->tt.local_changes, 0);
91104 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
91105diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
91106index aba8364..50fcbb8 100644
91107--- a/net/batman-adv/types.h
91108+++ b/net/batman-adv/types.h
91109@@ -51,7 +51,7 @@
91110 struct batadv_hard_iface_bat_iv {
91111 unsigned char *ogm_buff;
91112 int ogm_buff_len;
91113- atomic_t ogm_seqno;
91114+ atomic_unchecked_t ogm_seqno;
91115 };
91116
91117 /**
91118@@ -75,7 +75,7 @@ struct batadv_hard_iface {
91119 int16_t if_num;
91120 char if_status;
91121 struct net_device *net_dev;
91122- atomic_t frag_seqno;
91123+ atomic_unchecked_t frag_seqno;
91124 struct kobject *hardif_obj;
91125 atomic_t refcount;
91126 struct packet_type batman_adv_ptype;
91127@@ -558,7 +558,7 @@ struct batadv_priv {
91128 #ifdef CONFIG_BATMAN_ADV_DEBUG
91129 atomic_t log_level;
91130 #endif
91131- atomic_t bcast_seqno;
91132+ atomic_unchecked_t bcast_seqno;
91133 atomic_t bcast_queue_left;
91134 atomic_t batman_queue_left;
91135 char num_ifaces;
91136diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
91137index 0bb3b59..0e3052e 100644
91138--- a/net/batman-adv/unicast.c
91139+++ b/net/batman-adv/unicast.c
91140@@ -270,7 +270,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
91141 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
91142 frag2->flags = large_tail;
91143
91144- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
91145+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
91146 frag1->seqno = htons(seqno - 1);
91147 frag2->seqno = htons(seqno);
91148
91149@@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
91150 * @skb: the skb containing the payload to encapsulate
91151 * @orig_node: the destination node
91152 *
91153- * Returns false if the payload could not be encapsulated or true otherwise
91154+ * Returns false if the payload could not be encapsulated or true otherwise.
91155+ *
91156+ * This call might reallocate skb data.
91157 */
91158 static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
91159 struct batadv_orig_node *orig_node)
91160@@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
91161 * @orig_node: the destination node
91162 * @packet_subtype: the batman 4addr packet subtype to use
91163 *
91164- * Returns false if the payload could not be encapsulated or true otherwise
91165+ * Returns false if the payload could not be encapsulated or true otherwise.
91166+ *
91167+ * This call might reallocate skb data.
91168 */
91169 bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
91170 struct sk_buff *skb,
91171@@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
91172 struct batadv_neigh_node *neigh_node;
91173 int data_len = skb->len;
91174 int ret = NET_RX_DROP;
91175- unsigned int dev_mtu;
91176+ unsigned int dev_mtu, header_len;
91177
91178 /* get routing information */
91179 if (is_multicast_ether_addr(ethhdr->h_dest)) {
91180@@ -429,10 +433,12 @@ find_router:
91181 switch (packet_type) {
91182 case BATADV_UNICAST:
91183 batadv_unicast_prepare_skb(skb, orig_node);
91184+ header_len = sizeof(struct batadv_unicast_packet);
91185 break;
91186 case BATADV_UNICAST_4ADDR:
91187 batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
91188 packet_subtype);
91189+ header_len = sizeof(struct batadv_unicast_4addr_packet);
91190 break;
91191 default:
91192 /* this function supports UNICAST and UNICAST_4ADDR only. It
91193@@ -441,6 +447,7 @@ find_router:
91194 goto out;
91195 }
91196
91197+ ethhdr = (struct ethhdr *)(skb->data + header_len);
91198 unicast_packet = (struct batadv_unicast_packet *)skb->data;
91199
91200 /* inform the destination node that we are still missing a correct route
91201diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
91202index ace5e55..a65a1c0 100644
91203--- a/net/bluetooth/hci_core.c
91204+++ b/net/bluetooth/hci_core.c
91205@@ -2211,16 +2211,16 @@ int hci_register_dev(struct hci_dev *hdev)
91206 list_add(&hdev->list, &hci_dev_list);
91207 write_unlock(&hci_dev_list_lock);
91208
91209- hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
91210- WQ_MEM_RECLAIM, 1);
91211+ hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
91212+ WQ_MEM_RECLAIM, 1, hdev->name);
91213 if (!hdev->workqueue) {
91214 error = -ENOMEM;
91215 goto err;
91216 }
91217
91218- hdev->req_workqueue = alloc_workqueue(hdev->name,
91219+ hdev->req_workqueue = alloc_workqueue("%s",
91220 WQ_HIGHPRI | WQ_UNBOUND |
91221- WQ_MEM_RECLAIM, 1);
91222+ WQ_MEM_RECLAIM, 1, hdev->name);
91223 if (!hdev->req_workqueue) {
91224 destroy_workqueue(hdev->workqueue);
91225 error = -ENOMEM;
91226diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
91227index 9bd7d95..6c4884f 100644
91228--- a/net/bluetooth/hci_sock.c
91229+++ b/net/bluetooth/hci_sock.c
91230@@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
91231 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
91232 }
91233
91234- len = min_t(unsigned int, len, sizeof(uf));
91235+ len = min((size_t)len, sizeof(uf));
91236 if (copy_from_user(&uf, optval, len)) {
91237 err = -EFAULT;
91238 break;
91239diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
91240index 68843a2..30e9342 100644
91241--- a/net/bluetooth/l2cap_core.c
91242+++ b/net/bluetooth/l2cap_core.c
91243@@ -3507,8 +3507,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
91244 break;
91245
91246 case L2CAP_CONF_RFC:
91247- if (olen == sizeof(rfc))
91248- memcpy(&rfc, (void *)val, olen);
91249+ if (olen != sizeof(rfc))
91250+ break;
91251+
91252+ memcpy(&rfc, (void *)val, olen);
91253
91254 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
91255 rfc.mode != chan->mode)
91256diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
91257index 36fed40..be2eeb2 100644
91258--- a/net/bluetooth/l2cap_sock.c
91259+++ b/net/bluetooth/l2cap_sock.c
91260@@ -485,7 +485,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
91261 struct sock *sk = sock->sk;
91262 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
91263 struct l2cap_options opts;
91264- int len, err = 0;
91265+ int err = 0;
91266+ size_t len = optlen;
91267 u32 opt;
91268
91269 BT_DBG("sk %p", sk);
91270@@ -507,7 +508,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
91271 opts.max_tx = chan->max_tx;
91272 opts.txwin_size = chan->tx_win;
91273
91274- len = min_t(unsigned int, sizeof(opts), optlen);
91275+ len = min(sizeof(opts), len);
91276 if (copy_from_user((char *) &opts, optval, len)) {
91277 err = -EFAULT;
91278 break;
91279@@ -587,7 +588,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
91280 struct bt_security sec;
91281 struct bt_power pwr;
91282 struct l2cap_conn *conn;
91283- int len, err = 0;
91284+ int err = 0;
91285+ size_t len = optlen;
91286 u32 opt;
91287
91288 BT_DBG("sk %p", sk);
91289@@ -610,7 +612,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
91290
91291 sec.level = BT_SECURITY_LOW;
91292
91293- len = min_t(unsigned int, sizeof(sec), optlen);
91294+ len = min(sizeof(sec), len);
91295 if (copy_from_user((char *) &sec, optval, len)) {
91296 err = -EFAULT;
91297 break;
91298@@ -707,7 +709,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
91299
91300 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
91301
91302- len = min_t(unsigned int, sizeof(pwr), optlen);
91303+ len = min(sizeof(pwr), len);
91304 if (copy_from_user((char *) &pwr, optval, len)) {
91305 err = -EFAULT;
91306 break;
91307diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
91308index 30b3721..c1bd0a0 100644
91309--- a/net/bluetooth/rfcomm/sock.c
91310+++ b/net/bluetooth/rfcomm/sock.c
91311@@ -666,7 +666,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
91312 struct sock *sk = sock->sk;
91313 struct bt_security sec;
91314 int err = 0;
91315- size_t len;
91316+ size_t len = optlen;
91317 u32 opt;
91318
91319 BT_DBG("sk %p", sk);
91320@@ -688,7 +688,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
91321
91322 sec.level = BT_SECURITY_LOW;
91323
91324- len = min_t(unsigned int, sizeof(sec), optlen);
91325+ len = min(sizeof(sec), len);
91326 if (copy_from_user((char *) &sec, optval, len)) {
91327 err = -EFAULT;
91328 break;
91329diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
91330index b6e44ad..5b0d514 100644
91331--- a/net/bluetooth/rfcomm/tty.c
91332+++ b/net/bluetooth/rfcomm/tty.c
91333@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
91334 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
91335
91336 spin_lock_irqsave(&dev->port.lock, flags);
91337- if (dev->port.count > 0) {
91338+ if (atomic_read(&dev->port.count) > 0) {
91339 spin_unlock_irqrestore(&dev->port.lock, flags);
91340 return;
91341 }
91342@@ -659,10 +659,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
91343 return -ENODEV;
91344
91345 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
91346- dev->channel, dev->port.count);
91347+ dev->channel, atomic_read(&dev->port.count));
91348
91349 spin_lock_irqsave(&dev->port.lock, flags);
91350- if (++dev->port.count > 1) {
91351+ if (atomic_inc_return(&dev->port.count) > 1) {
91352 spin_unlock_irqrestore(&dev->port.lock, flags);
91353 return 0;
91354 }
91355@@ -727,10 +727,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
91356 return;
91357
91358 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
91359- dev->port.count);
91360+ atomic_read(&dev->port.count));
91361
91362 spin_lock_irqsave(&dev->port.lock, flags);
91363- if (!--dev->port.count) {
91364+ if (!atomic_dec_return(&dev->port.count)) {
91365 spin_unlock_irqrestore(&dev->port.lock, flags);
91366 if (dev->tty_dev->parent)
91367 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
91368diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
91369index 3d110c4..4e1b2eb 100644
91370--- a/net/bridge/netfilter/ebtables.c
91371+++ b/net/bridge/netfilter/ebtables.c
91372@@ -1525,7 +1525,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
91373 tmp.valid_hooks = t->table->valid_hooks;
91374 }
91375 mutex_unlock(&ebt_mutex);
91376- if (copy_to_user(user, &tmp, *len) != 0){
91377+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
91378 BUGPRINT("c2u Didn't work\n");
91379 ret = -EFAULT;
91380 break;
91381@@ -2331,7 +2331,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
91382 goto out;
91383 tmp.valid_hooks = t->valid_hooks;
91384
91385- if (copy_to_user(user, &tmp, *len) != 0) {
91386+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
91387 ret = -EFAULT;
91388 break;
91389 }
91390@@ -2342,7 +2342,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
91391 tmp.entries_size = t->table->entries_size;
91392 tmp.valid_hooks = t->table->valid_hooks;
91393
91394- if (copy_to_user(user, &tmp, *len) != 0) {
91395+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
91396 ret = -EFAULT;
91397 break;
91398 }
91399diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
91400index 2bd4b58..0dc30a1 100644
91401--- a/net/caif/cfctrl.c
91402+++ b/net/caif/cfctrl.c
91403@@ -10,6 +10,7 @@
91404 #include <linux/spinlock.h>
91405 #include <linux/slab.h>
91406 #include <linux/pkt_sched.h>
91407+#include <linux/sched.h>
91408 #include <net/caif/caif_layer.h>
91409 #include <net/caif/cfpkt.h>
91410 #include <net/caif/cfctrl.h>
91411@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
91412 memset(&dev_info, 0, sizeof(dev_info));
91413 dev_info.id = 0xff;
91414 cfsrvl_init(&this->serv, 0, &dev_info, false);
91415- atomic_set(&this->req_seq_no, 1);
91416- atomic_set(&this->rsp_seq_no, 1);
91417+ atomic_set_unchecked(&this->req_seq_no, 1);
91418+ atomic_set_unchecked(&this->rsp_seq_no, 1);
91419 this->serv.layer.receive = cfctrl_recv;
91420 sprintf(this->serv.layer.name, "ctrl");
91421 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
91422@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
91423 struct cfctrl_request_info *req)
91424 {
91425 spin_lock_bh(&ctrl->info_list_lock);
91426- atomic_inc(&ctrl->req_seq_no);
91427- req->sequence_no = atomic_read(&ctrl->req_seq_no);
91428+ atomic_inc_unchecked(&ctrl->req_seq_no);
91429+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
91430 list_add_tail(&req->list, &ctrl->list);
91431 spin_unlock_bh(&ctrl->info_list_lock);
91432 }
91433@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
91434 if (p != first)
91435 pr_warn("Requests are not received in order\n");
91436
91437- atomic_set(&ctrl->rsp_seq_no,
91438+ atomic_set_unchecked(&ctrl->rsp_seq_no,
91439 p->sequence_no);
91440 list_del(&p->list);
91441 goto out;
91442diff --git a/net/can/af_can.c b/net/can/af_can.c
91443index c4e5085..aa9efdf 100644
91444--- a/net/can/af_can.c
91445+++ b/net/can/af_can.c
91446@@ -862,7 +862,7 @@ static const struct net_proto_family can_family_ops = {
91447 };
91448
91449 /* notifier block for netdevice event */
91450-static struct notifier_block can_netdev_notifier __read_mostly = {
91451+static struct notifier_block can_netdev_notifier = {
91452 .notifier_call = can_notifier,
91453 };
91454
91455diff --git a/net/can/gw.c b/net/can/gw.c
91456index 3ee690e..00d581b 100644
91457--- a/net/can/gw.c
91458+++ b/net/can/gw.c
91459@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
91460 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
91461
91462 static HLIST_HEAD(cgw_list);
91463-static struct notifier_block notifier;
91464
91465 static struct kmem_cache *cgw_cache __read_mostly;
91466
91467@@ -927,6 +926,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
91468 return err;
91469 }
91470
91471+static struct notifier_block notifier = {
91472+ .notifier_call = cgw_notifier
91473+};
91474+
91475 static __init int cgw_module_init(void)
91476 {
91477 /* sanitize given module parameter */
91478@@ -942,7 +945,6 @@ static __init int cgw_module_init(void)
91479 return -ENOMEM;
91480
91481 /* set notifier */
91482- notifier.notifier_call = cgw_notifier;
91483 register_netdevice_notifier(&notifier);
91484
91485 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
91486diff --git a/net/compat.c b/net/compat.c
91487index f0a1ba6..0541331 100644
91488--- a/net/compat.c
91489+++ b/net/compat.c
91490@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
91491 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
91492 __get_user(kmsg->msg_flags, &umsg->msg_flags))
91493 return -EFAULT;
91494- kmsg->msg_name = compat_ptr(tmp1);
91495- kmsg->msg_iov = compat_ptr(tmp2);
91496- kmsg->msg_control = compat_ptr(tmp3);
91497+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
91498+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
91499+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
91500 return 0;
91501 }
91502
91503@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
91504
91505 if (kern_msg->msg_namelen) {
91506 if (mode == VERIFY_READ) {
91507- int err = move_addr_to_kernel(kern_msg->msg_name,
91508+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
91509 kern_msg->msg_namelen,
91510 kern_address);
91511 if (err < 0)
91512@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
91513 kern_msg->msg_name = NULL;
91514
91515 tot_len = iov_from_user_compat_to_kern(kern_iov,
91516- (struct compat_iovec __user *)kern_msg->msg_iov,
91517+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
91518 kern_msg->msg_iovlen);
91519 if (tot_len >= 0)
91520 kern_msg->msg_iov = kern_iov;
91521@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
91522
91523 #define CMSG_COMPAT_FIRSTHDR(msg) \
91524 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
91525- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
91526+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
91527 (struct compat_cmsghdr __user *)NULL)
91528
91529 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
91530 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
91531 (ucmlen) <= (unsigned long) \
91532 ((mhdr)->msg_controllen - \
91533- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
91534+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
91535
91536 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
91537 struct compat_cmsghdr __user *cmsg, int cmsg_len)
91538 {
91539 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
91540- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
91541+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
91542 msg->msg_controllen)
91543 return NULL;
91544 return (struct compat_cmsghdr __user *)ptr;
91545@@ -219,7 +219,7 @@ Efault:
91546
91547 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
91548 {
91549- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
91550+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
91551 struct compat_cmsghdr cmhdr;
91552 struct compat_timeval ctv;
91553 struct compat_timespec cts[3];
91554@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
91555
91556 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
91557 {
91558- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
91559+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
91560 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
91561 int fdnum = scm->fp->count;
91562 struct file **fp = scm->fp->fp;
91563@@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
91564 return -EFAULT;
91565 old_fs = get_fs();
91566 set_fs(KERNEL_DS);
91567- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
91568+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
91569 set_fs(old_fs);
91570
91571 return err;
91572@@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
91573 len = sizeof(ktime);
91574 old_fs = get_fs();
91575 set_fs(KERNEL_DS);
91576- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
91577+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
91578 set_fs(old_fs);
91579
91580 if (!err) {
91581@@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
91582 case MCAST_JOIN_GROUP:
91583 case MCAST_LEAVE_GROUP:
91584 {
91585- struct compat_group_req __user *gr32 = (void *)optval;
91586+ struct compat_group_req __user *gr32 = (void __user *)optval;
91587 struct group_req __user *kgr =
91588 compat_alloc_user_space(sizeof(struct group_req));
91589 u32 interface;
91590@@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
91591 case MCAST_BLOCK_SOURCE:
91592 case MCAST_UNBLOCK_SOURCE:
91593 {
91594- struct compat_group_source_req __user *gsr32 = (void *)optval;
91595+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
91596 struct group_source_req __user *kgsr = compat_alloc_user_space(
91597 sizeof(struct group_source_req));
91598 u32 interface;
91599@@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
91600 }
91601 case MCAST_MSFILTER:
91602 {
91603- struct compat_group_filter __user *gf32 = (void *)optval;
91604+ struct compat_group_filter __user *gf32 = (void __user *)optval;
91605 struct group_filter __user *kgf;
91606 u32 interface, fmode, numsrc;
91607
91608@@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
91609 char __user *optval, int __user *optlen,
91610 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
91611 {
91612- struct compat_group_filter __user *gf32 = (void *)optval;
91613+ struct compat_group_filter __user *gf32 = (void __user *)optval;
91614 struct group_filter __user *kgf;
91615 int __user *koptlen;
91616 u32 interface, fmode, numsrc;
91617@@ -805,7 +805,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
91618
91619 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
91620 return -EINVAL;
91621- if (copy_from_user(a, args, nas[call]))
91622+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
91623 return -EFAULT;
91624 a0 = a[0];
91625 a1 = a[1];
91626diff --git a/net/core/datagram.c b/net/core/datagram.c
91627index b71423d..0360434 100644
91628--- a/net/core/datagram.c
91629+++ b/net/core/datagram.c
91630@@ -295,7 +295,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
91631 }
91632
91633 kfree_skb(skb);
91634- atomic_inc(&sk->sk_drops);
91635+ atomic_inc_unchecked(&sk->sk_drops);
91636 sk_mem_reclaim_partial(sk);
91637
91638 return err;
91639diff --git a/net/core/dev.c b/net/core/dev.c
91640index 7ddbb31..3902452 100644
91641--- a/net/core/dev.c
91642+++ b/net/core/dev.c
91643@@ -1649,7 +1649,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
91644 {
91645 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
91646 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
91647- atomic_long_inc(&dev->rx_dropped);
91648+ atomic_long_inc_unchecked(&dev->rx_dropped);
91649 kfree_skb(skb);
91650 return NET_RX_DROP;
91651 }
91652@@ -1658,7 +1658,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
91653 skb_orphan(skb);
91654
91655 if (unlikely(!is_skb_forwardable(dev, skb))) {
91656- atomic_long_inc(&dev->rx_dropped);
91657+ atomic_long_inc_unchecked(&dev->rx_dropped);
91658 kfree_skb(skb);
91659 return NET_RX_DROP;
91660 }
91661@@ -2404,7 +2404,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
91662
91663 struct dev_gso_cb {
91664 void (*destructor)(struct sk_buff *skb);
91665-};
91666+} __no_const;
91667
91668 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
91669
91670@@ -3139,7 +3139,7 @@ enqueue:
91671
91672 local_irq_restore(flags);
91673
91674- atomic_long_inc(&skb->dev->rx_dropped);
91675+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
91676 kfree_skb(skb);
91677 return NET_RX_DROP;
91678 }
91679@@ -3211,7 +3211,7 @@ int netif_rx_ni(struct sk_buff *skb)
91680 }
91681 EXPORT_SYMBOL(netif_rx_ni);
91682
91683-static void net_tx_action(struct softirq_action *h)
91684+static void net_tx_action(void)
91685 {
91686 struct softnet_data *sd = &__get_cpu_var(softnet_data);
91687
91688@@ -3545,7 +3545,7 @@ ncls:
91689 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
91690 } else {
91691 drop:
91692- atomic_long_inc(&skb->dev->rx_dropped);
91693+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
91694 kfree_skb(skb);
91695 /* Jamal, now you will not able to escape explaining
91696 * me how you were going to use this. :-)
91697@@ -4153,7 +4153,7 @@ void netif_napi_del(struct napi_struct *napi)
91698 }
91699 EXPORT_SYMBOL(netif_napi_del);
91700
91701-static void net_rx_action(struct softirq_action *h)
91702+static void net_rx_action(void)
91703 {
91704 struct softnet_data *sd = &__get_cpu_var(softnet_data);
91705 unsigned long time_limit = jiffies + 2;
91706@@ -5590,7 +5590,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
91707 } else {
91708 netdev_stats_to_stats64(storage, &dev->stats);
91709 }
91710- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
91711+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
91712 return storage;
91713 }
91714 EXPORT_SYMBOL(dev_get_stats);
91715diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
91716index 5b7d0e1..cb960fc 100644
91717--- a/net/core/dev_ioctl.c
91718+++ b/net/core/dev_ioctl.c
91719@@ -365,9 +365,13 @@ void dev_load(struct net *net, const char *name)
91720 if (no_module && capable(CAP_NET_ADMIN))
91721 no_module = request_module("netdev-%s", name);
91722 if (no_module && capable(CAP_SYS_MODULE)) {
91723+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91724+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
91725+#else
91726 if (!request_module("%s", name))
91727 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
91728 name);
91729+#endif
91730 }
91731 }
91732 EXPORT_SYMBOL(dev_load);
91733diff --git a/net/core/ethtool.c b/net/core/ethtool.c
91734index ce91766..3b71cdb 100644
91735--- a/net/core/ethtool.c
91736+++ b/net/core/ethtool.c
91737@@ -1319,10 +1319,19 @@ static int ethtool_get_dump_data(struct net_device *dev,
91738 if (ret)
91739 return ret;
91740
91741- len = (tmp.len > dump.len) ? dump.len : tmp.len;
91742+ len = min(tmp.len, dump.len);
91743 if (!len)
91744 return -EFAULT;
91745
91746+ /* Don't ever let the driver think there's more space available
91747+ * than it requested with .get_dump_flag().
91748+ */
91749+ dump.len = len;
91750+
91751+ /* Always allocate enough space to hold the whole thing so that the
91752+ * driver does not need to check the length and bother with partial
91753+ * dumping.
91754+ */
91755 data = vzalloc(tmp.len);
91756 if (!data)
91757 return -ENOMEM;
91758@@ -1330,6 +1339,16 @@ static int ethtool_get_dump_data(struct net_device *dev,
91759 if (ret)
91760 goto out;
91761
91762+ /* There are two sane possibilities:
91763+ * 1. The driver's .get_dump_data() does not touch dump.len.
91764+ * 2. Or it may set dump.len to how much it really writes, which
91765+ * should be tmp.len (or len if it can do a partial dump).
91766+ * In any case respond to userspace with the actual length of data
91767+ * it's receiving.
91768+ */
91769+ WARN_ON(dump.len != len && dump.len != tmp.len);
91770+ dump.len = len;
91771+
91772 if (copy_to_user(useraddr, &dump, sizeof(dump))) {
91773 ret = -EFAULT;
91774 goto out;
91775diff --git a/net/core/flow.c b/net/core/flow.c
91776index 7102f16..146b4bd 100644
91777--- a/net/core/flow.c
91778+++ b/net/core/flow.c
91779@@ -61,7 +61,7 @@ struct flow_cache {
91780 struct timer_list rnd_timer;
91781 };
91782
91783-atomic_t flow_cache_genid = ATOMIC_INIT(0);
91784+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
91785 EXPORT_SYMBOL(flow_cache_genid);
91786 static struct flow_cache flow_cache_global;
91787 static struct kmem_cache *flow_cachep __read_mostly;
91788@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
91789
91790 static int flow_entry_valid(struct flow_cache_entry *fle)
91791 {
91792- if (atomic_read(&flow_cache_genid) != fle->genid)
91793+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
91794 return 0;
91795 if (fle->object && !fle->object->ops->check(fle->object))
91796 return 0;
91797@@ -258,7 +258,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
91798 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
91799 fcp->hash_count++;
91800 }
91801- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
91802+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
91803 flo = fle->object;
91804 if (!flo)
91805 goto ret_object;
91806@@ -279,7 +279,7 @@ nocache:
91807 }
91808 flo = resolver(net, key, family, dir, flo, ctx);
91809 if (fle) {
91810- fle->genid = atomic_read(&flow_cache_genid);
91811+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
91812 if (!IS_ERR(flo))
91813 fle->object = flo;
91814 else
91815diff --git a/net/core/iovec.c b/net/core/iovec.c
91816index de178e4..1dabd8b 100644
91817--- a/net/core/iovec.c
91818+++ b/net/core/iovec.c
91819@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
91820 if (m->msg_namelen) {
91821 if (mode == VERIFY_READ) {
91822 void __user *namep;
91823- namep = (void __user __force *) m->msg_name;
91824+ namep = (void __force_user *) m->msg_name;
91825 err = move_addr_to_kernel(namep, m->msg_namelen,
91826 address);
91827 if (err < 0)
91828@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
91829 }
91830
91831 size = m->msg_iovlen * sizeof(struct iovec);
91832- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
91833+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
91834 return -EFAULT;
91835
91836 m->msg_iov = iov;
91837diff --git a/net/core/neighbour.c b/net/core/neighbour.c
91838index ce90b02..8752627 100644
91839--- a/net/core/neighbour.c
91840+++ b/net/core/neighbour.c
91841@@ -2771,7 +2771,7 @@ static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
91842 size_t *lenp, loff_t *ppos)
91843 {
91844 int size, ret;
91845- ctl_table tmp = *ctl;
91846+ ctl_table_no_const tmp = *ctl;
91847
91848 tmp.extra1 = &zero;
91849 tmp.extra2 = &unres_qlen_max;
91850diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
91851index 569d355..79cf2d0 100644
91852--- a/net/core/net-procfs.c
91853+++ b/net/core/net-procfs.c
91854@@ -271,8 +271,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
91855 else
91856 seq_printf(seq, "%04x", ntohs(pt->type));
91857
91858+#ifdef CONFIG_GRKERNSEC_HIDESYM
91859+ seq_printf(seq, " %-8s %pf\n",
91860+ pt->dev ? pt->dev->name : "", NULL);
91861+#else
91862 seq_printf(seq, " %-8s %pf\n",
91863 pt->dev ? pt->dev->name : "", pt->func);
91864+#endif
91865 }
91866
91867 return 0;
91868diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
91869index 981fed3..536af34 100644
91870--- a/net/core/net-sysfs.c
91871+++ b/net/core/net-sysfs.c
91872@@ -1311,7 +1311,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
91873 }
91874 EXPORT_SYMBOL(netdev_class_remove_file);
91875
91876-int netdev_kobject_init(void)
91877+int __init netdev_kobject_init(void)
91878 {
91879 kobj_ns_type_register(&net_ns_type_operations);
91880 return class_register(&net_class);
91881diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
91882index f9765203..9feaef8 100644
91883--- a/net/core/net_namespace.c
91884+++ b/net/core/net_namespace.c
91885@@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
91886 int error;
91887 LIST_HEAD(net_exit_list);
91888
91889- list_add_tail(&ops->list, list);
91890+ pax_list_add_tail((struct list_head *)&ops->list, list);
91891 if (ops->init || (ops->id && ops->size)) {
91892 for_each_net(net) {
91893 error = ops_init(ops, net);
91894@@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list,
91895
91896 out_undo:
91897 /* If I have an error cleanup all namespaces I initialized */
91898- list_del(&ops->list);
91899+ pax_list_del((struct list_head *)&ops->list);
91900 ops_exit_list(ops, &net_exit_list);
91901 ops_free_list(ops, &net_exit_list);
91902 return error;
91903@@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
91904 struct net *net;
91905 LIST_HEAD(net_exit_list);
91906
91907- list_del(&ops->list);
91908+ pax_list_del((struct list_head *)&ops->list);
91909 for_each_net(net)
91910 list_add_tail(&net->exit_list, &net_exit_list);
91911 ops_exit_list(ops, &net_exit_list);
91912@@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops)
91913 mutex_lock(&net_mutex);
91914 error = register_pernet_operations(&pernet_list, ops);
91915 if (!error && (first_device == &pernet_list))
91916- first_device = &ops->list;
91917+ first_device = (struct list_head *)&ops->list;
91918 mutex_unlock(&net_mutex);
91919 return error;
91920 }
91921diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
91922index a08bd2b..c59bd7c 100644
91923--- a/net/core/rtnetlink.c
91924+++ b/net/core/rtnetlink.c
91925@@ -58,7 +58,7 @@ struct rtnl_link {
91926 rtnl_doit_func doit;
91927 rtnl_dumpit_func dumpit;
91928 rtnl_calcit_func calcit;
91929-};
91930+} __no_const;
91931
91932 static DEFINE_MUTEX(rtnl_mutex);
91933
91934@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
91935 if (rtnl_link_ops_get(ops->kind))
91936 return -EEXIST;
91937
91938- if (!ops->dellink)
91939- ops->dellink = unregister_netdevice_queue;
91940+ if (!ops->dellink) {
91941+ pax_open_kernel();
91942+ *(void **)&ops->dellink = unregister_netdevice_queue;
91943+ pax_close_kernel();
91944+ }
91945
91946- list_add_tail(&ops->list, &link_ops);
91947+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
91948 return 0;
91949 }
91950 EXPORT_SYMBOL_GPL(__rtnl_link_register);
91951@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
91952 for_each_net(net) {
91953 __rtnl_kill_links(net, ops);
91954 }
91955- list_del(&ops->list);
91956+ pax_list_del((struct list_head *)&ops->list);
91957 }
91958 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
91959
91960@@ -2374,7 +2377,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
91961 struct nlattr *extfilt;
91962 u32 filter_mask = 0;
91963
91964- extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
91965+ extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
91966 IFLA_EXT_MASK);
91967 if (extfilt)
91968 filter_mask = nla_get_u32(extfilt);
91969diff --git a/net/core/scm.c b/net/core/scm.c
91970index 03795d0..98d6bdb 100644
91971--- a/net/core/scm.c
91972+++ b/net/core/scm.c
91973@@ -54,7 +54,7 @@ static __inline__ int scm_check_creds(struct ucred *creds)
91974 return -EINVAL;
91975
91976 if ((creds->pid == task_tgid_vnr(current) ||
91977- ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
91978+ ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
91979 ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
91980 uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
91981 ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
91982@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
91983 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
91984 {
91985 struct cmsghdr __user *cm
91986- = (__force struct cmsghdr __user *)msg->msg_control;
91987+ = (struct cmsghdr __force_user *)msg->msg_control;
91988 struct cmsghdr cmhdr;
91989 int cmlen = CMSG_LEN(len);
91990 int err;
91991@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
91992 err = -EFAULT;
91993 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
91994 goto out;
91995- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
91996+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
91997 goto out;
91998 cmlen = CMSG_SPACE(len);
91999 if (msg->msg_controllen < cmlen)
92000@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
92001 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
92002 {
92003 struct cmsghdr __user *cm
92004- = (__force struct cmsghdr __user*)msg->msg_control;
92005+ = (struct cmsghdr __force_user *)msg->msg_control;
92006
92007 int fdmax = 0;
92008 int fdnum = scm->fp->count;
92009@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
92010 if (fdnum < fdmax)
92011 fdmax = fdnum;
92012
92013- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
92014+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
92015 i++, cmfptr++)
92016 {
92017 struct socket *sock;
92018diff --git a/net/core/skbuff.c b/net/core/skbuff.c
92019index 1c1738c..4cab7f0 100644
92020--- a/net/core/skbuff.c
92021+++ b/net/core/skbuff.c
92022@@ -3087,13 +3087,15 @@ void __init skb_init(void)
92023 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
92024 sizeof(struct sk_buff),
92025 0,
92026- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
92027+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
92028+ SLAB_NO_SANITIZE,
92029 NULL);
92030 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
92031 (2*sizeof(struct sk_buff)) +
92032 sizeof(atomic_t),
92033 0,
92034- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
92035+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
92036+ SLAB_NO_SANITIZE,
92037 NULL);
92038 }
92039
92040diff --git a/net/core/sock.c b/net/core/sock.c
92041index d6d024c..6ea7ab4 100644
92042--- a/net/core/sock.c
92043+++ b/net/core/sock.c
92044@@ -390,7 +390,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
92045 struct sk_buff_head *list = &sk->sk_receive_queue;
92046
92047 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
92048- atomic_inc(&sk->sk_drops);
92049+ atomic_inc_unchecked(&sk->sk_drops);
92050 trace_sock_rcvqueue_full(sk, skb);
92051 return -ENOMEM;
92052 }
92053@@ -400,7 +400,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
92054 return err;
92055
92056 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
92057- atomic_inc(&sk->sk_drops);
92058+ atomic_inc_unchecked(&sk->sk_drops);
92059 return -ENOBUFS;
92060 }
92061
92062@@ -420,7 +420,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
92063 skb_dst_force(skb);
92064
92065 spin_lock_irqsave(&list->lock, flags);
92066- skb->dropcount = atomic_read(&sk->sk_drops);
92067+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
92068 __skb_queue_tail(list, skb);
92069 spin_unlock_irqrestore(&list->lock, flags);
92070
92071@@ -440,7 +440,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
92072 skb->dev = NULL;
92073
92074 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
92075- atomic_inc(&sk->sk_drops);
92076+ atomic_inc_unchecked(&sk->sk_drops);
92077 goto discard_and_relse;
92078 }
92079 if (nested)
92080@@ -458,7 +458,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
92081 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
92082 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
92083 bh_unlock_sock(sk);
92084- atomic_inc(&sk->sk_drops);
92085+ atomic_inc_unchecked(&sk->sk_drops);
92086 goto discard_and_relse;
92087 }
92088
92089@@ -933,12 +933,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
92090 struct timeval tm;
92091 } v;
92092
92093- int lv = sizeof(int);
92094- int len;
92095+ unsigned int lv = sizeof(int);
92096+ unsigned int len;
92097
92098 if (get_user(len, optlen))
92099 return -EFAULT;
92100- if (len < 0)
92101+ if (len > INT_MAX)
92102 return -EINVAL;
92103
92104 memset(&v, 0, sizeof(v));
92105@@ -1090,11 +1090,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
92106
92107 case SO_PEERNAME:
92108 {
92109- char address[128];
92110+ char address[_K_SS_MAXSIZE];
92111
92112 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
92113 return -ENOTCONN;
92114- if (lv < len)
92115+ if (lv < len || sizeof address < len)
92116 return -EINVAL;
92117 if (copy_to_user(optval, address, len))
92118 return -EFAULT;
92119@@ -1161,7 +1161,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
92120
92121 if (len > lv)
92122 len = lv;
92123- if (copy_to_user(optval, &v, len))
92124+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
92125 return -EFAULT;
92126 lenout:
92127 if (put_user(len, optlen))
92128@@ -2277,7 +2277,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
92129 */
92130 smp_wmb();
92131 atomic_set(&sk->sk_refcnt, 1);
92132- atomic_set(&sk->sk_drops, 0);
92133+ atomic_set_unchecked(&sk->sk_drops, 0);
92134 }
92135 EXPORT_SYMBOL(sock_init_data);
92136
92137diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
92138index a0e9cf6..ef7f9ed 100644
92139--- a/net/core/sock_diag.c
92140+++ b/net/core/sock_diag.c
92141@@ -9,26 +9,33 @@
92142 #include <linux/inet_diag.h>
92143 #include <linux/sock_diag.h>
92144
92145-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
92146+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
92147 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
92148 static DEFINE_MUTEX(sock_diag_table_mutex);
92149
92150 int sock_diag_check_cookie(void *sk, __u32 *cookie)
92151 {
92152+#ifndef CONFIG_GRKERNSEC_HIDESYM
92153 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
92154 cookie[1] != INET_DIAG_NOCOOKIE) &&
92155 ((u32)(unsigned long)sk != cookie[0] ||
92156 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
92157 return -ESTALE;
92158 else
92159+#endif
92160 return 0;
92161 }
92162 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
92163
92164 void sock_diag_save_cookie(void *sk, __u32 *cookie)
92165 {
92166+#ifdef CONFIG_GRKERNSEC_HIDESYM
92167+ cookie[0] = 0;
92168+ cookie[1] = 0;
92169+#else
92170 cookie[0] = (u32)(unsigned long)sk;
92171 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
92172+#endif
92173 }
92174 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
92175
92176@@ -113,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
92177 mutex_lock(&sock_diag_table_mutex);
92178 if (sock_diag_handlers[hndl->family])
92179 err = -EBUSY;
92180- else
92181+ else {
92182+ pax_open_kernel();
92183 sock_diag_handlers[hndl->family] = hndl;
92184+ pax_close_kernel();
92185+ }
92186 mutex_unlock(&sock_diag_table_mutex);
92187
92188 return err;
92189@@ -130,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
92190
92191 mutex_lock(&sock_diag_table_mutex);
92192 BUG_ON(sock_diag_handlers[family] != hnld);
92193+ pax_open_kernel();
92194 sock_diag_handlers[family] = NULL;
92195+ pax_close_kernel();
92196 mutex_unlock(&sock_diag_table_mutex);
92197 }
92198 EXPORT_SYMBOL_GPL(sock_diag_unregister);
92199diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
92200index cfdb46a..cef55e1 100644
92201--- a/net/core/sysctl_net_core.c
92202+++ b/net/core/sysctl_net_core.c
92203@@ -28,7 +28,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
92204 {
92205 unsigned int orig_size, size;
92206 int ret, i;
92207- ctl_table tmp = {
92208+ ctl_table_no_const tmp = {
92209 .data = &size,
92210 .maxlen = sizeof(size),
92211 .mode = table->mode
92212@@ -211,13 +211,12 @@ static struct ctl_table netns_core_table[] = {
92213
92214 static __net_init int sysctl_core_net_init(struct net *net)
92215 {
92216- struct ctl_table *tbl;
92217+ ctl_table_no_const *tbl = NULL;
92218
92219 net->core.sysctl_somaxconn = SOMAXCONN;
92220
92221- tbl = netns_core_table;
92222 if (!net_eq(net, &init_net)) {
92223- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
92224+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
92225 if (tbl == NULL)
92226 goto err_dup;
92227
92228@@ -227,17 +226,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
92229 if (net->user_ns != &init_user_ns) {
92230 tbl[0].procname = NULL;
92231 }
92232- }
92233-
92234- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
92235+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
92236+ } else
92237+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
92238 if (net->core.sysctl_hdr == NULL)
92239 goto err_reg;
92240
92241 return 0;
92242
92243 err_reg:
92244- if (tbl != netns_core_table)
92245- kfree(tbl);
92246+ kfree(tbl);
92247 err_dup:
92248 return -ENOMEM;
92249 }
92250@@ -252,7 +250,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
92251 kfree(tbl);
92252 }
92253
92254-static __net_initdata struct pernet_operations sysctl_core_ops = {
92255+static __net_initconst struct pernet_operations sysctl_core_ops = {
92256 .init = sysctl_core_net_init,
92257 .exit = sysctl_core_net_exit,
92258 };
92259diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
92260index c21f200..bc4565b 100644
92261--- a/net/decnet/af_decnet.c
92262+++ b/net/decnet/af_decnet.c
92263@@ -465,6 +465,7 @@ static struct proto dn_proto = {
92264 .sysctl_rmem = sysctl_decnet_rmem,
92265 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
92266 .obj_size = sizeof(struct dn_sock),
92267+ .slab_flags = SLAB_USERCOPY,
92268 };
92269
92270 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
92271diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
92272index a55eecc..dd8428c 100644
92273--- a/net/decnet/sysctl_net_decnet.c
92274+++ b/net/decnet/sysctl_net_decnet.c
92275@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
92276
92277 if (len > *lenp) len = *lenp;
92278
92279- if (copy_to_user(buffer, addr, len))
92280+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
92281 return -EFAULT;
92282
92283 *lenp = len;
92284@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
92285
92286 if (len > *lenp) len = *lenp;
92287
92288- if (copy_to_user(buffer, devname, len))
92289+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
92290 return -EFAULT;
92291
92292 *lenp = len;
92293diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
92294index 55e1fd5..fd602b8 100644
92295--- a/net/ieee802154/6lowpan.c
92296+++ b/net/ieee802154/6lowpan.c
92297@@ -459,7 +459,7 @@ static int lowpan_header_create(struct sk_buff *skb,
92298 hc06_ptr += 3;
92299 } else {
92300 /* compress nothing */
92301- memcpy(hc06_ptr, &hdr, 4);
92302+ memcpy(hc06_ptr, hdr, 4);
92303 /* replace the top byte with new ECN | DSCP format */
92304 *hc06_ptr = tmp;
92305 hc06_ptr += 4;
92306diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
92307index d01be2a..8976537 100644
92308--- a/net/ipv4/af_inet.c
92309+++ b/net/ipv4/af_inet.c
92310@@ -1703,13 +1703,9 @@ static int __init inet_init(void)
92311
92312 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
92313
92314- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
92315- if (!sysctl_local_reserved_ports)
92316- goto out;
92317-
92318 rc = proto_register(&tcp_prot, 1);
92319 if (rc)
92320- goto out_free_reserved_ports;
92321+ goto out;
92322
92323 rc = proto_register(&udp_prot, 1);
92324 if (rc)
92325@@ -1818,8 +1814,6 @@ out_unregister_udp_proto:
92326 proto_unregister(&udp_prot);
92327 out_unregister_tcp_proto:
92328 proto_unregister(&tcp_prot);
92329-out_free_reserved_ports:
92330- kfree(sysctl_local_reserved_ports);
92331 goto out;
92332 }
92333
92334diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
92335index 2e7f194..0fa4d6d 100644
92336--- a/net/ipv4/ah4.c
92337+++ b/net/ipv4/ah4.c
92338@@ -420,7 +420,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
92339 return;
92340
92341 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
92342- atomic_inc(&flow_cache_genid);
92343+ atomic_inc_unchecked(&flow_cache_genid);
92344 rt_genid_bump(net);
92345
92346 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
92347diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
92348index dfc39d4..0d4fa52 100644
92349--- a/net/ipv4/devinet.c
92350+++ b/net/ipv4/devinet.c
92351@@ -771,7 +771,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
92352 ci = nla_data(tb[IFA_CACHEINFO]);
92353 if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
92354 err = -EINVAL;
92355- goto errout;
92356+ goto errout_free;
92357 }
92358 *pvalid_lft = ci->ifa_valid;
92359 *pprefered_lft = ci->ifa_prefered;
92360@@ -779,6 +779,8 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
92361
92362 return ifa;
92363
92364+errout_free:
92365+ inet_free_ifa(ifa);
92366 errout:
92367 return ERR_PTR(err);
92368 }
92369@@ -1529,7 +1531,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
92370 idx = 0;
92371 head = &net->dev_index_head[h];
92372 rcu_read_lock();
92373- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
92374+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
92375 net->dev_base_seq;
92376 hlist_for_each_entry_rcu(dev, head, index_hlist) {
92377 if (idx < s_idx)
92378@@ -1840,7 +1842,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
92379 idx = 0;
92380 head = &net->dev_index_head[h];
92381 rcu_read_lock();
92382- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
92383+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
92384 net->dev_base_seq;
92385 hlist_for_each_entry_rcu(dev, head, index_hlist) {
92386 if (idx < s_idx)
92387@@ -2065,7 +2067,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
92388 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
92389 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
92390
92391-static struct devinet_sysctl_table {
92392+static const struct devinet_sysctl_table {
92393 struct ctl_table_header *sysctl_header;
92394 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
92395 } devinet_sysctl = {
92396@@ -2183,7 +2185,7 @@ static __net_init int devinet_init_net(struct net *net)
92397 int err;
92398 struct ipv4_devconf *all, *dflt;
92399 #ifdef CONFIG_SYSCTL
92400- struct ctl_table *tbl = ctl_forward_entry;
92401+ ctl_table_no_const *tbl = NULL;
92402 struct ctl_table_header *forw_hdr;
92403 #endif
92404
92405@@ -2201,7 +2203,7 @@ static __net_init int devinet_init_net(struct net *net)
92406 goto err_alloc_dflt;
92407
92408 #ifdef CONFIG_SYSCTL
92409- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
92410+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
92411 if (tbl == NULL)
92412 goto err_alloc_ctl;
92413
92414@@ -2221,7 +2223,10 @@ static __net_init int devinet_init_net(struct net *net)
92415 goto err_reg_dflt;
92416
92417 err = -ENOMEM;
92418- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
92419+ if (!net_eq(net, &init_net))
92420+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
92421+ else
92422+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
92423 if (forw_hdr == NULL)
92424 goto err_reg_ctl;
92425 net->ipv4.forw_hdr = forw_hdr;
92426@@ -2237,8 +2242,7 @@ err_reg_ctl:
92427 err_reg_dflt:
92428 __devinet_sysctl_unregister(all);
92429 err_reg_all:
92430- if (tbl != ctl_forward_entry)
92431- kfree(tbl);
92432+ kfree(tbl);
92433 err_alloc_ctl:
92434 #endif
92435 if (dflt != &ipv4_devconf_dflt)
92436diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
92437index 4cfe34d..d2fac8a 100644
92438--- a/net/ipv4/esp4.c
92439+++ b/net/ipv4/esp4.c
92440@@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
92441 }
92442
92443 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
92444- net_adj) & ~(align - 1)) + (net_adj - 2);
92445+ net_adj) & ~(align - 1)) + net_adj - 2;
92446 }
92447
92448 static void esp4_err(struct sk_buff *skb, u32 info)
92449@@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
92450 return;
92451
92452 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
92453- atomic_inc(&flow_cache_genid);
92454+ atomic_inc_unchecked(&flow_cache_genid);
92455 rt_genid_bump(net);
92456
92457 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
92458diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
92459index c7629a2..b62d139 100644
92460--- a/net/ipv4/fib_frontend.c
92461+++ b/net/ipv4/fib_frontend.c
92462@@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
92463 #ifdef CONFIG_IP_ROUTE_MULTIPATH
92464 fib_sync_up(dev);
92465 #endif
92466- atomic_inc(&net->ipv4.dev_addr_genid);
92467+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
92468 rt_cache_flush(dev_net(dev));
92469 break;
92470 case NETDEV_DOWN:
92471 fib_del_ifaddr(ifa, NULL);
92472- atomic_inc(&net->ipv4.dev_addr_genid);
92473+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
92474 if (ifa->ifa_dev->ifa_list == NULL) {
92475 /* Last address was deleted from this interface.
92476 * Disable IP.
92477@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
92478 #ifdef CONFIG_IP_ROUTE_MULTIPATH
92479 fib_sync_up(dev);
92480 #endif
92481- atomic_inc(&net->ipv4.dev_addr_genid);
92482+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
92483 rt_cache_flush(net);
92484 break;
92485 case NETDEV_DOWN:
92486diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
92487index 8f6cb7a..34507f9 100644
92488--- a/net/ipv4/fib_semantics.c
92489+++ b/net/ipv4/fib_semantics.c
92490@@ -765,7 +765,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
92491 nh->nh_saddr = inet_select_addr(nh->nh_dev,
92492 nh->nh_gw,
92493 nh->nh_parent->fib_scope);
92494- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
92495+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
92496
92497 return nh->nh_saddr;
92498 }
92499diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
92500index 49616fe..6e8a13d 100644
92501--- a/net/ipv4/fib_trie.c
92502+++ b/net/ipv4/fib_trie.c
92503@@ -71,7 +71,6 @@
92504 #include <linux/init.h>
92505 #include <linux/list.h>
92506 #include <linux/slab.h>
92507-#include <linux/prefetch.h>
92508 #include <linux/export.h>
92509 #include <net/net_namespace.h>
92510 #include <net/ip.h>
92511@@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
92512 if (!c)
92513 continue;
92514
92515- if (IS_LEAF(c)) {
92516- prefetch(rcu_dereference_rtnl(p->child[idx]));
92517+ if (IS_LEAF(c))
92518 return (struct leaf *) c;
92519- }
92520
92521 /* Rescan start scanning in new node */
92522 p = (struct tnode *) c;
92523diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
92524index 6acb541..9ea617d 100644
92525--- a/net/ipv4/inet_connection_sock.c
92526+++ b/net/ipv4/inet_connection_sock.c
92527@@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
92528 .range = { 32768, 61000 },
92529 };
92530
92531-unsigned long *sysctl_local_reserved_ports;
92532+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
92533 EXPORT_SYMBOL(sysctl_local_reserved_ports);
92534
92535 void inet_get_local_port_range(int *low, int *high)
92536diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
92537index 6af375a..c493c74 100644
92538--- a/net/ipv4/inet_hashtables.c
92539+++ b/net/ipv4/inet_hashtables.c
92540@@ -18,12 +18,15 @@
92541 #include <linux/sched.h>
92542 #include <linux/slab.h>
92543 #include <linux/wait.h>
92544+#include <linux/security.h>
92545
92546 #include <net/inet_connection_sock.h>
92547 #include <net/inet_hashtables.h>
92548 #include <net/secure_seq.h>
92549 #include <net/ip.h>
92550
92551+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
92552+
92553 /*
92554 * Allocate and initialize a new local port bind bucket.
92555 * The bindhash mutex for snum's hash chain must be held here.
92556@@ -554,6 +557,8 @@ ok:
92557 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
92558 spin_unlock(&head->lock);
92559
92560+ gr_update_task_in_ip_table(current, inet_sk(sk));
92561+
92562 if (tw) {
92563 inet_twsk_deschedule(tw, death_row);
92564 while (twrefcnt) {
92565diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
92566index 000e3d2..5472da3 100644
92567--- a/net/ipv4/inetpeer.c
92568+++ b/net/ipv4/inetpeer.c
92569@@ -503,8 +503,8 @@ relookup:
92570 if (p) {
92571 p->daddr = *daddr;
92572 atomic_set(&p->refcnt, 1);
92573- atomic_set(&p->rid, 0);
92574- atomic_set(&p->ip_id_count,
92575+ atomic_set_unchecked(&p->rid, 0);
92576+ atomic_set_unchecked(&p->ip_id_count,
92577 (daddr->family == AF_INET) ?
92578 secure_ip_id(daddr->addr.a4) :
92579 secure_ipv6_id(daddr->addr.a6));
92580diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
92581index b66910a..cfe416e 100644
92582--- a/net/ipv4/ip_fragment.c
92583+++ b/net/ipv4/ip_fragment.c
92584@@ -282,7 +282,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
92585 return 0;
92586
92587 start = qp->rid;
92588- end = atomic_inc_return(&peer->rid);
92589+ end = atomic_inc_return_unchecked(&peer->rid);
92590 qp->rid = end;
92591
92592 rc = qp->q.fragments && (end - start) > max;
92593@@ -759,12 +759,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
92594
92595 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
92596 {
92597- struct ctl_table *table;
92598+ ctl_table_no_const *table = NULL;
92599 struct ctl_table_header *hdr;
92600
92601- table = ip4_frags_ns_ctl_table;
92602 if (!net_eq(net, &init_net)) {
92603- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
92604+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
92605 if (table == NULL)
92606 goto err_alloc;
92607
92608@@ -775,9 +774,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
92609 /* Don't export sysctls to unprivileged users */
92610 if (net->user_ns != &init_user_ns)
92611 table[0].procname = NULL;
92612- }
92613+ hdr = register_net_sysctl(net, "net/ipv4", table);
92614+ } else
92615+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
92616
92617- hdr = register_net_sysctl(net, "net/ipv4", table);
92618 if (hdr == NULL)
92619 goto err_reg;
92620
92621@@ -785,8 +785,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
92622 return 0;
92623
92624 err_reg:
92625- if (!net_eq(net, &init_net))
92626- kfree(table);
92627+ kfree(table);
92628 err_alloc:
92629 return -ENOMEM;
92630 }
92631diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
92632index 855004f..9644112 100644
92633--- a/net/ipv4/ip_gre.c
92634+++ b/net/ipv4/ip_gre.c
92635@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
92636 module_param(log_ecn_error, bool, 0644);
92637 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
92638
92639-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
92640+static struct rtnl_link_ops ipgre_link_ops;
92641 static int ipgre_tunnel_init(struct net_device *dev);
92642
92643 static int ipgre_net_id __read_mostly;
92644@@ -572,7 +572,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
92645 if (daddr)
92646 memcpy(&iph->daddr, daddr, 4);
92647 if (iph->daddr)
92648- return t->hlen;
92649+ return t->hlen + sizeof(*iph);
92650
92651 return -(t->hlen + sizeof(*iph));
92652 }
92653@@ -919,7 +919,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
92654 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
92655 };
92656
92657-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
92658+static struct rtnl_link_ops ipgre_link_ops = {
92659 .kind = "gre",
92660 .maxtype = IFLA_GRE_MAX,
92661 .policy = ipgre_policy,
92662@@ -933,7 +933,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
92663 .fill_info = ipgre_fill_info,
92664 };
92665
92666-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
92667+static struct rtnl_link_ops ipgre_tap_ops = {
92668 .kind = "gretap",
92669 .maxtype = IFLA_GRE_MAX,
92670 .policy = ipgre_policy,
92671diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
92672index d9c4f11..02b82dbc 100644
92673--- a/net/ipv4/ip_sockglue.c
92674+++ b/net/ipv4/ip_sockglue.c
92675@@ -1152,7 +1152,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
92676 len = min_t(unsigned int, len, opt->optlen);
92677 if (put_user(len, optlen))
92678 return -EFAULT;
92679- if (copy_to_user(optval, opt->__data, len))
92680+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
92681+ copy_to_user(optval, opt->__data, len))
92682 return -EFAULT;
92683 return 0;
92684 }
92685@@ -1283,7 +1284,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
92686 if (sk->sk_type != SOCK_STREAM)
92687 return -ENOPROTOOPT;
92688
92689- msg.msg_control = optval;
92690+ msg.msg_control = (void __force_kernel *)optval;
92691 msg.msg_controllen = len;
92692 msg.msg_flags = flags;
92693
92694diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
92695index 17cc0ff..63856c4 100644
92696--- a/net/ipv4/ip_vti.c
92697+++ b/net/ipv4/ip_vti.c
92698@@ -47,7 +47,7 @@
92699 #define HASH_SIZE 16
92700 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
92701
92702-static struct rtnl_link_ops vti_link_ops __read_mostly;
92703+static struct rtnl_link_ops vti_link_ops;
92704
92705 static int vti_net_id __read_mostly;
92706 struct vti_net {
92707@@ -840,7 +840,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
92708 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
92709 };
92710
92711-static struct rtnl_link_ops vti_link_ops __read_mostly = {
92712+static struct rtnl_link_ops vti_link_ops = {
92713 .kind = "vti",
92714 .maxtype = IFLA_VTI_MAX,
92715 .policy = vti_policy,
92716diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
92717index 59cb8c7..a72160c 100644
92718--- a/net/ipv4/ipcomp.c
92719+++ b/net/ipv4/ipcomp.c
92720@@ -48,7 +48,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
92721 return;
92722
92723 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
92724- atomic_inc(&flow_cache_genid);
92725+ atomic_inc_unchecked(&flow_cache_genid);
92726 rt_genid_bump(net);
92727
92728 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
92729diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
92730index efa1138..20dbba0 100644
92731--- a/net/ipv4/ipconfig.c
92732+++ b/net/ipv4/ipconfig.c
92733@@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
92734
92735 mm_segment_t oldfs = get_fs();
92736 set_fs(get_ds());
92737- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
92738+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
92739 set_fs(oldfs);
92740 return res;
92741 }
92742@@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
92743
92744 mm_segment_t oldfs = get_fs();
92745 set_fs(get_ds());
92746- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
92747+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
92748 set_fs(oldfs);
92749 return res;
92750 }
92751@@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
92752
92753 mm_segment_t oldfs = get_fs();
92754 set_fs(get_ds());
92755- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
92756+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
92757 set_fs(oldfs);
92758 return res;
92759 }
92760diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
92761index 7cfc456..e726868 100644
92762--- a/net/ipv4/ipip.c
92763+++ b/net/ipv4/ipip.c
92764@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
92765 static int ipip_net_id __read_mostly;
92766
92767 static int ipip_tunnel_init(struct net_device *dev);
92768-static struct rtnl_link_ops ipip_link_ops __read_mostly;
92769+static struct rtnl_link_ops ipip_link_ops;
92770
92771 static int ipip_err(struct sk_buff *skb, u32 info)
92772 {
92773@@ -406,7 +406,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
92774 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
92775 };
92776
92777-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
92778+static struct rtnl_link_ops ipip_link_ops = {
92779 .kind = "ipip",
92780 .maxtype = IFLA_IPTUN_MAX,
92781 .policy = ipip_policy,
92782diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
92783index 85a4f21..1beb1f5 100644
92784--- a/net/ipv4/netfilter/arp_tables.c
92785+++ b/net/ipv4/netfilter/arp_tables.c
92786@@ -880,14 +880,14 @@ static int compat_table_info(const struct xt_table_info *info,
92787 #endif
92788
92789 static int get_info(struct net *net, void __user *user,
92790- const int *len, int compat)
92791+ int len, int compat)
92792 {
92793 char name[XT_TABLE_MAXNAMELEN];
92794 struct xt_table *t;
92795 int ret;
92796
92797- if (*len != sizeof(struct arpt_getinfo)) {
92798- duprintf("length %u != %Zu\n", *len,
92799+ if (len != sizeof(struct arpt_getinfo)) {
92800+ duprintf("length %u != %Zu\n", len,
92801 sizeof(struct arpt_getinfo));
92802 return -EINVAL;
92803 }
92804@@ -924,7 +924,7 @@ static int get_info(struct net *net, void __user *user,
92805 info.size = private->size;
92806 strcpy(info.name, name);
92807
92808- if (copy_to_user(user, &info, *len) != 0)
92809+ if (copy_to_user(user, &info, len) != 0)
92810 ret = -EFAULT;
92811 else
92812 ret = 0;
92813@@ -1683,7 +1683,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
92814
92815 switch (cmd) {
92816 case ARPT_SO_GET_INFO:
92817- ret = get_info(sock_net(sk), user, len, 1);
92818+ ret = get_info(sock_net(sk), user, *len, 1);
92819 break;
92820 case ARPT_SO_GET_ENTRIES:
92821 ret = compat_get_entries(sock_net(sk), user, len);
92822@@ -1728,7 +1728,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
92823
92824 switch (cmd) {
92825 case ARPT_SO_GET_INFO:
92826- ret = get_info(sock_net(sk), user, len, 0);
92827+ ret = get_info(sock_net(sk), user, *len, 0);
92828 break;
92829
92830 case ARPT_SO_GET_ENTRIES:
92831diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
92832index d23118d..6ad7277 100644
92833--- a/net/ipv4/netfilter/ip_tables.c
92834+++ b/net/ipv4/netfilter/ip_tables.c
92835@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
92836 #endif
92837
92838 static int get_info(struct net *net, void __user *user,
92839- const int *len, int compat)
92840+ int len, int compat)
92841 {
92842 char name[XT_TABLE_MAXNAMELEN];
92843 struct xt_table *t;
92844 int ret;
92845
92846- if (*len != sizeof(struct ipt_getinfo)) {
92847- duprintf("length %u != %zu\n", *len,
92848+ if (len != sizeof(struct ipt_getinfo)) {
92849+ duprintf("length %u != %zu\n", len,
92850 sizeof(struct ipt_getinfo));
92851 return -EINVAL;
92852 }
92853@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
92854 info.size = private->size;
92855 strcpy(info.name, name);
92856
92857- if (copy_to_user(user, &info, *len) != 0)
92858+ if (copy_to_user(user, &info, len) != 0)
92859 ret = -EFAULT;
92860 else
92861 ret = 0;
92862@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
92863
92864 switch (cmd) {
92865 case IPT_SO_GET_INFO:
92866- ret = get_info(sock_net(sk), user, len, 1);
92867+ ret = get_info(sock_net(sk), user, *len, 1);
92868 break;
92869 case IPT_SO_GET_ENTRIES:
92870 ret = compat_get_entries(sock_net(sk), user, len);
92871@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
92872
92873 switch (cmd) {
92874 case IPT_SO_GET_INFO:
92875- ret = get_info(sock_net(sk), user, len, 0);
92876+ ret = get_info(sock_net(sk), user, *len, 0);
92877 break;
92878
92879 case IPT_SO_GET_ENTRIES:
92880diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
92881index 7d93d62..cbbf2a3 100644
92882--- a/net/ipv4/ping.c
92883+++ b/net/ipv4/ping.c
92884@@ -843,7 +843,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
92885 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
92886 0, sock_i_ino(sp),
92887 atomic_read(&sp->sk_refcnt), sp,
92888- atomic_read(&sp->sk_drops), len);
92889+ atomic_read_unchecked(&sp->sk_drops), len);
92890 }
92891
92892 static int ping_seq_show(struct seq_file *seq, void *v)
92893diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
92894index dd44e0a..06dcca4 100644
92895--- a/net/ipv4/raw.c
92896+++ b/net/ipv4/raw.c
92897@@ -309,7 +309,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
92898 int raw_rcv(struct sock *sk, struct sk_buff *skb)
92899 {
92900 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
92901- atomic_inc(&sk->sk_drops);
92902+ atomic_inc_unchecked(&sk->sk_drops);
92903 kfree_skb(skb);
92904 return NET_RX_DROP;
92905 }
92906@@ -745,16 +745,20 @@ static int raw_init(struct sock *sk)
92907
92908 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
92909 {
92910+ struct icmp_filter filter;
92911+
92912 if (optlen > sizeof(struct icmp_filter))
92913 optlen = sizeof(struct icmp_filter);
92914- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
92915+ if (copy_from_user(&filter, optval, optlen))
92916 return -EFAULT;
92917+ raw_sk(sk)->filter = filter;
92918 return 0;
92919 }
92920
92921 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
92922 {
92923 int len, ret = -EFAULT;
92924+ struct icmp_filter filter;
92925
92926 if (get_user(len, optlen))
92927 goto out;
92928@@ -764,8 +768,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
92929 if (len > sizeof(struct icmp_filter))
92930 len = sizeof(struct icmp_filter);
92931 ret = -EFAULT;
92932- if (put_user(len, optlen) ||
92933- copy_to_user(optval, &raw_sk(sk)->filter, len))
92934+ filter = raw_sk(sk)->filter;
92935+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
92936 goto out;
92937 ret = 0;
92938 out: return ret;
92939@@ -994,7 +998,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
92940 0, 0L, 0,
92941 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
92942 0, sock_i_ino(sp),
92943- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
92944+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
92945 }
92946
92947 static int raw_seq_show(struct seq_file *seq, void *v)
92948diff --git a/net/ipv4/route.c b/net/ipv4/route.c
92949index d35bbf0..faa3ab8 100644
92950--- a/net/ipv4/route.c
92951+++ b/net/ipv4/route.c
92952@@ -2558,34 +2558,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
92953 .maxlen = sizeof(int),
92954 .mode = 0200,
92955 .proc_handler = ipv4_sysctl_rtcache_flush,
92956+ .extra1 = &init_net,
92957 },
92958 { },
92959 };
92960
92961 static __net_init int sysctl_route_net_init(struct net *net)
92962 {
92963- struct ctl_table *tbl;
92964+ ctl_table_no_const *tbl = NULL;
92965
92966- tbl = ipv4_route_flush_table;
92967 if (!net_eq(net, &init_net)) {
92968- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
92969+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
92970 if (tbl == NULL)
92971 goto err_dup;
92972
92973 /* Don't export sysctls to unprivileged users */
92974 if (net->user_ns != &init_user_ns)
92975 tbl[0].procname = NULL;
92976- }
92977- tbl[0].extra1 = net;
92978+ tbl[0].extra1 = net;
92979+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
92980+ } else
92981+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
92982
92983- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
92984 if (net->ipv4.route_hdr == NULL)
92985 goto err_reg;
92986 return 0;
92987
92988 err_reg:
92989- if (tbl != ipv4_route_flush_table)
92990- kfree(tbl);
92991+ kfree(tbl);
92992 err_dup:
92993 return -ENOMEM;
92994 }
92995@@ -2608,7 +2608,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
92996
92997 static __net_init int rt_genid_init(struct net *net)
92998 {
92999- atomic_set(&net->rt_genid, 0);
93000+ atomic_set_unchecked(&net->rt_genid, 0);
93001 get_random_bytes(&net->ipv4.dev_addr_genid,
93002 sizeof(net->ipv4.dev_addr_genid));
93003 return 0;
93004diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
93005index 3f25e75..3ae0f4d 100644
93006--- a/net/ipv4/sysctl_net_ipv4.c
93007+++ b/net/ipv4/sysctl_net_ipv4.c
93008@@ -57,7 +57,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
93009 {
93010 int ret;
93011 int range[2];
93012- ctl_table tmp = {
93013+ ctl_table_no_const tmp = {
93014 .data = &range,
93015 .maxlen = sizeof(range),
93016 .mode = table->mode,
93017@@ -110,7 +110,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
93018 int ret;
93019 gid_t urange[2];
93020 kgid_t low, high;
93021- ctl_table tmp = {
93022+ ctl_table_no_const tmp = {
93023 .data = &urange,
93024 .maxlen = sizeof(urange),
93025 .mode = table->mode,
93026@@ -141,7 +141,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
93027 void __user *buffer, size_t *lenp, loff_t *ppos)
93028 {
93029 char val[TCP_CA_NAME_MAX];
93030- ctl_table tbl = {
93031+ ctl_table_no_const tbl = {
93032 .data = val,
93033 .maxlen = TCP_CA_NAME_MAX,
93034 };
93035@@ -160,7 +160,7 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
93036 void __user *buffer, size_t *lenp,
93037 loff_t *ppos)
93038 {
93039- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
93040+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
93041 int ret;
93042
93043 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
93044@@ -177,7 +177,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
93045 void __user *buffer, size_t *lenp,
93046 loff_t *ppos)
93047 {
93048- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
93049+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
93050 int ret;
93051
93052 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
93053@@ -203,15 +203,17 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
93054 struct mem_cgroup *memcg;
93055 #endif
93056
93057- ctl_table tmp = {
93058+ ctl_table_no_const tmp = {
93059 .data = &vec,
93060 .maxlen = sizeof(vec),
93061 .mode = ctl->mode,
93062 };
93063
93064 if (!write) {
93065- ctl->data = &net->ipv4.sysctl_tcp_mem;
93066- return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
93067+ ctl_table_no_const tcp_mem = *ctl;
93068+
93069+ tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
93070+ return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
93071 }
93072
93073 ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
93074@@ -238,7 +240,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
93075 static int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
93076 size_t *lenp, loff_t *ppos)
93077 {
93078- ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
93079+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
93080 struct tcp_fastopen_context *ctxt;
93081 int ret;
93082 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
93083@@ -481,7 +483,7 @@ static struct ctl_table ipv4_table[] = {
93084 },
93085 {
93086 .procname = "ip_local_reserved_ports",
93087- .data = NULL, /* initialized in sysctl_ipv4_init */
93088+ .data = sysctl_local_reserved_ports,
93089 .maxlen = 65536,
93090 .mode = 0644,
93091 .proc_handler = proc_do_large_bitmap,
93092@@ -846,11 +848,10 @@ static struct ctl_table ipv4_net_table[] = {
93093
93094 static __net_init int ipv4_sysctl_init_net(struct net *net)
93095 {
93096- struct ctl_table *table;
93097+ ctl_table_no_const *table = NULL;
93098
93099- table = ipv4_net_table;
93100 if (!net_eq(net, &init_net)) {
93101- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
93102+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
93103 if (table == NULL)
93104 goto err_alloc;
93105
93106@@ -885,15 +886,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
93107
93108 tcp_init_mem(net);
93109
93110- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
93111+ if (!net_eq(net, &init_net))
93112+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
93113+ else
93114+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
93115 if (net->ipv4.ipv4_hdr == NULL)
93116 goto err_reg;
93117
93118 return 0;
93119
93120 err_reg:
93121- if (!net_eq(net, &init_net))
93122- kfree(table);
93123+ kfree(table);
93124 err_alloc:
93125 return -ENOMEM;
93126 }
93127@@ -915,16 +918,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
93128 static __init int sysctl_ipv4_init(void)
93129 {
93130 struct ctl_table_header *hdr;
93131- struct ctl_table *i;
93132-
93133- for (i = ipv4_table; i->procname; i++) {
93134- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
93135- i->data = sysctl_local_reserved_ports;
93136- break;
93137- }
93138- }
93139- if (!i->procname)
93140- return -EINVAL;
93141
93142 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
93143 if (hdr == NULL)
93144diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
93145index 9c62257..651cc27 100644
93146--- a/net/ipv4/tcp_input.c
93147+++ b/net/ipv4/tcp_input.c
93148@@ -4436,7 +4436,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
93149 * simplifies code)
93150 */
93151 static void
93152-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
93153+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
93154 struct sk_buff *head, struct sk_buff *tail,
93155 u32 start, u32 end)
93156 {
93157@@ -5522,6 +5522,7 @@ discard:
93158 tcp_paws_reject(&tp->rx_opt, 0))
93159 goto discard_and_undo;
93160
93161+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
93162 if (th->syn) {
93163 /* We see SYN without ACK. It is attempt of
93164 * simultaneous connect with crossed SYNs.
93165@@ -5572,6 +5573,7 @@ discard:
93166 goto discard;
93167 #endif
93168 }
93169+#endif
93170 /* "fifth, if neither of the SYN or RST bits is set then
93171 * drop the segment and return."
93172 */
93173@@ -5616,7 +5618,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
93174 goto discard;
93175
93176 if (th->syn) {
93177- if (th->fin)
93178+ if (th->fin || th->urg || th->psh)
93179 goto discard;
93180 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
93181 return 1;
93182diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
93183index 7999fc5..c812f42 100644
93184--- a/net/ipv4/tcp_ipv4.c
93185+++ b/net/ipv4/tcp_ipv4.c
93186@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
93187 EXPORT_SYMBOL(sysctl_tcp_low_latency);
93188
93189
93190+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93191+extern int grsec_enable_blackhole;
93192+#endif
93193+
93194 #ifdef CONFIG_TCP_MD5SIG
93195 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
93196 __be32 daddr, __be32 saddr, const struct tcphdr *th);
93197@@ -1855,6 +1859,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
93198 return 0;
93199
93200 reset:
93201+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93202+ if (!grsec_enable_blackhole)
93203+#endif
93204 tcp_v4_send_reset(rsk, skb);
93205 discard:
93206 kfree_skb(skb);
93207@@ -2000,12 +2007,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
93208 TCP_SKB_CB(skb)->sacked = 0;
93209
93210 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
93211- if (!sk)
93212+ if (!sk) {
93213+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93214+ ret = 1;
93215+#endif
93216 goto no_tcp_socket;
93217-
93218+ }
93219 process:
93220- if (sk->sk_state == TCP_TIME_WAIT)
93221+ if (sk->sk_state == TCP_TIME_WAIT) {
93222+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93223+ ret = 2;
93224+#endif
93225 goto do_time_wait;
93226+ }
93227
93228 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
93229 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
93230@@ -2058,6 +2072,10 @@ csum_error:
93231 bad_packet:
93232 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
93233 } else {
93234+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93235+ if (!grsec_enable_blackhole || (ret == 1 &&
93236+ (skb->dev->flags & IFF_LOOPBACK)))
93237+#endif
93238 tcp_v4_send_reset(NULL, skb);
93239 }
93240
93241diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
93242index 0f01788..d52a859 100644
93243--- a/net/ipv4/tcp_minisocks.c
93244+++ b/net/ipv4/tcp_minisocks.c
93245@@ -27,6 +27,10 @@
93246 #include <net/inet_common.h>
93247 #include <net/xfrm.h>
93248
93249+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93250+extern int grsec_enable_blackhole;
93251+#endif
93252+
93253 int sysctl_tcp_syncookies __read_mostly = 1;
93254 EXPORT_SYMBOL(sysctl_tcp_syncookies);
93255
93256@@ -717,7 +721,10 @@ embryonic_reset:
93257 * avoid becoming vulnerable to outside attack aiming at
93258 * resetting legit local connections.
93259 */
93260- req->rsk_ops->send_reset(sk, skb);
93261+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93262+ if (!grsec_enable_blackhole)
93263+#endif
93264+ req->rsk_ops->send_reset(sk, skb);
93265 } else if (fastopen) { /* received a valid RST pkt */
93266 reqsk_fastopen_remove(sk, req, true);
93267 tcp_reset(sk);
93268diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
93269index d4943f6..e7a74a5 100644
93270--- a/net/ipv4/tcp_probe.c
93271+++ b/net/ipv4/tcp_probe.c
93272@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
93273 if (cnt + width >= len)
93274 break;
93275
93276- if (copy_to_user(buf + cnt, tbuf, width))
93277+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
93278 return -EFAULT;
93279 cnt += width;
93280 }
93281diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
93282index 4b85e6f..22f9ac9 100644
93283--- a/net/ipv4/tcp_timer.c
93284+++ b/net/ipv4/tcp_timer.c
93285@@ -22,6 +22,10 @@
93286 #include <linux/gfp.h>
93287 #include <net/tcp.h>
93288
93289+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93290+extern int grsec_lastack_retries;
93291+#endif
93292+
93293 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
93294 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
93295 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
93296@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
93297 }
93298 }
93299
93300+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93301+ if ((sk->sk_state == TCP_LAST_ACK) &&
93302+ (grsec_lastack_retries > 0) &&
93303+ (grsec_lastack_retries < retry_until))
93304+ retry_until = grsec_lastack_retries;
93305+#endif
93306+
93307 if (retransmits_timed_out(sk, retry_until,
93308 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
93309 /* Has it gone just too far? */
93310diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
93311index 93b731d..5a2dd92 100644
93312--- a/net/ipv4/udp.c
93313+++ b/net/ipv4/udp.c
93314@@ -87,6 +87,7 @@
93315 #include <linux/types.h>
93316 #include <linux/fcntl.h>
93317 #include <linux/module.h>
93318+#include <linux/security.h>
93319 #include <linux/socket.h>
93320 #include <linux/sockios.h>
93321 #include <linux/igmp.h>
93322@@ -111,6 +112,10 @@
93323 #include <trace/events/skb.h>
93324 #include "udp_impl.h"
93325
93326+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93327+extern int grsec_enable_blackhole;
93328+#endif
93329+
93330 struct udp_table udp_table __read_mostly;
93331 EXPORT_SYMBOL(udp_table);
93332
93333@@ -594,6 +599,9 @@ found:
93334 return s;
93335 }
93336
93337+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
93338+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
93339+
93340 /*
93341 * This routine is called by the ICMP module when it gets some
93342 * sort of error condition. If err < 0 then the socket should
93343@@ -890,9 +898,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
93344 dport = usin->sin_port;
93345 if (dport == 0)
93346 return -EINVAL;
93347+
93348+ err = gr_search_udp_sendmsg(sk, usin);
93349+ if (err)
93350+ return err;
93351 } else {
93352 if (sk->sk_state != TCP_ESTABLISHED)
93353 return -EDESTADDRREQ;
93354+
93355+ err = gr_search_udp_sendmsg(sk, NULL);
93356+ if (err)
93357+ return err;
93358+
93359 daddr = inet->inet_daddr;
93360 dport = inet->inet_dport;
93361 /* Open fast path for connected socket.
93362@@ -1136,7 +1153,7 @@ static unsigned int first_packet_length(struct sock *sk)
93363 IS_UDPLITE(sk));
93364 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
93365 IS_UDPLITE(sk));
93366- atomic_inc(&sk->sk_drops);
93367+ atomic_inc_unchecked(&sk->sk_drops);
93368 __skb_unlink(skb, rcvq);
93369 __skb_queue_tail(&list_kill, skb);
93370 }
93371@@ -1222,6 +1239,10 @@ try_again:
93372 if (!skb)
93373 goto out;
93374
93375+ err = gr_search_udp_recvmsg(sk, skb);
93376+ if (err)
93377+ goto out_free;
93378+
93379 ulen = skb->len - sizeof(struct udphdr);
93380 copied = len;
93381 if (copied > ulen)
93382@@ -1255,7 +1276,7 @@ try_again:
93383 if (unlikely(err)) {
93384 trace_kfree_skb(skb, udp_recvmsg);
93385 if (!peeked) {
93386- atomic_inc(&sk->sk_drops);
93387+ atomic_inc_unchecked(&sk->sk_drops);
93388 UDP_INC_STATS_USER(sock_net(sk),
93389 UDP_MIB_INERRORS, is_udplite);
93390 }
93391@@ -1542,7 +1563,7 @@ csum_error:
93392 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
93393 drop:
93394 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
93395- atomic_inc(&sk->sk_drops);
93396+ atomic_inc_unchecked(&sk->sk_drops);
93397 kfree_skb(skb);
93398 return -1;
93399 }
93400@@ -1561,7 +1582,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
93401 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
93402
93403 if (!skb1) {
93404- atomic_inc(&sk->sk_drops);
93405+ atomic_inc_unchecked(&sk->sk_drops);
93406 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
93407 IS_UDPLITE(sk));
93408 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
93409@@ -1730,6 +1751,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
93410 goto csum_error;
93411
93412 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
93413+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93414+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
93415+#endif
93416 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
93417
93418 /*
93419@@ -2160,7 +2184,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
93420 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
93421 0, sock_i_ino(sp),
93422 atomic_read(&sp->sk_refcnt), sp,
93423- atomic_read(&sp->sk_drops), len);
93424+ atomic_read_unchecked(&sp->sk_drops), len);
93425 }
93426
93427 int udp4_seq_show(struct seq_file *seq, void *v)
93428diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
93429index 9a459be..086b866 100644
93430--- a/net/ipv4/xfrm4_policy.c
93431+++ b/net/ipv4/xfrm4_policy.c
93432@@ -264,19 +264,18 @@ static struct ctl_table xfrm4_policy_table[] = {
93433
93434 static int __net_init xfrm4_net_init(struct net *net)
93435 {
93436- struct ctl_table *table;
93437+ ctl_table_no_const *table = NULL;
93438 struct ctl_table_header *hdr;
93439
93440- table = xfrm4_policy_table;
93441 if (!net_eq(net, &init_net)) {
93442- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
93443+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
93444 if (!table)
93445 goto err_alloc;
93446
93447 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
93448- }
93449-
93450- hdr = register_net_sysctl(net, "net/ipv4", table);
93451+ hdr = register_net_sysctl(net, "net/ipv4", table);
93452+ } else
93453+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
93454 if (!hdr)
93455 goto err_reg;
93456
93457@@ -284,8 +283,7 @@ static int __net_init xfrm4_net_init(struct net *net)
93458 return 0;
93459
93460 err_reg:
93461- if (!net_eq(net, &init_net))
93462- kfree(table);
93463+ kfree(table);
93464 err_alloc:
93465 return -ENOMEM;
93466 }
93467diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
93468index fb8c94c..80a31d8 100644
93469--- a/net/ipv6/addrconf.c
93470+++ b/net/ipv6/addrconf.c
93471@@ -621,7 +621,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
93472 idx = 0;
93473 head = &net->dev_index_head[h];
93474 rcu_read_lock();
93475- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
93476+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
93477 net->dev_base_seq;
93478 hlist_for_each_entry_rcu(dev, head, index_hlist) {
93479 if (idx < s_idx)
93480@@ -1124,12 +1124,10 @@ retry:
93481 if (ifp->flags & IFA_F_OPTIMISTIC)
93482 addr_flags |= IFA_F_OPTIMISTIC;
93483
93484- ift = !max_addresses ||
93485- ipv6_count_addresses(idev) < max_addresses ?
93486- ipv6_add_addr(idev, &addr, tmp_plen,
93487- ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
93488- addr_flags) : NULL;
93489- if (IS_ERR_OR_NULL(ift)) {
93490+ ift = ipv6_add_addr(idev, &addr, tmp_plen,
93491+ ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
93492+ addr_flags);
93493+ if (IS_ERR(ift)) {
93494 in6_ifa_put(ifp);
93495 in6_dev_put(idev);
93496 pr_info("%s: retry temporary address regeneration\n", __func__);
93497@@ -2380,7 +2378,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
93498 p.iph.ihl = 5;
93499 p.iph.protocol = IPPROTO_IPV6;
93500 p.iph.ttl = 64;
93501- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
93502+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
93503
93504 if (ops->ndo_do_ioctl) {
93505 mm_segment_t oldfs = get_fs();
93506@@ -4002,7 +4000,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
93507 s_ip_idx = ip_idx = cb->args[2];
93508
93509 rcu_read_lock();
93510- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
93511+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
93512 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
93513 idx = 0;
93514 head = &net->dev_index_head[h];
93515@@ -4587,7 +4585,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
93516 dst_free(&ifp->rt->dst);
93517 break;
93518 }
93519- atomic_inc(&net->ipv6.dev_addr_genid);
93520+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
93521 }
93522
93523 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
93524@@ -4607,7 +4605,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
93525 int *valp = ctl->data;
93526 int val = *valp;
93527 loff_t pos = *ppos;
93528- ctl_table lctl;
93529+ ctl_table_no_const lctl;
93530 int ret;
93531
93532 /*
93533@@ -4689,7 +4687,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
93534 int *valp = ctl->data;
93535 int val = *valp;
93536 loff_t pos = *ppos;
93537- ctl_table lctl;
93538+ ctl_table_no_const lctl;
93539 int ret;
93540
93541 /*
93542diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
93543index 40ffd72..aeac0dc 100644
93544--- a/net/ipv6/esp6.c
93545+++ b/net/ipv6/esp6.c
93546@@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
93547 net_adj = 0;
93548
93549 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
93550- net_adj) & ~(align - 1)) + (net_adj - 2);
93551+ net_adj) & ~(align - 1)) + net_adj - 2;
93552 }
93553
93554 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
93555diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
93556index b4ff0a4..db9b764 100644
93557--- a/net/ipv6/icmp.c
93558+++ b/net/ipv6/icmp.c
93559@@ -980,7 +980,7 @@ ctl_table ipv6_icmp_table_template[] = {
93560
93561 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
93562 {
93563- struct ctl_table *table;
93564+ ctl_table_no_const *table;
93565
93566 table = kmemdup(ipv6_icmp_table_template,
93567 sizeof(ipv6_icmp_table_template),
93568diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
93569index ecd6073..58162ae 100644
93570--- a/net/ipv6/ip6_gre.c
93571+++ b/net/ipv6/ip6_gre.c
93572@@ -74,7 +74,7 @@ struct ip6gre_net {
93573 struct net_device *fb_tunnel_dev;
93574 };
93575
93576-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
93577+static struct rtnl_link_ops ip6gre_link_ops;
93578 static int ip6gre_tunnel_init(struct net_device *dev);
93579 static void ip6gre_tunnel_setup(struct net_device *dev);
93580 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
93581@@ -1283,7 +1283,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
93582 }
93583
93584
93585-static struct inet6_protocol ip6gre_protocol __read_mostly = {
93586+static struct inet6_protocol ip6gre_protocol = {
93587 .handler = ip6gre_rcv,
93588 .err_handler = ip6gre_err,
93589 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
93590@@ -1617,7 +1617,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
93591 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
93592 };
93593
93594-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
93595+static struct rtnl_link_ops ip6gre_link_ops = {
93596 .kind = "ip6gre",
93597 .maxtype = IFLA_GRE_MAX,
93598 .policy = ip6gre_policy,
93599@@ -1630,7 +1630,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
93600 .fill_info = ip6gre_fill_info,
93601 };
93602
93603-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
93604+static struct rtnl_link_ops ip6gre_tap_ops = {
93605 .kind = "ip6gretap",
93606 .maxtype = IFLA_GRE_MAX,
93607 .policy = ip6gre_policy,
93608diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
93609index 1e55866..b398dab 100644
93610--- a/net/ipv6/ip6_tunnel.c
93611+++ b/net/ipv6/ip6_tunnel.c
93612@@ -88,7 +88,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
93613
93614 static int ip6_tnl_dev_init(struct net_device *dev);
93615 static void ip6_tnl_dev_setup(struct net_device *dev);
93616-static struct rtnl_link_ops ip6_link_ops __read_mostly;
93617+static struct rtnl_link_ops ip6_link_ops;
93618
93619 static int ip6_tnl_net_id __read_mostly;
93620 struct ip6_tnl_net {
93621@@ -1672,7 +1672,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
93622 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
93623 };
93624
93625-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
93626+static struct rtnl_link_ops ip6_link_ops = {
93627 .kind = "ip6tnl",
93628 .maxtype = IFLA_IPTUN_MAX,
93629 .policy = ip6_tnl_policy,
93630diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
93631index d1e2e8e..51c19ae 100644
93632--- a/net/ipv6/ipv6_sockglue.c
93633+++ b/net/ipv6/ipv6_sockglue.c
93634@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
93635 if (sk->sk_type != SOCK_STREAM)
93636 return -ENOPROTOOPT;
93637
93638- msg.msg_control = optval;
93639+ msg.msg_control = (void __force_kernel *)optval;
93640 msg.msg_controllen = len;
93641 msg.msg_flags = flags;
93642
93643diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
93644index 44400c2..8e11f52 100644
93645--- a/net/ipv6/netfilter/ip6_tables.c
93646+++ b/net/ipv6/netfilter/ip6_tables.c
93647@@ -1078,14 +1078,14 @@ static int compat_table_info(const struct xt_table_info *info,
93648 #endif
93649
93650 static int get_info(struct net *net, void __user *user,
93651- const int *len, int compat)
93652+ int len, int compat)
93653 {
93654 char name[XT_TABLE_MAXNAMELEN];
93655 struct xt_table *t;
93656 int ret;
93657
93658- if (*len != sizeof(struct ip6t_getinfo)) {
93659- duprintf("length %u != %zu\n", *len,
93660+ if (len != sizeof(struct ip6t_getinfo)) {
93661+ duprintf("length %u != %zu\n", len,
93662 sizeof(struct ip6t_getinfo));
93663 return -EINVAL;
93664 }
93665@@ -1122,7 +1122,7 @@ static int get_info(struct net *net, void __user *user,
93666 info.size = private->size;
93667 strcpy(info.name, name);
93668
93669- if (copy_to_user(user, &info, *len) != 0)
93670+ if (copy_to_user(user, &info, len) != 0)
93671 ret = -EFAULT;
93672 else
93673 ret = 0;
93674@@ -1976,7 +1976,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
93675
93676 switch (cmd) {
93677 case IP6T_SO_GET_INFO:
93678- ret = get_info(sock_net(sk), user, len, 1);
93679+ ret = get_info(sock_net(sk), user, *len, 1);
93680 break;
93681 case IP6T_SO_GET_ENTRIES:
93682 ret = compat_get_entries(sock_net(sk), user, len);
93683@@ -2023,7 +2023,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
93684
93685 switch (cmd) {
93686 case IP6T_SO_GET_INFO:
93687- ret = get_info(sock_net(sk), user, len, 0);
93688+ ret = get_info(sock_net(sk), user, *len, 0);
93689 break;
93690
93691 case IP6T_SO_GET_ENTRIES:
93692diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
93693index dffdc1a..ccc6678 100644
93694--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
93695+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
93696@@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
93697
93698 static int nf_ct_frag6_sysctl_register(struct net *net)
93699 {
93700- struct ctl_table *table;
93701+ ctl_table_no_const *table = NULL;
93702 struct ctl_table_header *hdr;
93703
93704- table = nf_ct_frag6_sysctl_table;
93705 if (!net_eq(net, &init_net)) {
93706- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
93707+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
93708 GFP_KERNEL);
93709 if (table == NULL)
93710 goto err_alloc;
93711@@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
93712 table[0].data = &net->nf_frag.frags.timeout;
93713 table[1].data = &net->nf_frag.frags.low_thresh;
93714 table[2].data = &net->nf_frag.frags.high_thresh;
93715- }
93716-
93717- hdr = register_net_sysctl(net, "net/netfilter", table);
93718+ hdr = register_net_sysctl(net, "net/netfilter", table);
93719+ } else
93720+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
93721 if (hdr == NULL)
93722 goto err_reg;
93723
93724@@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
93725 return 0;
93726
93727 err_reg:
93728- if (!net_eq(net, &init_net))
93729- kfree(table);
93730+ kfree(table);
93731 err_alloc:
93732 return -ENOMEM;
93733 }
93734diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
93735index eedff8c..7d7e24a 100644
93736--- a/net/ipv6/raw.c
93737+++ b/net/ipv6/raw.c
93738@@ -108,7 +108,7 @@ found:
93739 */
93740 static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
93741 {
93742- struct icmp6hdr *_hdr;
93743+ struct icmp6hdr _hdr;
93744 const struct icmp6hdr *hdr;
93745
93746 hdr = skb_header_pointer(skb, skb_transport_offset(skb),
93747@@ -378,7 +378,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
93748 {
93749 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
93750 skb_checksum_complete(skb)) {
93751- atomic_inc(&sk->sk_drops);
93752+ atomic_inc_unchecked(&sk->sk_drops);
93753 kfree_skb(skb);
93754 return NET_RX_DROP;
93755 }
93756@@ -406,7 +406,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
93757 struct raw6_sock *rp = raw6_sk(sk);
93758
93759 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
93760- atomic_inc(&sk->sk_drops);
93761+ atomic_inc_unchecked(&sk->sk_drops);
93762 kfree_skb(skb);
93763 return NET_RX_DROP;
93764 }
93765@@ -430,7 +430,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
93766
93767 if (inet->hdrincl) {
93768 if (skb_checksum_complete(skb)) {
93769- atomic_inc(&sk->sk_drops);
93770+ atomic_inc_unchecked(&sk->sk_drops);
93771 kfree_skb(skb);
93772 return NET_RX_DROP;
93773 }
93774@@ -602,7 +602,7 @@ out:
93775 return err;
93776 }
93777
93778-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
93779+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
93780 struct flowi6 *fl6, struct dst_entry **dstp,
93781 unsigned int flags)
93782 {
93783@@ -914,12 +914,15 @@ do_confirm:
93784 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
93785 char __user *optval, int optlen)
93786 {
93787+ struct icmp6_filter filter;
93788+
93789 switch (optname) {
93790 case ICMPV6_FILTER:
93791 if (optlen > sizeof(struct icmp6_filter))
93792 optlen = sizeof(struct icmp6_filter);
93793- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
93794+ if (copy_from_user(&filter, optval, optlen))
93795 return -EFAULT;
93796+ raw6_sk(sk)->filter = filter;
93797 return 0;
93798 default:
93799 return -ENOPROTOOPT;
93800@@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
93801 char __user *optval, int __user *optlen)
93802 {
93803 int len;
93804+ struct icmp6_filter filter;
93805
93806 switch (optname) {
93807 case ICMPV6_FILTER:
93808@@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
93809 len = sizeof(struct icmp6_filter);
93810 if (put_user(len, optlen))
93811 return -EFAULT;
93812- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
93813+ filter = raw6_sk(sk)->filter;
93814+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
93815 return -EFAULT;
93816 return 0;
93817 default:
93818@@ -1251,7 +1256,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
93819 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
93820 0,
93821 sock_i_ino(sp),
93822- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
93823+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
93824 }
93825
93826 static int raw6_seq_show(struct seq_file *seq, void *v)
93827diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
93828index 790d9f4..68ae078 100644
93829--- a/net/ipv6/reassembly.c
93830+++ b/net/ipv6/reassembly.c
93831@@ -621,12 +621,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
93832
93833 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
93834 {
93835- struct ctl_table *table;
93836+ ctl_table_no_const *table = NULL;
93837 struct ctl_table_header *hdr;
93838
93839- table = ip6_frags_ns_ctl_table;
93840 if (!net_eq(net, &init_net)) {
93841- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
93842+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
93843 if (table == NULL)
93844 goto err_alloc;
93845
93846@@ -637,9 +636,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
93847 /* Don't export sysctls to unprivileged users */
93848 if (net->user_ns != &init_user_ns)
93849 table[0].procname = NULL;
93850- }
93851+ hdr = register_net_sysctl(net, "net/ipv6", table);
93852+ } else
93853+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
93854
93855- hdr = register_net_sysctl(net, "net/ipv6", table);
93856 if (hdr == NULL)
93857 goto err_reg;
93858
93859@@ -647,8 +647,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
93860 return 0;
93861
93862 err_reg:
93863- if (!net_eq(net, &init_net))
93864- kfree(table);
93865+ kfree(table);
93866 err_alloc:
93867 return -ENOMEM;
93868 }
93869diff --git a/net/ipv6/route.c b/net/ipv6/route.c
93870index bacce6c..9d1741a 100644
93871--- a/net/ipv6/route.c
93872+++ b/net/ipv6/route.c
93873@@ -2903,7 +2903,7 @@ ctl_table ipv6_route_table_template[] = {
93874
93875 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
93876 {
93877- struct ctl_table *table;
93878+ ctl_table_no_const *table;
93879
93880 table = kmemdup(ipv6_route_table_template,
93881 sizeof(ipv6_route_table_template),
93882diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
93883index 60df36d..f3ab7c8 100644
93884--- a/net/ipv6/sit.c
93885+++ b/net/ipv6/sit.c
93886@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
93887 static void ipip6_dev_free(struct net_device *dev);
93888 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
93889 __be32 *v4dst);
93890-static struct rtnl_link_ops sit_link_ops __read_mostly;
93891+static struct rtnl_link_ops sit_link_ops;
93892
93893 static int sit_net_id __read_mostly;
93894 struct sit_net {
93895@@ -1453,7 +1453,7 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
93896 #endif
93897 };
93898
93899-static struct rtnl_link_ops sit_link_ops __read_mostly = {
93900+static struct rtnl_link_ops sit_link_ops = {
93901 .kind = "sit",
93902 .maxtype = IFLA_IPTUN_MAX,
93903 .policy = ipip6_policy,
93904diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
93905index e85c48b..b8268d3 100644
93906--- a/net/ipv6/sysctl_net_ipv6.c
93907+++ b/net/ipv6/sysctl_net_ipv6.c
93908@@ -40,7 +40,7 @@ static ctl_table ipv6_rotable[] = {
93909
93910 static int __net_init ipv6_sysctl_net_init(struct net *net)
93911 {
93912- struct ctl_table *ipv6_table;
93913+ ctl_table_no_const *ipv6_table;
93914 struct ctl_table *ipv6_route_table;
93915 struct ctl_table *ipv6_icmp_table;
93916 int err;
93917diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
93918index 0a17ed9..2526cc3 100644
93919--- a/net/ipv6/tcp_ipv6.c
93920+++ b/net/ipv6/tcp_ipv6.c
93921@@ -103,6 +103,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
93922 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
93923 }
93924
93925+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93926+extern int grsec_enable_blackhole;
93927+#endif
93928+
93929 static void tcp_v6_hash(struct sock *sk)
93930 {
93931 if (sk->sk_state != TCP_CLOSE) {
93932@@ -1398,6 +1402,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
93933 return 0;
93934
93935 reset:
93936+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93937+ if (!grsec_enable_blackhole)
93938+#endif
93939 tcp_v6_send_reset(sk, skb);
93940 discard:
93941 if (opt_skb)
93942@@ -1480,12 +1487,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
93943 TCP_SKB_CB(skb)->sacked = 0;
93944
93945 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
93946- if (!sk)
93947+ if (!sk) {
93948+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93949+ ret = 1;
93950+#endif
93951 goto no_tcp_socket;
93952+ }
93953
93954 process:
93955- if (sk->sk_state == TCP_TIME_WAIT)
93956+ if (sk->sk_state == TCP_TIME_WAIT) {
93957+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93958+ ret = 2;
93959+#endif
93960 goto do_time_wait;
93961+ }
93962
93963 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
93964 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
93965@@ -1536,6 +1551,10 @@ csum_error:
93966 bad_packet:
93967 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
93968 } else {
93969+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93970+ if (!grsec_enable_blackhole || (ret == 1 &&
93971+ (skb->dev->flags & IFF_LOOPBACK)))
93972+#endif
93973 tcp_v6_send_reset(NULL, skb);
93974 }
93975
93976diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
93977index e7b28f9..d09c290 100644
93978--- a/net/ipv6/udp.c
93979+++ b/net/ipv6/udp.c
93980@@ -52,6 +52,10 @@
93981 #include <trace/events/skb.h>
93982 #include "udp_impl.h"
93983
93984+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93985+extern int grsec_enable_blackhole;
93986+#endif
93987+
93988 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
93989 {
93990 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
93991@@ -419,7 +423,7 @@ try_again:
93992 if (unlikely(err)) {
93993 trace_kfree_skb(skb, udpv6_recvmsg);
93994 if (!peeked) {
93995- atomic_inc(&sk->sk_drops);
93996+ atomic_inc_unchecked(&sk->sk_drops);
93997 if (is_udp4)
93998 UDP_INC_STATS_USER(sock_net(sk),
93999 UDP_MIB_INERRORS,
94000@@ -665,7 +669,7 @@ csum_error:
94001 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
94002 drop:
94003 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
94004- atomic_inc(&sk->sk_drops);
94005+ atomic_inc_unchecked(&sk->sk_drops);
94006 kfree_skb(skb);
94007 return -1;
94008 }
94009@@ -723,7 +727,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
94010 if (likely(skb1 == NULL))
94011 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
94012 if (!skb1) {
94013- atomic_inc(&sk->sk_drops);
94014+ atomic_inc_unchecked(&sk->sk_drops);
94015 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
94016 IS_UDPLITE(sk));
94017 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
94018@@ -860,6 +864,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
94019 goto csum_error;
94020
94021 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
94022+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
94023+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
94024+#endif
94025 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
94026
94027 kfree_skb(skb);
94028@@ -1392,7 +1399,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
94029 0,
94030 sock_i_ino(sp),
94031 atomic_read(&sp->sk_refcnt), sp,
94032- atomic_read(&sp->sk_drops));
94033+ atomic_read_unchecked(&sp->sk_drops));
94034 }
94035
94036 int udp6_seq_show(struct seq_file *seq, void *v)
94037diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
94038index 23ed03d..465a71d 100644
94039--- a/net/ipv6/xfrm6_policy.c
94040+++ b/net/ipv6/xfrm6_policy.c
94041@@ -324,19 +324,19 @@ static struct ctl_table xfrm6_policy_table[] = {
94042
94043 static int __net_init xfrm6_net_init(struct net *net)
94044 {
94045- struct ctl_table *table;
94046+ ctl_table_no_const *table = NULL;
94047 struct ctl_table_header *hdr;
94048
94049- table = xfrm6_policy_table;
94050 if (!net_eq(net, &init_net)) {
94051- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
94052+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
94053 if (!table)
94054 goto err_alloc;
94055
94056 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
94057- }
94058+ hdr = register_net_sysctl(net, "net/ipv6", table);
94059+ } else
94060+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
94061
94062- hdr = register_net_sysctl(net, "net/ipv6", table);
94063 if (!hdr)
94064 goto err_reg;
94065
94066@@ -344,8 +344,7 @@ static int __net_init xfrm6_net_init(struct net *net)
94067 return 0;
94068
94069 err_reg:
94070- if (!net_eq(net, &init_net))
94071- kfree(table);
94072+ kfree(table);
94073 err_alloc:
94074 return -ENOMEM;
94075 }
94076diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
94077index 41ac7938..75e3bb1 100644
94078--- a/net/irda/ircomm/ircomm_tty.c
94079+++ b/net/irda/ircomm/ircomm_tty.c
94080@@ -319,11 +319,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
94081 add_wait_queue(&port->open_wait, &wait);
94082
94083 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
94084- __FILE__, __LINE__, tty->driver->name, port->count);
94085+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
94086
94087 spin_lock_irqsave(&port->lock, flags);
94088 if (!tty_hung_up_p(filp))
94089- port->count--;
94090+ atomic_dec(&port->count);
94091 port->blocked_open++;
94092 spin_unlock_irqrestore(&port->lock, flags);
94093
94094@@ -358,7 +358,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
94095 }
94096
94097 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
94098- __FILE__, __LINE__, tty->driver->name, port->count);
94099+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
94100
94101 schedule();
94102 }
94103@@ -368,12 +368,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
94104
94105 spin_lock_irqsave(&port->lock, flags);
94106 if (!tty_hung_up_p(filp))
94107- port->count++;
94108+ atomic_inc(&port->count);
94109 port->blocked_open--;
94110 spin_unlock_irqrestore(&port->lock, flags);
94111
94112 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
94113- __FILE__, __LINE__, tty->driver->name, port->count);
94114+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
94115
94116 if (!retval)
94117 port->flags |= ASYNC_NORMAL_ACTIVE;
94118@@ -447,12 +447,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
94119
94120 /* ++ is not atomic, so this should be protected - Jean II */
94121 spin_lock_irqsave(&self->port.lock, flags);
94122- self->port.count++;
94123+ atomic_inc(&self->port.count);
94124 spin_unlock_irqrestore(&self->port.lock, flags);
94125 tty_port_tty_set(&self->port, tty);
94126
94127 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
94128- self->line, self->port.count);
94129+ self->line, atomic_read(&self->port.count));
94130
94131 /* Not really used by us, but lets do it anyway */
94132 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
94133@@ -989,7 +989,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
94134 tty_kref_put(port->tty);
94135 }
94136 port->tty = NULL;
94137- port->count = 0;
94138+ atomic_set(&port->count, 0);
94139 spin_unlock_irqrestore(&port->lock, flags);
94140
94141 wake_up_interruptible(&port->open_wait);
94142@@ -1346,7 +1346,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
94143 seq_putc(m, '\n');
94144
94145 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
94146- seq_printf(m, "Open count: %d\n", self->port.count);
94147+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
94148 seq_printf(m, "Max data size: %d\n", self->max_data_size);
94149 seq_printf(m, "Max header size: %d\n", self->max_header_size);
94150
94151diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
94152index ae69165..c8b82d8 100644
94153--- a/net/iucv/af_iucv.c
94154+++ b/net/iucv/af_iucv.c
94155@@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
94156
94157 write_lock_bh(&iucv_sk_list.lock);
94158
94159- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
94160+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
94161 while (__iucv_get_sock_by_name(name)) {
94162 sprintf(name, "%08x",
94163- atomic_inc_return(&iucv_sk_list.autobind_name));
94164+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
94165 }
94166
94167 write_unlock_bh(&iucv_sk_list.lock);
94168diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
94169index 4fe76ff..426a904 100644
94170--- a/net/iucv/iucv.c
94171+++ b/net/iucv/iucv.c
94172@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
94173 return NOTIFY_OK;
94174 }
94175
94176-static struct notifier_block __refdata iucv_cpu_notifier = {
94177+static struct notifier_block iucv_cpu_notifier = {
94178 .notifier_call = iucv_cpu_notify,
94179 };
94180
94181diff --git a/net/key/af_key.c b/net/key/af_key.c
94182index ab8bd2c..cd2d641 100644
94183--- a/net/key/af_key.c
94184+++ b/net/key/af_key.c
94185@@ -3048,10 +3048,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
94186 static u32 get_acqseq(void)
94187 {
94188 u32 res;
94189- static atomic_t acqseq;
94190+ static atomic_unchecked_t acqseq;
94191
94192 do {
94193- res = atomic_inc_return(&acqseq);
94194+ res = atomic_inc_return_unchecked(&acqseq);
94195 } while (!res);
94196 return res;
94197 }
94198diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
94199index ae36f8e..09d42ac 100644
94200--- a/net/mac80211/cfg.c
94201+++ b/net/mac80211/cfg.c
94202@@ -806,7 +806,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
94203 ret = ieee80211_vif_use_channel(sdata, chandef,
94204 IEEE80211_CHANCTX_EXCLUSIVE);
94205 }
94206- } else if (local->open_count == local->monitors) {
94207+ } else if (local_read(&local->open_count) == local->monitors) {
94208 local->_oper_chandef = *chandef;
94209 ieee80211_hw_config(local, 0);
94210 }
94211@@ -2922,7 +2922,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
94212 else
94213 local->probe_req_reg--;
94214
94215- if (!local->open_count)
94216+ if (!local_read(&local->open_count))
94217 break;
94218
94219 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
94220@@ -3385,8 +3385,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
94221 if (chanctx_conf) {
94222 *chandef = chanctx_conf->def;
94223 ret = 0;
94224- } else if (local->open_count > 0 &&
94225- local->open_count == local->monitors &&
94226+ } else if (local_read(&local->open_count) > 0 &&
94227+ local_read(&local->open_count) == local->monitors &&
94228 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
94229 if (local->use_chanctx)
94230 *chandef = local->monitor_chandef;
94231diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
94232index 9ca8e32..48e4a9b 100644
94233--- a/net/mac80211/ieee80211_i.h
94234+++ b/net/mac80211/ieee80211_i.h
94235@@ -28,6 +28,7 @@
94236 #include <net/ieee80211_radiotap.h>
94237 #include <net/cfg80211.h>
94238 #include <net/mac80211.h>
94239+#include <asm/local.h>
94240 #include "key.h"
94241 #include "sta_info.h"
94242 #include "debug.h"
94243@@ -891,7 +892,7 @@ struct ieee80211_local {
94244 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
94245 spinlock_t queue_stop_reason_lock;
94246
94247- int open_count;
94248+ local_t open_count;
94249 int monitors, cooked_mntrs;
94250 /* number of interfaces with corresponding FIF_ flags */
94251 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
94252diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
94253index 514e90f..56f22bf 100644
94254--- a/net/mac80211/iface.c
94255+++ b/net/mac80211/iface.c
94256@@ -502,7 +502,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
94257 break;
94258 }
94259
94260- if (local->open_count == 0) {
94261+ if (local_read(&local->open_count) == 0) {
94262 res = drv_start(local);
94263 if (res)
94264 goto err_del_bss;
94265@@ -545,7 +545,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
94266 break;
94267 }
94268
94269- if (local->monitors == 0 && local->open_count == 0) {
94270+ if (local->monitors == 0 && local_read(&local->open_count) == 0) {
94271 res = ieee80211_add_virtual_monitor(local);
94272 if (res)
94273 goto err_stop;
94274@@ -653,7 +653,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
94275 atomic_inc(&local->iff_promiscs);
94276
94277 if (coming_up)
94278- local->open_count++;
94279+ local_inc(&local->open_count);
94280
94281 if (hw_reconf_flags)
94282 ieee80211_hw_config(local, hw_reconf_flags);
94283@@ -691,7 +691,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
94284 err_del_interface:
94285 drv_remove_interface(local, sdata);
94286 err_stop:
94287- if (!local->open_count)
94288+ if (!local_read(&local->open_count))
94289 drv_stop(local);
94290 err_del_bss:
94291 sdata->bss = NULL;
94292@@ -828,7 +828,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
94293 }
94294
94295 if (going_down)
94296- local->open_count--;
94297+ local_dec(&local->open_count);
94298
94299 switch (sdata->vif.type) {
94300 case NL80211_IFTYPE_AP_VLAN:
94301@@ -895,7 +895,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
94302 }
94303 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
94304
94305- if (local->open_count == 0)
94306+ if (local_read(&local->open_count) == 0)
94307 ieee80211_clear_tx_pending(local);
94308
94309 /*
94310@@ -931,7 +931,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
94311
94312 ieee80211_recalc_ps(local, -1);
94313
94314- if (local->open_count == 0) {
94315+ if (local_read(&local->open_count) == 0) {
94316 ieee80211_stop_device(local);
94317
94318 /* no reconfiguring after stop! */
94319@@ -942,7 +942,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
94320 ieee80211_configure_filter(local);
94321 ieee80211_hw_config(local, hw_reconf_flags);
94322
94323- if (local->monitors == local->open_count)
94324+ if (local->monitors == local_read(&local->open_count))
94325 ieee80211_add_virtual_monitor(local);
94326 }
94327
94328diff --git a/net/mac80211/main.c b/net/mac80211/main.c
94329index 8a7bfc4..be07e86 100644
94330--- a/net/mac80211/main.c
94331+++ b/net/mac80211/main.c
94332@@ -181,7 +181,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
94333 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
94334 IEEE80211_CONF_CHANGE_POWER);
94335
94336- if (changed && local->open_count) {
94337+ if (changed && local_read(&local->open_count)) {
94338 ret = drv_config(local, changed);
94339 /*
94340 * Goal:
94341@@ -921,7 +921,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
94342 hw->queues = IEEE80211_MAX_QUEUES;
94343
94344 local->workqueue =
94345- alloc_ordered_workqueue(wiphy_name(local->hw.wiphy), 0);
94346+ alloc_ordered_workqueue("%s", 0, wiphy_name(local->hw.wiphy));
94347 if (!local->workqueue) {
94348 result = -ENOMEM;
94349 goto fail_workqueue;
94350diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
94351index 3401262..d5cd68d 100644
94352--- a/net/mac80211/pm.c
94353+++ b/net/mac80211/pm.c
94354@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
94355 struct ieee80211_sub_if_data *sdata;
94356 struct sta_info *sta;
94357
94358- if (!local->open_count)
94359+ if (!local_read(&local->open_count))
94360 goto suspend;
94361
94362 ieee80211_scan_cancel(local);
94363@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
94364 cancel_work_sync(&local->dynamic_ps_enable_work);
94365 del_timer_sync(&local->dynamic_ps_timer);
94366
94367- local->wowlan = wowlan && local->open_count;
94368+ local->wowlan = wowlan && local_read(&local->open_count);
94369 if (local->wowlan) {
94370 int err = drv_suspend(local, wowlan);
94371 if (err < 0) {
94372@@ -116,7 +116,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
94373 WARN_ON(!list_empty(&local->chanctx_list));
94374
94375 /* stop hardware - this must stop RX */
94376- if (local->open_count)
94377+ if (local_read(&local->open_count))
94378 ieee80211_stop_device(local);
94379
94380 suspend:
94381diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
94382index a02bef3..f2f38dd 100644
94383--- a/net/mac80211/rate.c
94384+++ b/net/mac80211/rate.c
94385@@ -712,7 +712,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
94386
94387 ASSERT_RTNL();
94388
94389- if (local->open_count)
94390+ if (local_read(&local->open_count))
94391 return -EBUSY;
94392
94393 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
94394diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
94395index c97a065..ff61928 100644
94396--- a/net/mac80211/rc80211_pid_debugfs.c
94397+++ b/net/mac80211/rc80211_pid_debugfs.c
94398@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
94399
94400 spin_unlock_irqrestore(&events->lock, status);
94401
94402- if (copy_to_user(buf, pb, p))
94403+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
94404 return -EFAULT;
94405
94406 return p;
94407diff --git a/net/mac80211/util.c b/net/mac80211/util.c
94408index 72e6292..e6319eb 100644
94409--- a/net/mac80211/util.c
94410+++ b/net/mac80211/util.c
94411@@ -1472,7 +1472,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
94412 }
94413 #endif
94414 /* everything else happens only if HW was up & running */
94415- if (!local->open_count)
94416+ if (!local_read(&local->open_count))
94417 goto wake_up;
94418
94419 /*
94420@@ -1696,7 +1696,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
94421 local->in_reconfig = false;
94422 barrier();
94423
94424- if (local->monitors == local->open_count && local->monitors > 0)
94425+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
94426 ieee80211_add_virtual_monitor(local);
94427
94428 /*
94429diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
94430index 56d22ca..87c778f 100644
94431--- a/net/netfilter/Kconfig
94432+++ b/net/netfilter/Kconfig
94433@@ -958,6 +958,16 @@ config NETFILTER_XT_MATCH_ESP
94434
94435 To compile it as a module, choose M here. If unsure, say N.
94436
94437+config NETFILTER_XT_MATCH_GRADM
94438+ tristate '"gradm" match support'
94439+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
94440+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
94441+ ---help---
94442+ The gradm match allows to match on grsecurity RBAC being enabled.
94443+ It is useful when iptables rules are applied early on bootup to
94444+ prevent connections to the machine (except from a trusted host)
94445+ while the RBAC system is disabled.
94446+
94447 config NETFILTER_XT_MATCH_HASHLIMIT
94448 tristate '"hashlimit" match support'
94449 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
94450diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
94451index a1abf87..dbcb7ee 100644
94452--- a/net/netfilter/Makefile
94453+++ b/net/netfilter/Makefile
94454@@ -112,6 +112,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
94455 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
94456 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
94457 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
94458+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
94459 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
94460 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
94461 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
94462diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
94463index f771390..145b765 100644
94464--- a/net/netfilter/ipset/ip_set_core.c
94465+++ b/net/netfilter/ipset/ip_set_core.c
94466@@ -1820,7 +1820,7 @@ done:
94467 return ret;
94468 }
94469
94470-static struct nf_sockopt_ops so_set __read_mostly = {
94471+static struct nf_sockopt_ops so_set = {
94472 .pf = PF_INET,
94473 .get_optmin = SO_IP_SET,
94474 .get_optmax = SO_IP_SET + 1,
94475diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
94476index a083bda..da661c3 100644
94477--- a/net/netfilter/ipvs/ip_vs_conn.c
94478+++ b/net/netfilter/ipvs/ip_vs_conn.c
94479@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
94480 /* Increase the refcnt counter of the dest */
94481 ip_vs_dest_hold(dest);
94482
94483- conn_flags = atomic_read(&dest->conn_flags);
94484+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
94485 if (cp->protocol != IPPROTO_UDP)
94486 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
94487 flags = cp->flags;
94488@@ -900,7 +900,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
94489
94490 cp->control = NULL;
94491 atomic_set(&cp->n_control, 0);
94492- atomic_set(&cp->in_pkts, 0);
94493+ atomic_set_unchecked(&cp->in_pkts, 0);
94494
94495 cp->packet_xmit = NULL;
94496 cp->app = NULL;
94497@@ -1190,7 +1190,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
94498
94499 /* Don't drop the entry if its number of incoming packets is not
94500 located in [0, 8] */
94501- i = atomic_read(&cp->in_pkts);
94502+ i = atomic_read_unchecked(&cp->in_pkts);
94503 if (i > 8 || i < 0) return 0;
94504
94505 if (!todrop_rate[i]) return 0;
94506diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
94507index 23b8eb5..48a8959 100644
94508--- a/net/netfilter/ipvs/ip_vs_core.c
94509+++ b/net/netfilter/ipvs/ip_vs_core.c
94510@@ -559,7 +559,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
94511 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
94512 /* do not touch skb anymore */
94513
94514- atomic_inc(&cp->in_pkts);
94515+ atomic_inc_unchecked(&cp->in_pkts);
94516 ip_vs_conn_put(cp);
94517 return ret;
94518 }
94519@@ -1711,7 +1711,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
94520 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
94521 pkts = sysctl_sync_threshold(ipvs);
94522 else
94523- pkts = atomic_add_return(1, &cp->in_pkts);
94524+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
94525
94526 if (ipvs->sync_state & IP_VS_STATE_MASTER)
94527 ip_vs_sync_conn(net, cp, pkts);
94528diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
94529index 9e6c2a0..28552e2 100644
94530--- a/net/netfilter/ipvs/ip_vs_ctl.c
94531+++ b/net/netfilter/ipvs/ip_vs_ctl.c
94532@@ -789,7 +789,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
94533 */
94534 ip_vs_rs_hash(ipvs, dest);
94535 }
94536- atomic_set(&dest->conn_flags, conn_flags);
94537+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
94538
94539 /* bind the service */
94540 if (!dest->svc) {
94541@@ -1657,7 +1657,7 @@ proc_do_sync_ports(ctl_table *table, int write,
94542 * align with netns init in ip_vs_control_net_init()
94543 */
94544
94545-static struct ctl_table vs_vars[] = {
94546+static ctl_table_no_const vs_vars[] __read_only = {
94547 {
94548 .procname = "amemthresh",
94549 .maxlen = sizeof(int),
94550@@ -2060,7 +2060,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
94551 " %-7s %-6d %-10d %-10d\n",
94552 &dest->addr.in6,
94553 ntohs(dest->port),
94554- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
94555+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
94556 atomic_read(&dest->weight),
94557 atomic_read(&dest->activeconns),
94558 atomic_read(&dest->inactconns));
94559@@ -2071,7 +2071,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
94560 "%-7s %-6d %-10d %-10d\n",
94561 ntohl(dest->addr.ip),
94562 ntohs(dest->port),
94563- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
94564+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
94565 atomic_read(&dest->weight),
94566 atomic_read(&dest->activeconns),
94567 atomic_read(&dest->inactconns));
94568@@ -2549,7 +2549,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
94569
94570 entry.addr = dest->addr.ip;
94571 entry.port = dest->port;
94572- entry.conn_flags = atomic_read(&dest->conn_flags);
94573+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
94574 entry.weight = atomic_read(&dest->weight);
94575 entry.u_threshold = dest->u_threshold;
94576 entry.l_threshold = dest->l_threshold;
94577@@ -3092,7 +3092,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
94578 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
94579 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
94580 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
94581- (atomic_read(&dest->conn_flags) &
94582+ (atomic_read_unchecked(&dest->conn_flags) &
94583 IP_VS_CONN_F_FWD_MASK)) ||
94584 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
94585 atomic_read(&dest->weight)) ||
94586@@ -3682,7 +3682,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
94587 {
94588 int idx;
94589 struct netns_ipvs *ipvs = net_ipvs(net);
94590- struct ctl_table *tbl;
94591+ ctl_table_no_const *tbl;
94592
94593 atomic_set(&ipvs->dropentry, 0);
94594 spin_lock_init(&ipvs->dropentry_lock);
94595diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
94596index 5ea26bd..c9bc65f 100644
94597--- a/net/netfilter/ipvs/ip_vs_lblc.c
94598+++ b/net/netfilter/ipvs/ip_vs_lblc.c
94599@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
94600 * IPVS LBLC sysctl table
94601 */
94602 #ifdef CONFIG_SYSCTL
94603-static ctl_table vs_vars_table[] = {
94604+static ctl_table_no_const vs_vars_table[] __read_only = {
94605 {
94606 .procname = "lblc_expiration",
94607 .data = NULL,
94608diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
94609index 50123c2..067c773 100644
94610--- a/net/netfilter/ipvs/ip_vs_lblcr.c
94611+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
94612@@ -299,7 +299,7 @@ struct ip_vs_lblcr_table {
94613 * IPVS LBLCR sysctl table
94614 */
94615
94616-static ctl_table vs_vars_table[] = {
94617+static ctl_table_no_const vs_vars_table[] __read_only = {
94618 {
94619 .procname = "lblcr_expiration",
94620 .data = NULL,
94621diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
94622index f6046d9..4f10cfd 100644
94623--- a/net/netfilter/ipvs/ip_vs_sync.c
94624+++ b/net/netfilter/ipvs/ip_vs_sync.c
94625@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
94626 cp = cp->control;
94627 if (cp) {
94628 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
94629- pkts = atomic_add_return(1, &cp->in_pkts);
94630+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
94631 else
94632 pkts = sysctl_sync_threshold(ipvs);
94633 ip_vs_sync_conn(net, cp->control, pkts);
94634@@ -758,7 +758,7 @@ control:
94635 if (!cp)
94636 return;
94637 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
94638- pkts = atomic_add_return(1, &cp->in_pkts);
94639+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
94640 else
94641 pkts = sysctl_sync_threshold(ipvs);
94642 goto sloop;
94643@@ -882,7 +882,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
94644
94645 if (opt)
94646 memcpy(&cp->in_seq, opt, sizeof(*opt));
94647- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
94648+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
94649 cp->state = state;
94650 cp->old_state = cp->state;
94651 /*
94652diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
94653index b75ff64..0c51bbe 100644
94654--- a/net/netfilter/ipvs/ip_vs_xmit.c
94655+++ b/net/netfilter/ipvs/ip_vs_xmit.c
94656@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
94657 else
94658 rc = NF_ACCEPT;
94659 /* do not touch skb anymore */
94660- atomic_inc(&cp->in_pkts);
94661+ atomic_inc_unchecked(&cp->in_pkts);
94662 goto out;
94663 }
94664
94665@@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
94666 else
94667 rc = NF_ACCEPT;
94668 /* do not touch skb anymore */
94669- atomic_inc(&cp->in_pkts);
94670+ atomic_inc_unchecked(&cp->in_pkts);
94671 goto out;
94672 }
94673
94674diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
94675index 2d3030a..7ba1c0a 100644
94676--- a/net/netfilter/nf_conntrack_acct.c
94677+++ b/net/netfilter/nf_conntrack_acct.c
94678@@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
94679 #ifdef CONFIG_SYSCTL
94680 static int nf_conntrack_acct_init_sysctl(struct net *net)
94681 {
94682- struct ctl_table *table;
94683+ ctl_table_no_const *table;
94684
94685 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
94686 GFP_KERNEL);
94687diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
94688index 0283bae..5febcb0 100644
94689--- a/net/netfilter/nf_conntrack_core.c
94690+++ b/net/netfilter/nf_conntrack_core.c
94691@@ -1614,6 +1614,10 @@ void nf_conntrack_init_end(void)
94692 #define DYING_NULLS_VAL ((1<<30)+1)
94693 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
94694
94695+#ifdef CONFIG_GRKERNSEC_HIDESYM
94696+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
94697+#endif
94698+
94699 int nf_conntrack_init_net(struct net *net)
94700 {
94701 int ret;
94702@@ -1628,7 +1632,11 @@ int nf_conntrack_init_net(struct net *net)
94703 goto err_stat;
94704 }
94705
94706+#ifdef CONFIG_GRKERNSEC_HIDESYM
94707+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
94708+#else
94709 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
94710+#endif
94711 if (!net->ct.slabname) {
94712 ret = -ENOMEM;
94713 goto err_slabname;
94714diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
94715index 1df1761..ce8b88a 100644
94716--- a/net/netfilter/nf_conntrack_ecache.c
94717+++ b/net/netfilter/nf_conntrack_ecache.c
94718@@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
94719 #ifdef CONFIG_SYSCTL
94720 static int nf_conntrack_event_init_sysctl(struct net *net)
94721 {
94722- struct ctl_table *table;
94723+ ctl_table_no_const *table;
94724
94725 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
94726 GFP_KERNEL);
94727diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
94728index 974a2a4..52cc6ff 100644
94729--- a/net/netfilter/nf_conntrack_helper.c
94730+++ b/net/netfilter/nf_conntrack_helper.c
94731@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
94732
94733 static int nf_conntrack_helper_init_sysctl(struct net *net)
94734 {
94735- struct ctl_table *table;
94736+ ctl_table_no_const *table;
94737
94738 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
94739 GFP_KERNEL);
94740diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
94741index 0ab9636..cea3c6a 100644
94742--- a/net/netfilter/nf_conntrack_proto.c
94743+++ b/net/netfilter/nf_conntrack_proto.c
94744@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
94745
94746 static void
94747 nf_ct_unregister_sysctl(struct ctl_table_header **header,
94748- struct ctl_table **table,
94749+ ctl_table_no_const **table,
94750 unsigned int users)
94751 {
94752 if (users > 0)
94753diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
94754index a99b6c3..cb372f9 100644
94755--- a/net/netfilter/nf_conntrack_proto_dccp.c
94756+++ b/net/netfilter/nf_conntrack_proto_dccp.c
94757@@ -428,7 +428,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
94758 const char *msg;
94759 u_int8_t state;
94760
94761- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
94762+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
94763 BUG_ON(dh == NULL);
94764
94765 state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
94766@@ -457,7 +457,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
94767 out_invalid:
94768 if (LOG_INVALID(net, IPPROTO_DCCP))
94769 nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
94770- NULL, msg);
94771+ NULL, "%s", msg);
94772 return false;
94773 }
94774
94775@@ -486,7 +486,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
94776 u_int8_t type, old_state, new_state;
94777 enum ct_dccp_roles role;
94778
94779- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
94780+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
94781 BUG_ON(dh == NULL);
94782 type = dh->dccph_type;
94783
94784@@ -577,7 +577,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
94785 unsigned int cscov;
94786 const char *msg;
94787
94788- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
94789+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
94790 if (dh == NULL) {
94791 msg = "nf_ct_dccp: short packet ";
94792 goto out_invalid;
94793@@ -614,7 +614,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
94794
94795 out_invalid:
94796 if (LOG_INVALID(net, IPPROTO_DCCP))
94797- nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, msg);
94798+ nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
94799 return -NF_ACCEPT;
94800 }
94801
94802diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
94803index 4d4d8f1..e0f9a32 100644
94804--- a/net/netfilter/nf_conntrack_proto_tcp.c
94805+++ b/net/netfilter/nf_conntrack_proto_tcp.c
94806@@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
94807 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
94808 __u32 seq, ack, sack, end, win, swin;
94809 s16 receiver_offset;
94810- bool res;
94811+ bool res, in_recv_win;
94812
94813 /*
94814 * Get the required data from the packet.
94815@@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct,
94816 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
94817 receiver->td_scale);
94818
94819+ /* Is the ending sequence in the receive window (if available)? */
94820+ in_recv_win = !receiver->td_maxwin ||
94821+ after(end, sender->td_end - receiver->td_maxwin - 1);
94822+
94823 pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
94824 before(seq, sender->td_maxend + 1),
94825- after(end, sender->td_end - receiver->td_maxwin - 1),
94826+ (in_recv_win ? 1 : 0),
94827 before(sack, receiver->td_end + 1),
94828 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
94829
94830 if (before(seq, sender->td_maxend + 1) &&
94831- after(end, sender->td_end - receiver->td_maxwin - 1) &&
94832+ in_recv_win &&
94833 before(sack, receiver->td_end + 1) &&
94834 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
94835 /*
94836@@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
94837 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
94838 "nf_ct_tcp: %s ",
94839 before(seq, sender->td_maxend + 1) ?
94840- after(end, sender->td_end - receiver->td_maxwin - 1) ?
94841+ in_recv_win ?
94842 before(sack, receiver->td_end + 1) ?
94843 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
94844 : "ACK is under the lower bound (possible overly delayed ACK)"
94845diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
94846index bd700b4..4a3dc61 100644
94847--- a/net/netfilter/nf_conntrack_standalone.c
94848+++ b/net/netfilter/nf_conntrack_standalone.c
94849@@ -471,7 +471,7 @@ static ctl_table nf_ct_netfilter_table[] = {
94850
94851 static int nf_conntrack_standalone_init_sysctl(struct net *net)
94852 {
94853- struct ctl_table *table;
94854+ ctl_table_no_const *table;
94855
94856 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
94857 GFP_KERNEL);
94858diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
94859index 902fb0a..87f7fdb 100644
94860--- a/net/netfilter/nf_conntrack_timestamp.c
94861+++ b/net/netfilter/nf_conntrack_timestamp.c
94862@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
94863 #ifdef CONFIG_SYSCTL
94864 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
94865 {
94866- struct ctl_table *table;
94867+ ctl_table_no_const *table;
94868
94869 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
94870 GFP_KERNEL);
94871diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
94872index 3b18dd1..f79e0ca 100644
94873--- a/net/netfilter/nf_log.c
94874+++ b/net/netfilter/nf_log.c
94875@@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = {
94876
94877 #ifdef CONFIG_SYSCTL
94878 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
94879-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
94880+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
94881
94882 static int nf_log_proc_dostring(ctl_table *table, int write,
94883 void __user *buffer, size_t *lenp, loff_t *ppos)
94884@@ -274,14 +274,16 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
94885 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
94886 mutex_unlock(&nf_log_mutex);
94887 } else {
94888+ ctl_table_no_const nf_log_table = *table;
94889+
94890 mutex_lock(&nf_log_mutex);
94891 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
94892 lockdep_is_held(&nf_log_mutex));
94893 if (!logger)
94894- table->data = "NONE";
94895+ nf_log_table.data = "NONE";
94896 else
94897- table->data = logger->name;
94898- r = proc_dostring(table, write, buffer, lenp, ppos);
94899+ nf_log_table.data = logger->name;
94900+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
94901 mutex_unlock(&nf_log_mutex);
94902 }
94903
94904diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
94905index f042ae5..30ea486 100644
94906--- a/net/netfilter/nf_sockopt.c
94907+++ b/net/netfilter/nf_sockopt.c
94908@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
94909 }
94910 }
94911
94912- list_add(&reg->list, &nf_sockopts);
94913+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
94914 out:
94915 mutex_unlock(&nf_sockopt_mutex);
94916 return ret;
94917@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
94918 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
94919 {
94920 mutex_lock(&nf_sockopt_mutex);
94921- list_del(&reg->list);
94922+ pax_list_del((struct list_head *)&reg->list);
94923 mutex_unlock(&nf_sockopt_mutex);
94924 }
94925 EXPORT_SYMBOL(nf_unregister_sockopt);
94926diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
94927index 962e979..e46f350 100644
94928--- a/net/netfilter/nfnetlink_log.c
94929+++ b/net/netfilter/nfnetlink_log.c
94930@@ -82,7 +82,7 @@ static int nfnl_log_net_id __read_mostly;
94931 struct nfnl_log_net {
94932 spinlock_t instances_lock;
94933 struct hlist_head instance_table[INSTANCE_BUCKETS];
94934- atomic_t global_seq;
94935+ atomic_unchecked_t global_seq;
94936 };
94937
94938 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
94939@@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log,
94940 nfmsg->version = NFNETLINK_V0;
94941 nfmsg->res_id = htons(inst->group_num);
94942
94943+ memset(&pmsg, 0, sizeof(pmsg));
94944 pmsg.hw_protocol = skb->protocol;
94945 pmsg.hook = hooknum;
94946
94947@@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log,
94948 if (indev && skb->dev &&
94949 skb->mac_header != skb->network_header) {
94950 struct nfulnl_msg_packet_hw phw;
94951- int len = dev_parse_header(skb, phw.hw_addr);
94952+ int len;
94953+
94954+ memset(&phw, 0, sizeof(phw));
94955+ len = dev_parse_header(skb, phw.hw_addr);
94956 if (len > 0) {
94957 phw.hw_addrlen = htons(len);
94958 if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
94959@@ -559,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
94960 /* global sequence number */
94961 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
94962 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
94963- htonl(atomic_inc_return(&log->global_seq))))
94964+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
94965 goto nla_put_failure;
94966
94967 if (data_len) {
94968diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
94969index 5352b2d..e0083ce 100644
94970--- a/net/netfilter/nfnetlink_queue_core.c
94971+++ b/net/netfilter/nfnetlink_queue_core.c
94972@@ -444,7 +444,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
94973 if (indev && entskb->dev &&
94974 entskb->mac_header != entskb->network_header) {
94975 struct nfqnl_msg_packet_hw phw;
94976- int len = dev_parse_header(entskb, phw.hw_addr);
94977+ int len;
94978+
94979+ memset(&phw, 0, sizeof(phw));
94980+ len = dev_parse_header(entskb, phw.hw_addr);
94981 if (len) {
94982 phw.hw_addrlen = htons(len);
94983 if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
94984diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
94985index 7011c71..6113cc7 100644
94986--- a/net/netfilter/xt_TCPMSS.c
94987+++ b/net/netfilter/xt_TCPMSS.c
94988@@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
94989 {
94990 const struct xt_tcpmss_info *info = par->targinfo;
94991 struct tcphdr *tcph;
94992- unsigned int tcplen, i;
94993+ int len, tcp_hdrlen;
94994+ unsigned int i;
94995 __be16 oldval;
94996 u16 newmss;
94997 u8 *opt;
94998@@ -64,11 +65,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
94999 if (!skb_make_writable(skb, skb->len))
95000 return -1;
95001
95002- tcplen = skb->len - tcphoff;
95003+ len = skb->len - tcphoff;
95004+ if (len < (int)sizeof(struct tcphdr))
95005+ return -1;
95006+
95007 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
95008+ tcp_hdrlen = tcph->doff * 4;
95009
95010- /* Header cannot be larger than the packet */
95011- if (tcplen < tcph->doff*4)
95012+ if (len < tcp_hdrlen)
95013 return -1;
95014
95015 if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
95016@@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
95017 newmss = info->mss;
95018
95019 opt = (u_int8_t *)tcph;
95020- for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
95021- if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
95022- opt[i+1] == TCPOLEN_MSS) {
95023+ for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
95024+ if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
95025 u_int16_t oldmss;
95026
95027 oldmss = (opt[i+2] << 8) | opt[i+3];
95028@@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
95029 }
95030
95031 /* There is data after the header so the option can't be added
95032- without moving it, and doing so may make the SYN packet
95033- itself too large. Accept the packet unmodified instead. */
95034- if (tcplen > tcph->doff*4)
95035+ * without moving it, and doing so may make the SYN packet
95036+ * itself too large. Accept the packet unmodified instead.
95037+ */
95038+ if (len > tcp_hdrlen)
95039 return 0;
95040
95041 /*
95042@@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
95043 newmss = min(newmss, (u16)1220);
95044
95045 opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
95046- memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
95047+ memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
95048
95049 inet_proto_csum_replace2(&tcph->check, skb,
95050- htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
95051+ htons(len), htons(len + TCPOLEN_MSS), 1);
95052 opt[0] = TCPOPT_MSS;
95053 opt[1] = TCPOLEN_MSS;
95054 opt[2] = (newmss & 0xff00) >> 8;
95055diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
95056index b68fa19..625fa1d 100644
95057--- a/net/netfilter/xt_TCPOPTSTRIP.c
95058+++ b/net/netfilter/xt_TCPOPTSTRIP.c
95059@@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
95060 struct tcphdr *tcph;
95061 u_int16_t n, o;
95062 u_int8_t *opt;
95063- int len;
95064+ int len, tcp_hdrlen;
95065
95066 /* This is a fragment, no TCP header is available */
95067 if (par->fragoff != 0)
95068@@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
95069 return NF_DROP;
95070
95071 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
95072- if (tcph->doff * 4 > len)
95073+ tcp_hdrlen = tcph->doff * 4;
95074+
95075+ if (len < tcp_hdrlen)
95076 return NF_DROP;
95077
95078 opt = (u_int8_t *)tcph;
95079@@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
95080 * Walk through all TCP options - if we find some option to remove,
95081 * set all octets to %TCPOPT_NOP and adjust checksum.
95082 */
95083- for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) {
95084+ for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) {
95085 optl = optlen(opt, i);
95086
95087- if (i + optl > tcp_hdrlen(skb))
95088+ if (i + optl > tcp_hdrlen)
95089 break;
95090
95091 if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
95092diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
95093new file mode 100644
95094index 0000000..c566332
95095--- /dev/null
95096+++ b/net/netfilter/xt_gradm.c
95097@@ -0,0 +1,51 @@
95098+/*
95099+ * gradm match for netfilter
95100